repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
djcapelis/lockbox-hw-kernel-sparc | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
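# Each value above is the numeric opcode string written to a thread's sysfs
# command file as "<opcode>:<data>" (see the "c" command handler below);
# e.g. "lock" with data 0 becomes the write "3:0".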
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
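# Each test opcode maps to [status field prefix, comparison, expected value]:
# the letter selects a field of the device status line ("M" mutex state,
# "O" opcode, "E" event, "P"/"N" priority), and a None expected value means
# the value to compare against comes from the test line's data column
# (see analyse() below).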
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
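# Example: analyse("2", ["M", "eq", 2], "0") extracts digit 0 of the mutex
# state value "2", compares it for equality against the fixed value 2 from
# the opcode table, and returns 1.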
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
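# A hypothetical specification in the accepted "command:opcode:threadid:data"
# line format (lines starting with "#" are comments):
#
#   C: schedfifo: 0: 80
#   C: lock:      0: 0
#   T: locked:    0: 0
#   C: unlock:    0: 0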
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
vivekananda/fbeats | django/contrib/gis/db/backends/oracle/introspection.py | 388 | 1777 | import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
class OracleIntrospection(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. Of course,
# this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
# but it is the only object type supported within Django anyways.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute('SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper()))
row = cursor.fetchone()
except Exception, msg:
raise Exception('Could not find entry in USER_SDO_GEOM_METADATA corresponding to "%s"."%s"\n'
'Error message: %s.' % (table_name, geo_col, msg))
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
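# Hypothetical usage sketch (table/column names and values assumed, not part
# of this module): for a 3-dimensional geometry column registered in
# USER_SDO_GEOM_METADATA with SRID 3857, get_geometry_type('GEO_TABLE', 'POLY')
# would return ('GeometryField', {'srid': 3857, 'dim': 3}).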
| bsd-3-clause |
scarriere/CSGames-AI2015 | AIClient_Python/test/test_mathUtils.py | 1 | 1127 | from unittest import TestCase
from mathUtils.Vector2 import Vector2
from mathUtils.MathUtils import MathUtils
from mathUtils.Direction import Direction
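# Note: the assertions below imply a Y-up convention for Vector2 (Direction.UP
# is a positive y component) and that diagonal or zero vectors have no single
# direction, so getDirection raises for them.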
class TestMathUtils(TestCase):
def test_getDirectionVector(self):
a = Vector2(1, 1)
b = Vector2(7, 7)
self.assertEqual(Vector2(6, 6), MathUtils.getDirectionVector(a, b))
def test_getDirectionFromPositions(self):
a = Vector2(0, 7)
b = Vector2(7, 7)
self.assertEqual(Direction.RIGHT, MathUtils.getDirectionFromPositions(a, b))
def test_getDirection(self):
self.assertEqual(Direction.RIGHT, MathUtils.getDirection(Vector2(2, 0)))
self.assertEqual(Direction.LEFT, MathUtils.getDirection(Vector2(-2, 0)))
self.assertEqual(Direction.UP, MathUtils.getDirection(Vector2(0, 2)))
self.assertEqual(Direction.DOWN, MathUtils.getDirection(Vector2(0, -2)))
self.assertRaises(Exception, MathUtils.getDirection, (Vector2(2, 2)))
self.assertRaises(Exception, MathUtils.getDirection, (Vector2(-2, -2)))
self.assertRaises(Exception, MathUtils.getDirection, (Vector2(0, 0)))
| mit |
Zhongqilong/kbengine | kbe/src/lib/python/Lib/test/test_minidom.py | 60 | 64328 | # test for xml.dom.minidom
import pickle
from test.support import run_unittest, findfile
import unittest
import xml.dom.minidom
from xml.dom.minidom import parse, Node, Document, parseString
from xml.dom.minidom import getDOMImplementation
tstfile = findfile("test.xml", subdir="xmltestdata")
# The tests of DocumentType importing use these helpers to construct
# the documents to work with, since not all DOM builders actually
# create the DocumentType nodes.
def create_doc_without_doctype(doctype=None):
return getDOMImplementation().createDocument(None, "doc", doctype)
def create_nonempty_doctype():
doctype = getDOMImplementation().createDocumentType("doc", None, None)
doctype.entities._seq = []
doctype.notations._seq = []
notation = xml.dom.minidom.Notation("my-notation", None,
"http://xml.python.org/notations/my")
doctype.notations._seq.append(notation)
entity = xml.dom.minidom.Entity("my-entity", None,
"http://xml.python.org/entities/my",
"my-notation")
entity.version = "1.0"
entity.encoding = "utf-8"
entity.actualEncoding = "us-ascii"
doctype.entities._seq.append(entity)
return doctype
def create_doc_with_doctype():
doctype = create_nonempty_doctype()
doc = create_doc_without_doctype(doctype)
doctype.entities.item(0).ownerDocument = doc
doctype.notations.item(0).ownerDocument = doc
return doc
class MinidomTest(unittest.TestCase):
def confirm(self, test, testname = "Test"):
self.assertTrue(test, testname)
def checkWholeText(self, node, s):
t = node.wholeText
self.confirm(t == s, "looking for %r, found %r" % (s, t))
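    # wholeText (DOM Level 3) concatenates this node's text with all logically
    # adjacent Text/CDATASection siblings; the wholeText tests below rely on
    # that behaviour.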
def testParseFromFile(self):
with open(tstfile) as file:
dom = parse(file)
dom.unlink()
self.confirm(isinstance(dom, Document))
def testGetElementsByTagName(self):
dom = parse(tstfile)
self.confirm(dom.getElementsByTagName("LI") == \
dom.documentElement.getElementsByTagName("LI"))
dom.unlink()
def testInsertBefore(self):
dom = parseString("<doc><foo/></doc>")
root = dom.documentElement
elem = root.childNodes[0]
nelem = dom.createElement("element")
root.insertBefore(nelem, elem)
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.childNodes[0] is nelem
and root.childNodes.item(0) is nelem
and root.childNodes[1] is elem
and root.childNodes.item(1) is elem
and root.firstChild is nelem
and root.lastChild is elem
and root.toxml() == "<doc><element/><foo/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem = dom.createElement("element")
root.insertBefore(nelem, None)
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3
and root.childNodes[1] is elem
and root.childNodes.item(1) is elem
and root.childNodes[2] is nelem
and root.childNodes.item(2) is nelem
and root.lastChild is nelem
and nelem.previousSibling is elem
and root.toxml() == "<doc><element/><foo/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem2 = dom.createElement("bar")
root.insertBefore(nelem2, nelem)
self.confirm(len(root.childNodes) == 4
and root.childNodes.length == 4
and root.childNodes[2] is nelem2
and root.childNodes.item(2) is nelem2
and root.childNodes[3] is nelem
and root.childNodes.item(3) is nelem
and nelem2.nextSibling is nelem
and nelem.previousSibling is nelem2
and root.toxml() ==
"<doc><element/><foo/><bar/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
dom.unlink()
def _create_fragment_test_nodes(self):
dom = parseString("<doc/>")
orig = dom.createTextNode("original")
c1 = dom.createTextNode("foo")
c2 = dom.createTextNode("bar")
c3 = dom.createTextNode("bat")
dom.documentElement.appendChild(orig)
frag = dom.createDocumentFragment()
frag.appendChild(c1)
frag.appendChild(c2)
frag.appendChild(c3)
return dom, orig, c1, c2, c3, frag
def testInsertBeforeFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.insertBefore(frag, None)
self.confirm(tuple(dom.documentElement.childNodes) ==
(orig, c1, c2, c3),
"insertBefore(<fragment>, None)")
frag.unlink()
dom.unlink()
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.insertBefore(frag, orig)
self.confirm(tuple(dom.documentElement.childNodes) ==
(c1, c2, c3, orig),
"insertBefore(<fragment>, orig)")
frag.unlink()
dom.unlink()
def testAppendChild(self):
dom = parse(tstfile)
dom.documentElement.appendChild(dom.createComment("Hello"))
self.confirm(dom.documentElement.childNodes[-1].nodeName == "#comment")
self.confirm(dom.documentElement.childNodes[-1].data == "Hello")
dom.unlink()
def testAppendChildFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.appendChild(frag)
self.confirm(tuple(dom.documentElement.childNodes) ==
(orig, c1, c2, c3),
"appendChild(<fragment>)")
frag.unlink()
dom.unlink()
def testReplaceChildFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.replaceChild(frag, orig)
orig.unlink()
self.confirm(tuple(dom.documentElement.childNodes) == (c1, c2, c3),
"replaceChild(<fragment>)")
frag.unlink()
dom.unlink()
def testLegalChildren(self):
dom = Document()
elem = dom.createElement('element')
text = dom.createTextNode('text')
self.assertRaises(xml.dom.HierarchyRequestErr, dom.appendChild, text)
dom.appendChild(elem)
self.assertRaises(xml.dom.HierarchyRequestErr, dom.insertBefore, text,
elem)
self.assertRaises(xml.dom.HierarchyRequestErr, dom.replaceChild, text,
elem)
nodemap = elem.attributes
self.assertRaises(xml.dom.HierarchyRequestErr, nodemap.setNamedItem,
text)
self.assertRaises(xml.dom.HierarchyRequestErr, nodemap.setNamedItemNS,
text)
elem.appendChild(text)
dom.unlink()
def testNamedNodeMapSetItem(self):
dom = Document()
elem = dom.createElement('element')
attrs = elem.attributes
attrs["foo"] = "bar"
a = attrs.item(0)
self.confirm(a.ownerDocument is dom,
"NamedNodeMap.__setitem__() sets ownerDocument")
self.confirm(a.ownerElement is elem,
"NamedNodeMap.__setitem__() sets ownerElement")
self.confirm(a.value == "bar",
"NamedNodeMap.__setitem__() sets value")
self.confirm(a.nodeValue == "bar",
"NamedNodeMap.__setitem__() sets nodeValue")
elem.unlink()
dom.unlink()
def testNonZero(self):
dom = parse(tstfile)
self.confirm(dom)# should not be zero
dom.appendChild(dom.createComment("foo"))
self.confirm(not dom.childNodes[-1].childNodes)
dom.unlink()
def testUnlink(self):
dom = parse(tstfile)
self.assertTrue(dom.childNodes)
dom.unlink()
self.assertFalse(dom.childNodes)
def testContext(self):
with parse(tstfile) as dom:
self.assertTrue(dom.childNodes)
self.assertFalse(dom.childNodes)
def testElement(self):
dom = Document()
dom.appendChild(dom.createElement("abc"))
self.confirm(dom.documentElement)
dom.unlink()
def testAAA(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam2")
self.confirm(el.toxml() == '<abc spam="jam2"/>', "testAAA")
a = el.getAttributeNode("spam")
self.confirm(a.ownerDocument is dom,
"setAttribute() sets ownerDocument")
self.confirm(a.ownerElement is dom.documentElement,
"setAttribute() sets ownerElement")
dom.unlink()
def testAAB(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam")
el.setAttribute("spam", "jam2")
self.confirm(el.toxml() == '<abc spam="jam2"/>', "testAAB")
dom.unlink()
def testAddAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
child.setAttribute("def", "ghi")
self.confirm(child.getAttribute("def") == "ghi")
self.confirm(child.attributes["def"].value == "ghi")
child.setAttribute("jkl", "mno")
self.confirm(child.getAttribute("jkl") == "mno")
self.confirm(child.attributes["jkl"].value == "mno")
self.confirm(len(child.attributes) == 2)
child.setAttribute("def", "newval")
self.confirm(child.getAttribute("def") == "newval")
self.confirm(child.attributes["def"].value == "newval")
self.confirm(len(child.attributes) == 2)
dom.unlink()
def testDeleteAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
self.confirm(len(child.attributes) == 0)
child.setAttribute("def", "ghi")
self.confirm(len(child.attributes) == 1)
del child.attributes["def"]
self.confirm(len(child.attributes) == 0)
dom.unlink()
def testRemoveAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
child.setAttribute("def", "ghi")
self.confirm(len(child.attributes) == 1)
self.assertRaises(xml.dom.NotFoundErr, child.removeAttribute, "foo")
child.removeAttribute("def")
self.confirm(len(child.attributes) == 0)
dom.unlink()
def testRemoveAttrNS(self):
dom = Document()
child = dom.appendChild(
dom.createElementNS("http://www.python.org", "python:abc"))
child.setAttributeNS("http://www.w3.org", "xmlns:python",
"http://www.python.org")
child.setAttributeNS("http://www.python.org", "python:abcattr", "foo")
self.assertRaises(xml.dom.NotFoundErr, child.removeAttributeNS,
"foo", "http://www.python.org")
self.confirm(len(child.attributes) == 2)
child.removeAttributeNS("http://www.python.org", "abcattr")
self.confirm(len(child.attributes) == 1)
dom.unlink()
def testRemoveAttributeNode(self):
dom = Document()
child = dom.appendChild(dom.createElement("foo"))
child.setAttribute("spam", "jam")
self.confirm(len(child.attributes) == 1)
node = child.getAttributeNode("spam")
self.assertRaises(xml.dom.NotFoundErr, child.removeAttributeNode,
None)
child.removeAttributeNode(node)
self.confirm(len(child.attributes) == 0
and child.getAttributeNode("spam") is None)
dom2 = Document()
child2 = dom2.appendChild(dom2.createElement("foo"))
node2 = child2.getAttributeNode("spam")
self.assertRaises(xml.dom.NotFoundErr, child2.removeAttributeNode,
node2)
dom.unlink()
def testHasAttribute(self):
dom = Document()
child = dom.appendChild(dom.createElement("foo"))
child.setAttribute("spam", "jam")
self.confirm(child.hasAttribute("spam"))
def testChangeAttr(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam")
self.confirm(len(el.attributes) == 1)
el.setAttribute("spam", "bam")
# Set this attribute to be an ID and make sure that doesn't change
# when changing the value:
el.setIdAttribute("spam")
self.confirm(len(el.attributes) == 1
and el.attributes["spam"].value == "bam"
and el.attributes["spam"].nodeValue == "bam"
and el.getAttribute("spam") == "bam"
and el.getAttributeNode("spam").isId)
el.attributes["spam"] = "ham"
self.confirm(len(el.attributes) == 1
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam"].isId)
el.setAttribute("spam2", "bam")
self.confirm(len(el.attributes) == 2
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam2"].value == "bam"
and el.attributes["spam2"].nodeValue == "bam"
and el.getAttribute("spam2") == "bam")
el.attributes["spam2"] = "bam2"
self.confirm(len(el.attributes) == 2
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam2"].value == "bam2"
and el.attributes["spam2"].nodeValue == "bam2"
and el.getAttribute("spam2") == "bam2")
dom.unlink()
def testGetAttrList(self):
pass
def testGetAttrValues(self):
pass
def testGetAttrLength(self):
pass
def testGetAttribute(self):
dom = Document()
child = dom.appendChild(
dom.createElementNS("http://www.python.org", "python:abc"))
self.assertEqual(child.getAttribute('missing'), '')
def testGetAttributeNS(self):
dom = Document()
child = dom.appendChild(
dom.createElementNS("http://www.python.org", "python:abc"))
child.setAttributeNS("http://www.w3.org", "xmlns:python",
"http://www.python.org")
self.assertEqual(child.getAttributeNS("http://www.w3.org", "python"),
'http://www.python.org')
self.assertEqual(child.getAttributeNS("http://www.w3.org", "other"),
'')
child2 = child.appendChild(dom.createElement('abc'))
self.assertEqual(child2.getAttributeNS("http://www.python.org", "missing"),
'')
def testGetAttributeNode(self): pass
def testGetElementsByTagNameNS(self):
d="""<foo xmlns:minidom='http://pyxml.sf.net/minidom'>
<minidom:myelem/>
</foo>"""
dom = parseString(d)
elems = dom.getElementsByTagNameNS("http://pyxml.sf.net/minidom",
"myelem")
self.confirm(len(elems) == 1
and elems[0].namespaceURI == "http://pyxml.sf.net/minidom"
and elems[0].localName == "myelem"
and elems[0].prefix == "minidom"
and elems[0].tagName == "minidom:myelem"
and elems[0].nodeName == "minidom:myelem")
dom.unlink()
def get_empty_nodelist_from_elements_by_tagName_ns_helper(self, doc, nsuri,
lname):
nodelist = doc.getElementsByTagNameNS(nsuri, lname)
self.confirm(len(nodelist) == 0)
def testGetEmptyNodeListFromElementsByTagNameNS(self):
doc = parseString('<doc/>')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, 'http://xml.python.org/namespaces/a', 'localname')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, '*', 'splat')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, 'http://xml.python.org/namespaces/a', '*')
doc = parseString('<doc xmlns="http://xml.python.org/splat"><e/></doc>')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "http://xml.python.org/splat", "not-there")
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "*", "not-there")
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "http://somewhere.else.net/not-there", "e")
def testElementReprAndStr(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
dom.unlink()
def testElementReprAndStrUnicode(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
dom.unlink()
def testElementReprAndStrUnicodeNS(self):
dom = Document()
el = dom.appendChild(
dom.createElementNS("http://www.slashdot.org", "slash:abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
self.confirm("slash:abc" in string1)
dom.unlink()
def testAttributeRepr(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
node = el.setAttribute("abc", "def")
self.confirm(str(node) == repr(node))
dom.unlink()
def testTextNodeRepr(self): pass
def testWriteXML(self):
str = '<?xml version="1.0" ?><a b="c"/>'
dom = parseString(str)
domstr = dom.toxml()
dom.unlink()
self.confirm(str == domstr)
def testAltNewline(self):
str = '<?xml version="1.0" ?>\n<a b="c"/>\n'
dom = parseString(str)
domstr = dom.toprettyxml(newl="\r\n")
dom.unlink()
self.confirm(domstr == str.replace("\n", "\r\n"))
def test_toprettyxml_with_text_nodes(self):
# see issue #4147, text nodes are not indented
decl = '<?xml version="1.0" ?>\n'
self.assertEqual(parseString('<B>A</B>').toprettyxml(),
decl + '<B>A</B>\n')
self.assertEqual(parseString('<C>A<B>A</B></C>').toprettyxml(),
decl + '<C>\n\tA\n\t<B>A</B>\n</C>\n')
self.assertEqual(parseString('<C><B>A</B>A</C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\tA\n</C>\n')
self.assertEqual(parseString('<C><B>A</B><B>A</B></C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\t<B>A</B>\n</C>\n')
self.assertEqual(parseString('<C><B>A</B>A<B>A</B></C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\tA\n\t<B>A</B>\n</C>\n')
def test_toprettyxml_with_adjacent_text_nodes(self):
# see issue #4147, adjacent text nodes are indented normally
dom = Document()
elem = dom.createElement('elem')
elem.appendChild(dom.createTextNode('TEXT'))
elem.appendChild(dom.createTextNode('TEXT'))
dom.appendChild(elem)
decl = '<?xml version="1.0" ?>\n'
self.assertEqual(dom.toprettyxml(),
decl + '<elem>\n\tTEXT\n\tTEXT\n</elem>\n')
def test_toprettyxml_preserves_content_of_text_node(self):
# see issue #4147
for str in ('<B>A</B>', '<A><B>C</B></A>'):
dom = parseString(str)
dom2 = parseString(dom.toprettyxml())
self.assertEqual(
dom.getElementsByTagName('B')[0].childNodes[0].toxml(),
dom2.getElementsByTagName('B')[0].childNodes[0].toxml())
def testProcessingInstruction(self):
dom = parseString('<e><?mypi \t\n data \t\n ?></e>')
pi = dom.documentElement.firstChild
self.confirm(pi.target == "mypi"
and pi.data == "data \t\n "
and pi.nodeName == "mypi"
and pi.nodeType == Node.PROCESSING_INSTRUCTION_NODE
and pi.attributes is None
and not pi.hasChildNodes()
and len(pi.childNodes) == 0
and pi.firstChild is None
and pi.lastChild is None
and pi.localName is None
and pi.namespaceURI == xml.dom.EMPTY_NAMESPACE)
def testProcessingInstructionRepr(self): pass
def testTextRepr(self): pass
def testWriteText(self): pass
def testDocumentElement(self): pass
def testTooManyDocumentElements(self):
doc = parseString("<doc/>")
elem = doc.createElement("extra")
# Should raise an exception when adding an extra document element.
self.assertRaises(xml.dom.HierarchyRequestErr, doc.appendChild, elem)
elem.unlink()
doc.unlink()
def testCreateElementNS(self): pass
def testCreateAttributeNS(self): pass
def testParse(self): pass
def testParseString(self): pass
def testComment(self): pass
def testAttrListItem(self): pass
def testAttrListItems(self): pass
def testAttrListItemNS(self): pass
def testAttrListKeys(self): pass
def testAttrListKeysNS(self): pass
def testRemoveNamedItem(self):
doc = parseString("<doc a=''/>")
e = doc.documentElement
attrs = e.attributes
a1 = e.getAttributeNode("a")
a2 = attrs.removeNamedItem("a")
self.confirm(a1.isSameNode(a2))
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItem, "a")
def testRemoveNamedItemNS(self):
doc = parseString("<doc xmlns:a='http://xml.python.org/' a:b=''/>")
e = doc.documentElement
attrs = e.attributes
a1 = e.getAttributeNodeNS("http://xml.python.org/", "b")
a2 = attrs.removeNamedItemNS("http://xml.python.org/", "b")
self.confirm(a1.isSameNode(a2))
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItemNS,
"http://xml.python.org/", "b")
def testAttrListValues(self): pass
def testAttrListLength(self): pass
def testAttrList__getitem__(self): pass
def testAttrList__setitem__(self): pass
def testSetAttrValueandNodeValue(self): pass
def testParseElement(self): pass
def testParseAttributes(self): pass
def testParseElementNamespaces(self): pass
def testParseAttributeNamespaces(self): pass
def testParseProcessingInstructions(self): pass
def testChildNodes(self): pass
def testFirstChild(self): pass
def testHasChildNodes(self):
dom = parseString("<doc><foo/></doc>")
doc = dom.documentElement
self.assertTrue(doc.hasChildNodes())
dom2 = parseString("<doc/>")
doc2 = dom2.documentElement
self.assertFalse(doc2.hasChildNodes())
def _testCloneElementCopiesAttributes(self, e1, e2, test):
attrs1 = e1.attributes
attrs2 = e2.attributes
keys1 = list(attrs1.keys())
keys2 = list(attrs2.keys())
keys1.sort()
keys2.sort()
self.confirm(keys1 == keys2, "clone of element has same attribute keys")
for i in range(len(keys1)):
a1 = attrs1.item(i)
a2 = attrs2.item(i)
self.confirm(a1 is not a2
and a1.value == a2.value
and a1.nodeValue == a2.nodeValue
and a1.namespaceURI == a2.namespaceURI
and a1.localName == a2.localName
, "clone of attribute node has proper attribute values")
self.confirm(a2.ownerElement is e2,
"clone of attribute node correctly owned")
def _setupCloneElement(self, deep):
dom = parseString("<doc attr='value'><foo/></doc>")
root = dom.documentElement
clone = root.cloneNode(deep)
self._testCloneElementCopiesAttributes(
root, clone, "testCloneElement" + (deep and "Deep" or "Shallow"))
# mutilate the original so shared data is detected
root.tagName = root.nodeName = "MODIFIED"
root.setAttribute("attr", "NEW VALUE")
root.setAttribute("added", "VALUE")
return dom, clone
def testCloneElementShallow(self):
dom, clone = self._setupCloneElement(0)
self.confirm(len(clone.childNodes) == 0
and clone.childNodes.length == 0
and clone.parentNode is None
and clone.toxml() == '<doc attr="value"/>'
, "testCloneElementShallow")
dom.unlink()
def testCloneElementDeep(self):
dom, clone = self._setupCloneElement(1)
self.confirm(len(clone.childNodes) == 1
and clone.childNodes.length == 1
and clone.parentNode is None
and clone.toxml() == '<doc attr="value"><foo/></doc>'
, "testCloneElementDeep")
dom.unlink()
def testCloneDocumentShallow(self):
doc = parseString("<?xml version='1.0'?>\n"
"<!-- comment -->"
"<!DOCTYPE doc [\n"
"<!NOTATION notation SYSTEM 'http://xml.python.org/'>\n"
"]>\n"
"<doc attr='value'/>")
doc2 = doc.cloneNode(0)
self.confirm(doc2 is None,
"testCloneDocumentShallow:"
" shallow cloning of documents makes no sense!")
def testCloneDocumentDeep(self):
doc = parseString("<?xml version='1.0'?>\n"
"<!-- comment -->"
"<!DOCTYPE doc [\n"
"<!NOTATION notation SYSTEM 'http://xml.python.org/'>\n"
"]>\n"
"<doc attr='value'/>")
doc2 = doc.cloneNode(1)
self.confirm(not (doc.isSameNode(doc2) or doc2.isSameNode(doc)),
"testCloneDocumentDeep: document objects not distinct")
self.confirm(len(doc.childNodes) == len(doc2.childNodes),
"testCloneDocumentDeep: wrong number of Document children")
self.confirm(doc2.documentElement.nodeType == Node.ELEMENT_NODE,
"testCloneDocumentDeep: documentElement not an ELEMENT_NODE")
self.confirm(doc2.documentElement.ownerDocument.isSameNode(doc2),
"testCloneDocumentDeep: documentElement owner is not new document")
self.confirm(not doc.documentElement.isSameNode(doc2.documentElement),
"testCloneDocumentDeep: documentElement should not be shared")
if doc.doctype is not None:
# check the doctype iff the original DOM maintained it
self.confirm(doc2.doctype.nodeType == Node.DOCUMENT_TYPE_NODE,
"testCloneDocumentDeep: doctype not a DOCUMENT_TYPE_NODE")
self.confirm(doc2.doctype.ownerDocument.isSameNode(doc2))
self.confirm(not doc.doctype.isSameNode(doc2.doctype))
def testCloneDocumentTypeDeepOk(self):
doctype = create_nonempty_doctype()
clone = doctype.cloneNode(1)
self.confirm(clone is not None
and clone.nodeName == doctype.nodeName
and clone.name == doctype.name
and clone.publicId == doctype.publicId
and clone.systemId == doctype.systemId
and len(clone.entities) == len(doctype.entities)
and clone.entities.item(len(clone.entities)) is None
and len(clone.notations) == len(doctype.notations)
and clone.notations.item(len(clone.notations)) is None
and len(clone.childNodes) == 0)
for i in range(len(doctype.entities)):
se = doctype.entities.item(i)
ce = clone.entities.item(i)
self.confirm((not se.isSameNode(ce))
and (not ce.isSameNode(se))
and ce.nodeName == se.nodeName
and ce.notationName == se.notationName
and ce.publicId == se.publicId
and ce.systemId == se.systemId
and ce.encoding == se.encoding
and ce.actualEncoding == se.actualEncoding
and ce.version == se.version)
for i in range(len(doctype.notations)):
sn = doctype.notations.item(i)
cn = clone.notations.item(i)
self.confirm((not sn.isSameNode(cn))
and (not cn.isSameNode(sn))
and cn.nodeName == sn.nodeName
and cn.publicId == sn.publicId
and cn.systemId == sn.systemId)
def testCloneDocumentTypeDeepNotOk(self):
doc = create_doc_with_doctype()
clone = doc.doctype.cloneNode(1)
self.confirm(clone is None, "testCloneDocumentTypeDeepNotOk")
def testCloneDocumentTypeShallowOk(self):
doctype = create_nonempty_doctype()
clone = doctype.cloneNode(0)
self.confirm(clone is not None
and clone.nodeName == doctype.nodeName
and clone.name == doctype.name
and clone.publicId == doctype.publicId
and clone.systemId == doctype.systemId
and len(clone.entities) == 0
and clone.entities.item(0) is None
and len(clone.notations) == 0
and clone.notations.item(0) is None
and len(clone.childNodes) == 0)
def testCloneDocumentTypeShallowNotOk(self):
doc = create_doc_with_doctype()
clone = doc.doctype.cloneNode(0)
self.confirm(clone is None, "testCloneDocumentTypeShallowNotOk")
def check_import_document(self, deep, testName):
doc1 = parseString("<doc/>")
doc2 = parseString("<doc/>")
self.assertRaises(xml.dom.NotSupportedErr, doc1.importNode, doc2, deep)
def testImportDocumentShallow(self):
self.check_import_document(0, "testImportDocumentShallow")
def testImportDocumentDeep(self):
self.check_import_document(1, "testImportDocumentDeep")
def testImportDocumentTypeShallow(self):
src = create_doc_with_doctype()
target = create_doc_without_doctype()
self.assertRaises(xml.dom.NotSupportedErr, target.importNode,
src.doctype, 0)
def testImportDocumentTypeDeep(self):
src = create_doc_with_doctype()
target = create_doc_without_doctype()
self.assertRaises(xml.dom.NotSupportedErr, target.importNode,
src.doctype, 1)
# Testing attribute clones uses a helper, and should always be deep,
# even if the argument to cloneNode is false.
def check_clone_attribute(self, deep, testName):
doc = parseString("<doc attr='value'/>")
attr = doc.documentElement.getAttributeNode("attr")
self.assertNotEqual(attr, None)
clone = attr.cloneNode(deep)
self.confirm(not clone.isSameNode(attr))
self.confirm(not attr.isSameNode(clone))
self.confirm(clone.ownerElement is None,
testName + ": ownerElement should be None")
self.confirm(clone.ownerDocument.isSameNode(attr.ownerDocument),
testName + ": ownerDocument does not match")
self.confirm(clone.specified,
testName + ": cloned attribute must have specified == True")
def testCloneAttributeShallow(self):
self.check_clone_attribute(0, "testCloneAttributeShallow")
def testCloneAttributeDeep(self):
self.check_clone_attribute(1, "testCloneAttributeDeep")
def check_clone_pi(self, deep, testName):
doc = parseString("<?target data?><doc/>")
pi = doc.firstChild
self.assertEqual(pi.nodeType, Node.PROCESSING_INSTRUCTION_NODE)
clone = pi.cloneNode(deep)
self.confirm(clone.target == pi.target
and clone.data == pi.data)
def testClonePIShallow(self):
self.check_clone_pi(0, "testClonePIShallow")
def testClonePIDeep(self):
self.check_clone_pi(1, "testClonePIDeep")
def testNormalize(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode("second"))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalize -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild is root.lastChild
and root.firstChild.data == "firstsecond"
, "testNormalize -- result")
doc.unlink()
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
doc.normalize()
self.confirm(len(root.childNodes) == 0
and root.childNodes.length == 0,
"testNormalize -- single empty node removed")
doc.unlink()
def testNormalizeCombineAndNextSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode("second"))
root.appendChild(doc.createElement("i"))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3,
"testNormalizeCombineAndNextSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.firstChild.data == "firstsecond"
and root.firstChild is not root.lastChild
and root.firstChild.nextSibling is root.lastChild
and root.firstChild.previousSibling is None
and root.lastChild.previousSibling is root.firstChild
and root.lastChild.nextSibling is None
, "testNormalizeCombinedAndNextSibling -- result")
doc.unlink()
def testNormalizeDeleteWithPrevSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalizeDeleteWithPrevSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild.data == "first"
and root.firstChild is root.lastChild
and root.firstChild.nextSibling is None
and root.firstChild.previousSibling is None
, "testNormalizeDeleteWithPrevSibling -- result")
doc.unlink()
def testNormalizeDeleteWithNextSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("second"))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalizeDeleteWithNextSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild.data == "second"
and root.firstChild is root.lastChild
and root.firstChild.nextSibling is None
and root.firstChild.previousSibling is None
, "testNormalizeDeleteWithNextSibling -- result")
doc.unlink()
def testNormalizeDeleteWithTwoNonTextSiblings(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createElement("i"))
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createElement("i"))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3,
"testNormalizeDeleteWithTwoSiblings -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.firstChild is not root.lastChild
and root.firstChild.nextSibling is root.lastChild
and root.firstChild.previousSibling is None
and root.lastChild.previousSibling is root.firstChild
and root.lastChild.nextSibling is None
, "testNormalizeDeleteWithTwoSiblings -- result")
doc.unlink()
def testNormalizeDeleteAndCombine(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("second"))
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("fourth"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 5
and root.childNodes.length == 5,
"testNormalizeDeleteAndCombine -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild is root.lastChild
and root.firstChild.data == "secondfourth"
and root.firstChild.previousSibling is None
and root.firstChild.nextSibling is None
, "testNormalizeDeleteAndCombine -- result")
doc.unlink()
def testNormalizeRecursion(self):
doc = parseString("<doc>"
"<o>"
"<i/>"
"t"
#
#x
"</o>"
"<o>"
"<o>"
"t2"
#x2
"</o>"
"t3"
#x3
"</o>"
#
"</doc>")
root = doc.documentElement
root.childNodes[0].appendChild(doc.createTextNode(""))
root.childNodes[0].appendChild(doc.createTextNode("x"))
root.childNodes[1].childNodes[0].appendChild(doc.createTextNode("x2"))
root.childNodes[1].appendChild(doc.createTextNode("x3"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3
and len(root.childNodes[0].childNodes) == 4
and root.childNodes[0].childNodes.length == 4
and len(root.childNodes[1].childNodes) == 3
and root.childNodes[1].childNodes.length == 3
and len(root.childNodes[1].childNodes[0].childNodes) == 2
and root.childNodes[1].childNodes[0].childNodes.length == 2
, "testNormalize2 -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and len(root.childNodes[0].childNodes) == 2
and root.childNodes[0].childNodes.length == 2
and len(root.childNodes[1].childNodes) == 2
and root.childNodes[1].childNodes.length == 2
and len(root.childNodes[1].childNodes[0].childNodes) == 1
and root.childNodes[1].childNodes[0].childNodes.length == 1
, "testNormalize2 -- childNodes lengths")
self.confirm(root.childNodes[0].childNodes[1].data == "tx"
and root.childNodes[1].childNodes[0].childNodes[0].data == "t2x2"
and root.childNodes[1].childNodes[1].data == "t3x3"
, "testNormalize2 -- joined text fields")
self.confirm(root.childNodes[0].childNodes[1].nextSibling is None
and root.childNodes[0].childNodes[1].previousSibling
is root.childNodes[0].childNodes[0]
and root.childNodes[0].childNodes[0].previousSibling is None
and root.childNodes[0].childNodes[0].nextSibling
is root.childNodes[0].childNodes[1]
and root.childNodes[1].childNodes[1].nextSibling is None
and root.childNodes[1].childNodes[1].previousSibling
is root.childNodes[1].childNodes[0]
and root.childNodes[1].childNodes[0].previousSibling is None
and root.childNodes[1].childNodes[0].nextSibling
is root.childNodes[1].childNodes[1]
, "testNormalize2 -- sibling pointers")
doc.unlink()
def testBug0777884(self):
doc = parseString("<o>text</o>")
text = doc.documentElement.childNodes[0]
self.assertEqual(text.nodeType, Node.TEXT_NODE)
# Should run quietly, doing nothing.
text.normalize()
doc.unlink()
def testBug1433694(self):
doc = parseString("<o><i/>t</o>")
node = doc.documentElement
node.childNodes[1].nodeValue = ""
node.normalize()
self.confirm(node.childNodes[-1].nextSibling is None,
"Final child's .nextSibling should be None")
def testSiblings(self):
doc = parseString("<doc><?pi?>text?<elm/></doc>")
root = doc.documentElement
(pi, text, elm) = root.childNodes
self.confirm(pi.nextSibling is text and
pi.previousSibling is None and
text.nextSibling is elm and
text.previousSibling is pi and
elm.nextSibling is None and
elm.previousSibling is text, "testSiblings")
doc.unlink()
def testParents(self):
doc = parseString(
"<doc><elm1><elm2/><elm2><elm3/></elm2></elm1></doc>")
root = doc.documentElement
elm1 = root.childNodes[0]
(elm2a, elm2b) = elm1.childNodes
elm3 = elm2b.childNodes[0]
self.confirm(root.parentNode is doc and
elm1.parentNode is root and
elm2a.parentNode is elm1 and
elm2b.parentNode is elm1 and
elm3.parentNode is elm2b, "testParents")
doc.unlink()
def testNodeListItem(self):
doc = parseString("<doc><e/><e/></doc>")
children = doc.childNodes
docelem = children[0]
self.confirm(children[0] is children.item(0)
and children.item(1) is None
and docelem.childNodes.item(0) is docelem.childNodes[0]
and docelem.childNodes.item(1) is docelem.childNodes[1]
and docelem.childNodes.item(0).childNodes.item(0) is None,
"test NodeList.item()")
doc.unlink()
def testEncodings(self):
        doc = parseString('<foo>&#x20ac;</foo>')
self.assertEqual(doc.toxml(),
'<?xml version="1.0" ?><foo>\u20ac</foo>')
self.assertEqual(doc.toxml('utf-8'),
b'<?xml version="1.0" encoding="utf-8"?><foo>\xe2\x82\xac</foo>')
self.assertEqual(doc.toxml('iso-8859-15'),
b'<?xml version="1.0" encoding="iso-8859-15"?><foo>\xa4</foo>')
self.assertEqual(doc.toxml('us-ascii'),
b'<?xml version="1.0" encoding="us-ascii"?><foo>€</foo>')
self.assertEqual(doc.toxml('utf-16'),
'<?xml version="1.0" encoding="utf-16"?>'
'<foo>\u20ac</foo>'.encode('utf-16'))
# Verify that character decoding errors raise exceptions instead
# of crashing
self.assertRaises(UnicodeDecodeError, parseString,
b'<fran\xe7ais>Comment \xe7a va ? Tr\xe8s bien ?</fran\xe7ais>')
doc.unlink()
class UserDataHandler:
called = 0
def handle(self, operation, key, data, src, dst):
dst.setUserData(key, data + 1, self)
src.setUserData(key, None, None)
self.called = 1
def testUserData(self):
dom = Document()
n = dom.createElement('e')
self.confirm(n.getUserData("foo") is None)
n.setUserData("foo", None, None)
self.confirm(n.getUserData("foo") is None)
n.setUserData("foo", 12, 12)
n.setUserData("bar", 13, 13)
self.confirm(n.getUserData("foo") == 12)
self.confirm(n.getUserData("bar") == 13)
n.setUserData("foo", None, None)
self.confirm(n.getUserData("foo") is None)
self.confirm(n.getUserData("bar") == 13)
handler = self.UserDataHandler()
n.setUserData("bar", 12, handler)
c = n.cloneNode(1)
self.confirm(handler.called
and n.getUserData("bar") is None
and c.getUserData("bar") == 13)
n.unlink()
c.unlink()
dom.unlink()
def checkRenameNodeSharedConstraints(self, doc, node):
# Make sure illegal NS usage is detected:
self.assertRaises(xml.dom.NamespaceErr, doc.renameNode, node,
"http://xml.python.org/ns", "xmlns:foo")
doc2 = parseString("<doc/>")
self.assertRaises(xml.dom.WrongDocumentErr, doc2.renameNode, node,
xml.dom.EMPTY_NAMESPACE, "foo")
def testRenameAttribute(self):
doc = parseString("<doc a='v'/>")
elem = doc.documentElement
attrmap = elem.attributes
attr = elem.attributes['a']
# Simple renaming
attr = doc.renameNode(attr, xml.dom.EMPTY_NAMESPACE, "b")
self.confirm(attr.name == "b"
and attr.nodeName == "b"
and attr.localName is None
and attr.namespaceURI == xml.dom.EMPTY_NAMESPACE
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b").isSameNode(attr)
and attrmap["b"].isSameNode(attr)
and attr.ownerDocument.isSameNode(doc)
and attr.ownerElement.isSameNode(elem))
# Rename to have a namespace, no prefix
attr = doc.renameNode(attr, "http://xml.python.org/ns", "c")
self.confirm(attr.name == "c"
and attr.nodeName == "c"
and attr.localName == "c"
and attr.namespaceURI == "http://xml.python.org/ns"
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c").isSameNode(attr)
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c").isSameNode(attr)
and attrmap["c"].isSameNode(attr)
and attrmap[("http://xml.python.org/ns", "c")].isSameNode(attr))
# Rename to have a namespace, with prefix
attr = doc.renameNode(attr, "http://xml.python.org/ns2", "p:d")
self.confirm(attr.name == "p:d"
and attr.nodeName == "p:d"
and attr.localName == "d"
and attr.namespaceURI == "http://xml.python.org/ns2"
and attr.prefix == "p"
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c") is None
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c") is None
and elem.getAttributeNode("p:d").isSameNode(attr)
and elem.getAttributeNodeNS(
"http://xml.python.org/ns2", "d").isSameNode(attr)
and attrmap["p:d"].isSameNode(attr)
and attrmap[("http://xml.python.org/ns2", "d")].isSameNode(attr))
# Rename back to a simple non-NS node
attr = doc.renameNode(attr, xml.dom.EMPTY_NAMESPACE, "e")
self.confirm(attr.name == "e"
and attr.nodeName == "e"
and attr.localName is None
and attr.namespaceURI == xml.dom.EMPTY_NAMESPACE
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c") is None
and elem.getAttributeNode("p:d") is None
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c") is None
and elem.getAttributeNode("e").isSameNode(attr)
and attrmap["e"].isSameNode(attr))
self.assertRaises(xml.dom.NamespaceErr, doc.renameNode, attr,
"http://xml.python.org/ns", "xmlns")
self.checkRenameNodeSharedConstraints(doc, attr)
doc.unlink()
def testRenameElement(self):
doc = parseString("<doc/>")
elem = doc.documentElement
# Simple renaming
elem = doc.renameNode(elem, xml.dom.EMPTY_NAMESPACE, "a")
self.confirm(elem.tagName == "a"
and elem.nodeName == "a"
and elem.localName is None
and elem.namespaceURI == xml.dom.EMPTY_NAMESPACE
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
# Rename to have a namespace, no prefix
elem = doc.renameNode(elem, "http://xml.python.org/ns", "b")
self.confirm(elem.tagName == "b"
and elem.nodeName == "b"
and elem.localName == "b"
and elem.namespaceURI == "http://xml.python.org/ns"
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
# Rename to have a namespace, with prefix
elem = doc.renameNode(elem, "http://xml.python.org/ns2", "p:c")
self.confirm(elem.tagName == "p:c"
and elem.nodeName == "p:c"
and elem.localName == "c"
and elem.namespaceURI == "http://xml.python.org/ns2"
and elem.prefix == "p"
and elem.ownerDocument.isSameNode(doc))
# Rename back to a simple non-NS node
elem = doc.renameNode(elem, xml.dom.EMPTY_NAMESPACE, "d")
self.confirm(elem.tagName == "d"
and elem.nodeName == "d"
and elem.localName is None
and elem.namespaceURI == xml.dom.EMPTY_NAMESPACE
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
self.checkRenameNodeSharedConstraints(doc, elem)
doc.unlink()
def testRenameOther(self):
# We have to create a comment node explicitly since not all DOM
# builders used with minidom add comments to the DOM.
doc = xml.dom.minidom.getDOMImplementation().createDocument(
xml.dom.EMPTY_NAMESPACE, "e", None)
node = doc.createComment("comment")
self.assertRaises(xml.dom.NotSupportedErr, doc.renameNode, node,
xml.dom.EMPTY_NAMESPACE, "foo")
doc.unlink()
def testWholeText(self):
doc = parseString("<doc>a</doc>")
elem = doc.documentElement
text = elem.childNodes[0]
self.assertEqual(text.nodeType, Node.TEXT_NODE)
self.checkWholeText(text, "a")
elem.appendChild(doc.createTextNode("b"))
self.checkWholeText(text, "ab")
elem.insertBefore(doc.createCDATASection("c"), text)
self.checkWholeText(text, "cab")
# make sure we don't cross other nodes
splitter = doc.createComment("comment")
elem.appendChild(splitter)
text2 = doc.createTextNode("d")
elem.appendChild(text2)
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
x = doc.createElement("x")
elem.replaceChild(x, splitter)
splitter = x
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
x = doc.createProcessingInstruction("y", "z")
elem.replaceChild(x, splitter)
splitter = x
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
elem.removeChild(splitter)
self.checkWholeText(text, "cabd")
self.checkWholeText(text2, "cabd")
def testPatch1094164(self):
doc = parseString("<doc><e/></doc>")
elem = doc.documentElement
e = elem.firstChild
self.confirm(e.parentNode is elem, "Before replaceChild()")
# Check that replacing a child with itself leaves the tree unchanged
elem.replaceChild(e, e)
self.confirm(e.parentNode is elem, "After replaceChild()")
def testReplaceWholeText(self):
def setup():
doc = parseString("<doc>a<e/>d</doc>")
elem = doc.documentElement
text1 = elem.firstChild
text2 = elem.lastChild
splitter = text1.nextSibling
elem.insertBefore(doc.createTextNode("b"), splitter)
elem.insertBefore(doc.createCDATASection("c"), text1)
return doc, elem, text1, splitter, text2
doc, elem, text1, splitter, text2 = setup()
text = text1.replaceWholeText("new content")
self.checkWholeText(text, "new content")
self.checkWholeText(text2, "d")
self.confirm(len(elem.childNodes) == 3)
doc, elem, text1, splitter, text2 = setup()
text = text2.replaceWholeText("new content")
self.checkWholeText(text, "new content")
self.checkWholeText(text1, "cab")
self.confirm(len(elem.childNodes) == 5)
doc, elem, text1, splitter, text2 = setup()
text = text1.replaceWholeText("")
self.checkWholeText(text2, "d")
self.confirm(text is None
and len(elem.childNodes) == 2)
def testSchemaType(self):
doc = parseString(
"<!DOCTYPE doc [\n"
" <!ENTITY e1 SYSTEM 'http://xml.python.org/e1'>\n"
" <!ENTITY e2 SYSTEM 'http://xml.python.org/e2'>\n"
" <!ATTLIST doc id ID #IMPLIED \n"
" ref IDREF #IMPLIED \n"
" refs IDREFS #IMPLIED \n"
" enum (a|b) #IMPLIED \n"
" ent ENTITY #IMPLIED \n"
" ents ENTITIES #IMPLIED \n"
" nm NMTOKEN #IMPLIED \n"
" nms NMTOKENS #IMPLIED \n"
" text CDATA #IMPLIED \n"
" >\n"
"]><doc id='name' notid='name' text='splat!' enum='b'"
" ref='name' refs='name name' ent='e1' ents='e1 e2'"
" nm='123' nms='123 abc' />")
elem = doc.documentElement
# We don't want to rely on any specific loader at this point, so
# just make sure we can get to all the names, and that the
# DTD-based namespace is right. The names can vary by loader
# since each supports a different level of DTD information.
t = elem.schemaType
self.confirm(t.name is None
and t.namespace == xml.dom.EMPTY_NAMESPACE)
names = "id notid text enum ref refs ent ents nm nms".split()
for name in names:
a = elem.getAttributeNode(name)
t = a.schemaType
self.confirm(hasattr(t, "name")
and t.namespace == xml.dom.EMPTY_NAMESPACE)
def testSetIdAttribute(self):
doc = parseString("<doc a1='v' a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNode("a1")
a2 = e.getAttributeNode("a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttribute("a1")
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttribute("a2")
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttribute("a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(doc.getElementById("v") is None
and e.isSameNode(doc.getElementById("w"))
and not a1.isId
and a2.isId
and not a3.isId)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testSetIdAttributeNS(self):
NS1 = "http://xml.python.org/ns1"
NS2 = "http://xml.python.org/ns2"
doc = parseString("<doc"
" xmlns:ns1='" + NS1 + "'"
" xmlns:ns2='" + NS2 + "'"
" ns1:a1='v' ns2:a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNodeNS(NS1, "a1")
a2 = e.getAttributeNodeNS(NS2, "a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttributeNS(NS1, "a1")
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttributeNS(NS2, "a2")
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttributeNS(NS1, "a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(e.isSameNode(doc.getElementById("w")))
self.confirm(not a1.isId)
self.confirm(a2.isId)
self.confirm(not a3.isId)
self.confirm(doc.getElementById("v") is None)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testSetIdAttributeNode(self):
NS1 = "http://xml.python.org/ns1"
NS2 = "http://xml.python.org/ns2"
doc = parseString("<doc"
" xmlns:ns1='" + NS1 + "'"
" xmlns:ns2='" + NS2 + "'"
" ns1:a1='v' ns2:a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNodeNS(NS1, "a1")
a2 = e.getAttributeNodeNS(NS2, "a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttributeNode(a1)
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttributeNode(a2)
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttributeNS(NS1, "a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(e.isSameNode(doc.getElementById("w")))
self.confirm(not a1.isId)
self.confirm(a2.isId)
self.confirm(not a3.isId)
self.confirm(doc.getElementById("v") is None)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testPickledDocument(self):
doc = parseString("<?xml version='1.0' encoding='us-ascii'?>\n"
"<!DOCTYPE doc PUBLIC 'http://xml.python.org/public'"
" 'http://xml.python.org/system' [\n"
" <!ELEMENT e EMPTY>\n"
" <!ENTITY ent SYSTEM 'http://xml.python.org/entity'>\n"
"]><doc attr='value'> text\n"
"<?pi sample?> <!-- comment --> <e/> </doc>")
s = pickle.dumps(doc)
doc2 = pickle.loads(s)
stack = [(doc, doc2)]
while stack:
n1, n2 = stack.pop()
self.confirm(n1.nodeType == n2.nodeType
and len(n1.childNodes) == len(n2.childNodes)
and n1.nodeName == n2.nodeName
and not n1.isSameNode(n2)
and not n2.isSameNode(n1))
if n1.nodeType == Node.DOCUMENT_TYPE_NODE:
len(n1.entities)
len(n2.entities)
len(n1.notations)
len(n2.notations)
self.confirm(len(n1.entities) == len(n2.entities)
and len(n1.notations) == len(n2.notations))
for i in range(len(n1.notations)):
# XXX this loop body doesn't seem to be executed?
no1 = n1.notations.item(i)
                        no2 = n2.notations.item(i)
self.confirm(no1.name == no2.name
and no1.publicId == no2.publicId
and no1.systemId == no2.systemId)
stack.append((no1, no2))
for i in range(len(n1.entities)):
e1 = n1.entities.item(i)
e2 = n2.entities.item(i)
self.confirm(e1.notationName == e2.notationName
and e1.publicId == e2.publicId
and e1.systemId == e2.systemId)
stack.append((e1, e2))
if n1.nodeType != Node.DOCUMENT_NODE:
self.confirm(n1.ownerDocument.isSameNode(doc)
and n2.ownerDocument.isSameNode(doc2))
for i in range(len(n1.childNodes)):
stack.append((n1.childNodes[i], n2.childNodes[i]))
def testSerializeCommentNodeWithDoubleHyphen(self):
doc = create_doc_without_doctype()
doc.appendChild(doc.createComment("foo--bar"))
self.assertRaises(ValueError, doc.toxml)
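    # XML 1.0 forbids the string "--" inside comments, so serializing such a
    # comment node must raise rather than emit ill-formed markup.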
def testEmptyXMLNSValue(self):
doc = parseString("<element xmlns=''>\n"
"<foo/>\n</element>")
doc2 = parseString(doc.toxml())
self.confirm(doc2.namespaceURI == xml.dom.EMPTY_NAMESPACE)
def testExceptionOnSpacesInXMLNSValue(self):
with self.assertRaisesRegex(ValueError, 'Unsupported syntax'):
parseString('<element xmlns:abc="http:abc.com/de f g/hi/j k"><abc:foo /></element>')
def testDocRemoveChild(self):
doc = parse(tstfile)
title_tag = doc.documentElement.getElementsByTagName("TITLE")[0]
self.assertRaises( xml.dom.NotFoundErr, doc.removeChild, title_tag)
num_children_before = len(doc.childNodes)
doc.removeChild(doc.childNodes[0])
num_children_after = len(doc.childNodes)
self.assertTrue(num_children_after == num_children_before - 1)
def testProcessingInstructionNameError(self):
# wrong variable in .nodeValue property will
# lead to "NameError: name 'data' is not defined"
doc = parse(tstfile)
pi = doc.createProcessingInstruction("y", "z")
pi.nodeValue = "crash"
def test_main():
run_unittest(MinidomTest)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
datagrok/python-misc | datagrok/math/stats.py | 1 | 1192 | """Utilities for statistics"""
def sorted(xs):
"""Return a sorted copy of the list xs"""
_xs = list(xs)
_xs.sort()
return _xs
def stemleaf(ns):
"""Given a list of integers ns, print a stem-and-leaf display."""
return _stemleaf(sorted(ns))
def dsd(ns):
"""Given a list of integers ns, print a double-stem display."""
return _dsd(sorted(ns))
def fsd(ns):
"""Given a list of integers ns, print a five-stem display."""
return _fsd(sorted(ns))
def _stemleaf(ns):
"""Given a sorted list of integers ns, print a stem-and-leaf display."""
for q in range(10*(min(ns)/10), 10*(max(ns)/10+1), 10):
print "%d|%s" % (q/10, ''.join([str(x % 10) for x in ns if x<q+10 and x>=q]))
def _dsd(ns):
"""Given a sorted list of integers ns, print a double-stem display."""
for q in range(10*(min(ns)/10), 10*(max(ns)/10+1), 5):
print "%d|%s" % (q/10, ''.join([str(x % 10) for x in ns if x<q+5 and x>=q]))
def _fsd(ns):
"""Given a sorted list of integers ns, print a five-stem display."""
for q in range(10*(min(ns)/10), 10*(max(ns)/10+1), 2):
print "%d|%s" % (q/10, ''.join([str(x % 10) for x in ns if x<q+2 and x>=q]))
| agpl-3.0 |
funson/rt-xen | tools/python/xen/remus/qdisc.py | 22 | 4860 | import socket, struct
import netlink
qdisc_kinds = {}
TC_H_ROOT = 0xFFFFFFFF
class QdiscException(Exception): pass
class request(object):
"qdisc request message"
def __init__(self, cmd, flags=0, dev=None, handle=0):
self.n = netlink.nlmsg()
self.t = netlink.tcmsg()
self.n.nlmsg_flags = netlink.NLM_F_REQUEST|flags
self.n.nlmsg_type = cmd
self.t.tcm_family = socket.AF_UNSPEC
if not handle:
handle = TC_H_ROOT
self.t.tcm_parent = handle
if dev:
self.t.tcm_ifindex = dev
def pack(self):
t = self.t.pack()
self.n.body = t
return self.n.pack()
class addrequest(request):
def __init__(self, dev, handle, qdisc):
flags = netlink.NLM_F_EXCL|netlink.NLM_F_CREATE
super(addrequest, self).__init__(netlink.RTM_NEWQDISC, flags=flags,
dev=dev, handle=handle)
self.n.addattr(netlink.TCA_KIND, qdisc.kind + '\0')
opts = qdisc.pack()
if opts:
self.n.addattr(netlink.TCA_OPTIONS, opts)
class delrequest(request):
def __init__(self, dev, handle):
super(delrequest, self).__init__(netlink.RTM_DELQDISC, dev=dev,
handle=handle)
class changerequest(request):
def __init__(self, dev, handle, qdisc):
super(changerequest, self).__init__(netlink.RTM_NEWQDISC,
dev=dev, handle=handle)
self.n.addattr(netlink.TCA_KIND, qdisc.kind + '\0')
opts = qdisc.pack()
if opts:
self.n.addattr(netlink.TCA_OPTIONS, opts)
class Qdisc(object):
def __new__(cls, qdict=None, *args, **opts):
if qdict:
kind = qdict.get('kind')
cls = qdisc_kinds.get(kind, cls)
obj = super(Qdisc, cls).__new__(cls)
return obj
def __init__(self, qdict):
self._qdict = qdict
self.kind = qdict['kind']
self.handle = qdict['handle'] >> 16
def parse(self, opts):
if opts:
raise QdiscException('cannot parse qdisc parameters')
def optstr(self):
if self.qdict['options']:
return '[cannot parse qdisc parameters]'
else:
return ''
def pack(self):
return ''
TC_PRIO_MAX = 15
class PrioQdisc(Qdisc):
fmt = 'i%sB' % (TC_PRIO_MAX + 1)
def __init__(self, qdict):
super(PrioQdisc, self).__init__(qdict)
if qdict.get('options'):
self.unpack(qdict['options'])
else:
self.bands = 3
self.priomap = [1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
def pack(self):
#return struct.pack(self.fmt, self.bands, *self.priomap)
return ''
def unpack(self, opts):
args = struct.unpack(self.fmt, opts)
self.bands = args[0]
self.priomap = args[1:]
def optstr(self):
mapstr = ' '.join([str(p) for p in self.priomap])
return 'bands %d priomap %s' % (self.bands, mapstr)
qdisc_kinds['prio'] = PrioQdisc
qdisc_kinds['pfifo_fast'] = PrioQdisc
class CfifoQdisc(Qdisc):
fmt = 'II'
def __init__(self, qdict):
super(CfifoQdisc, self).__init__(qdict)
if qdict.get('options'):
self.unpack(qdict['options'])
else:
self.epoch = 0
self.vmid = 0
def pack(self):
return struct.pack(self.fmt, self.epoch, self.vmid)
def unpack(self, opts):
self.epoch, self.vmid = struct.unpack(self.fmt, opts)
def parse(self, opts):
args = list(opts)
try:
while args:
arg = args.pop(0)
if arg == 'epoch':
self.epoch = int(args.pop(0))
continue
if arg.lower() == 'vmid':
self.vmid = int(args.pop(0))
continue
except Exception, inst:
raise QdiscException(str(inst))
def optstr(self):
return 'epoch %d vmID %d' % (self.epoch, self.vmid)
qdisc_kinds['cfifo'] = CfifoQdisc
TC_PLUG_CHECKPOINT = 0
TC_PLUG_RELEASE = 1
class PlugQdisc(Qdisc):
fmt = 'I'
def __init__(self, qdict=None):
if not qdict:
qdict = {'kind': 'plug',
'handle': TC_H_ROOT}
super(PlugQdisc, self).__init__(qdict)
self.action = 0
def pack(self):
return struct.pack(self.fmt, self.action)
def parse(self, args):
if not args:
raise QdiscException('no action given')
arg = args[0]
if arg == 'checkpoint':
self.action = TC_PLUG_CHECKPOINT
elif arg == 'release':
self.action = TC_PLUG_RELEASE
else:
raise QdiscException('unknown action')
qdisc_kinds['plug'] = PlugQdisc
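# Usage sketch (assumptions: `2` is a valid interface index, and a netlink
# socket wrapper from the accompanying `netlink` module performs the send):
#
#   q = PlugQdisc()
#   q.parse(['checkpoint'])              # selects the TC_PLUG_CHECKPOINT action
#   req = changerequest(2, TC_H_ROOT, q)
#   payload = req.pack()                 # netlink message bytes ready to send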
| gpl-2.0 |
skillness/OpenNI | Externals/PSCommon/Windows/CreateRedist/CopyToRepository.py | 7 | 3337 | import os
import sys
import re
import time
import traceback
packageFullPath = "..\..\..\..\..\PrimeSenseVersions.nsh"
def find_package_number(findStr, text):
for line in text:
temp = re.search(findStr, line)
if temp != None:
packageNumber = temp.group(1)
return packageNumber
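# Example: for a line '!define PACKAGE_VER "5.1.0"', the pattern used in
# open_package_file() below makes find_package_number return '5.1.0'.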
def copy_files_to_repository(SourcePath,RepositoryPath, BuildDate, PackageVersion, Bits, ProjectName,
Major_version, Minor_version, Maintenance_version, Build_version):
fullVersion = Major_version + "." + Minor_version + "." + Maintenance_version + "." + Build_version
destPath = os.path.join(RepositoryPath, BuildDate + "__" + PackageVersion, "Win" + Bits,
ProjectName + "-" + fullVersion)
os.system("rmdir /S /q " + destPath)
os.system("mkdir " + destPath)
os.system("xcopy /E /I " + SourcePath + " " + destPath)
def copy_zip_to_repository(SourcePath,RepositoryPath, BuildDate, PackageVersion, Bits, ProjectName,
Major_version, Minor_version, Maintenance_version, Build_version):
fullVersion = Major_version + "." + Minor_version + "." + Maintenance_version + "." + Build_version
destPath = os.path.join(RepositoryPath, BuildDate + "__" + PackageVersion, "Win" + Bits,
ProjectName + "-" + fullVersion)
os.system("rmdir /S /q " + destPath)
os.system("mkdir " + destPath)
os.system("xcopy /I " + SourcePath + " " + destPath)
def open_package_file(path):
files = open(path).readlines()
packageNumber = find_package_number("!define PACKAGE_VER\s+\"(\S+)\"", files)
return packageNumber
if __name__ == "__main__":
try:
if len(sys.argv) != 10:
print (("Usage: copyToRepository.py <FinalPath> <RepositoryPath> <BuildDate> <bits> " \
+ "<ProjectName> <Major_version> <Minor_version> <Maintenance_version> <Build_version>"))
sys.exit(1)
finalPath = sys.argv[1]
repositoryPath = sys.argv[2]
buildDate = sys.argv[3]
bits = sys.argv[4]
projectName = sys.argv[5]
major_version = sys.argv[6]
minor_version = sys.argv[7]
maintenance_version = sys.argv[8]
build_version = sys.argv[9]
packageNumber = ''
if not(os.path.exists(packageFullPath)):
# Redist of OpenNI openSource
packageFullPath = "..\..\..\..\..\..\PrimeSenseVersions.nsh"
packageNumber = open_package_file(packageFullPath)
if packageNumber == '':
sys.exit(1)
copy_zip_to_repository("..\..\..\..\*.zip",repositoryPath, buildDate, packageNumber, bits,
projectName, major_version, minor_version, maintenance_version, build_version)
else:
# Redist of OpenNI
packageNumber = open_package_file(packageFullPath)
if packageNumber == '':
sys.exit(1)
copy_files_to_repository(finalPath,repositoryPath, buildDate, packageNumber, bits,
projectName, major_version, minor_version, maintenance_version, build_version)
sys.exit(0)
except SystemExit as e:
sys.exit(e)
except:
traceback.print_exc()
sys.exit(1)
| apache-2.0 |
letolab/airy | airy/utils/translation/trans_null.py | 1 | 2647 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
import warnings
from airy.core.conf import settings
from airy.utils.encoding import force_unicode
from airy.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
if number == 1: return singular
return plural
ngettext_lazy = ngettext
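# Example: ngettext('apple', 'apples', 1) returns 'apple';
# any other number returns 'apples'.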
def ungettext(singular, plural, number):
return force_unicode(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
# date formats shouldn't be used using gettext anymore. This
# is kept for backward compatibility
TECHNICAL_ID_MAP = {
"DATE_WITH_TIME_FULL": settings.DATETIME_FORMAT,
"DATE_FORMAT": settings.DATE_FORMAT,
"DATETIME_FORMAT": settings.DATETIME_FORMAT,
"TIME_FORMAT": settings.TIME_FORMAT,
"YEAR_MONTH_FORMAT": settings.YEAR_MONTH_FORMAT,
"MONTH_DAY_FORMAT": settings.MONTH_DAY_FORMAT,
}
def gettext(message):
result = TECHNICAL_ID_MAP.get(message, message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def ugettext(message):
return force_unicode(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
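# Example: to_locale('en-us') returns 'en_US'; to_locale('fr') returns 'fr'.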
def get_language_from_request(request):
return settings.LANGUAGE_CODE
# get_date_formats and get_partial_date_formats aren't used anymore by Django
# but are kept for backward compatibility.
def get_date_formats():
warnings.warn(
'`django.utils.translation.get_date_formats` is deprecated. '
'Please update your code to use the new i18n aware formatting.',
DeprecationWarning
)
return settings.DATE_FORMAT, settings.DATETIME_FORMAT, settings.TIME_FORMAT
def get_partial_date_formats():
warnings.warn(
'`django.utils.translation.get_partial_date_formats` is deprecated. '
'Please update your code to use the new i18n aware formatting.',
DeprecationWarning
)
return settings.YEAR_MONTH_FORMAT, settings.MONTH_DAY_FORMAT
| bsd-2-clause |
shakamunyi/tensorflow | tensorflow/contrib/keras/python/keras/layers/convolutional_recurrent.py | 2 | 24940 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.layers.recurrent import Recurrent
from tensorflow.contrib.keras.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
class ConvRecurrent2D(Recurrent):
"""Abstract base class for convolutional recurrent layers.
Do not use in a model -- it's not a functional layer!
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
Input shape:
5D tensor with shape `(num_samples, timesteps, channels, rows, cols)`.
Output shape:
- if `return_sequences`: 5D tensor with shape
`(num_samples, timesteps, channels, rows, cols)`.
- else, 4D tensor with shape `(num_samples, channels, rows, cols)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an `Embedding` layer with the `mask_zero` parameter
set to `True`.
**Note:** for the time being, masking is only supported with Theano.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch.
This assumes a one-to-one mapping between
samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
a `batch_input_shape=(...)` to the first layer in your model.
This is the expected shape of your inputs *including the batch
size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
return_sequences=False,
go_backwards=False,
stateful=False,
**kwargs):
super(ConvRecurrent2D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
'dilation_rate')
self.return_sequences = return_sequences
self.go_backwards = go_backwards
self.stateful = stateful
self.input_spec = [InputSpec(ndim=5)]
self.state_spec = None
def _compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif self.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(
rows,
self.kernel_size[0],
padding=self.padding,
stride=self.strides[0],
dilation=self.dilation_rate[0])
cols = conv_utils.conv_output_length(
cols,
self.kernel_size[1],
padding=self.padding,
stride=self.strides[1],
dilation=self.dilation_rate[1])
if self.return_sequences:
if self.data_format == 'channels_first':
output_shape = [input_shape[0], input_shape[1],
self.filters, rows, cols]
elif self.data_format == 'channels_last':
output_shape = [input_shape[0], input_shape[1],
rows, cols, self.filters]
else:
if self.data_format == 'channels_first':
output_shape = [input_shape[0], self.filters, rows, cols]
elif self.data_format == 'channels_last':
output_shape = [input_shape[0], rows, cols, self.filters]
if self.return_state:
if self.data_format == 'channels_first':
output_shapes = [output_shape] + [(input_shape[0],
self.filters,
rows,
cols) for _ in range(2)]
elif self.data_format == 'channels_last':
output_shapes = [output_shape] + [(input_shape[0],
rows,
cols,
self.filters) for _ in range(2)]
return [tensor_shape.TensorShape(shape) for shape in output_shapes]
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'return_sequences': self.return_sequences,
'go_backwards': self.go_backwards,
'stateful': self.stateful
}
base_config = super(ConvRecurrent2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConvLSTM2D(ConvRecurrent2D):
"""Convolutional LSTM.
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Input shape:
- if data_format='channels_first'
5D tensor with shape:
`(samples, time, channels, rows, cols)`
- if data_format='channels_last'
5D tensor with shape:
`(samples, time, rows, cols, channels)`
Output shape:
- if `return_sequences`
- if data_format='channels_first'
5D tensor with shape:
`(samples, time, filters, output_row, output_col)`
- if data_format='channels_last'
5D tensor with shape:
`(samples, time, output_row, output_col, filters)`
- else
- if data_format ='channels_first'
4D tensor with shape:
`(samples, filters, output_row, output_col)`
- if data_format='channels_last'
4D tensor with shape:
`(samples, output_row, output_col, filters)`
where output_row and output_col depend on the shape of the filter and
the padding
Raises:
ValueError: in case of invalid constructor arguments.
References:
- [Convolutional LSTM Network: A Machine Learning Approach for
Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
The current implementation does not include the feedback loop on the
cells output
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(ConvLSTM2D, self).__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = [InputSpec(ndim=4), InputSpec(ndim=4)]
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:])
if self.stateful:
self.reset_states()
else:
# initial states: 2 all-zero tensors of shape (filters)
self.states = [None, None]
if self.data_format == 'channels_first':
channel_axis = 2
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
state_shape = [None] * 4
state_shape[channel_axis] = input_dim
state_shape = tuple(state_shape)
self.state_spec = [
InputSpec(shape=state_shape),
InputSpec(shape=state_shape)
]
kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)
self.kernel = self.add_weight(
shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name='recurrent_kernel',
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.filters * 4,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
if self.unit_forget_bias:
bias_value = np.zeros((self.filters * 4,))
bias_value[self.filters:self.filters * 2] = 1.
K.set_value(self.bias, bias_value)
else:
self.bias = None
self.kernel_i = self.kernel[:, :, :, :self.filters]
self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]
self.kernel_f = self.kernel[:, :, :, self.filters:self.filters * 2]
self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.filters:
self.filters * 2]
self.kernel_c = self.kernel[:, :, :, self.filters * 2:self.filters * 3]
self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters * 2:
self.filters * 3]
self.kernel_o = self.kernel[:, :, :, self.filters * 3:]
self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.filters]
self.bias_f = self.bias[self.filters:self.filters * 2]
self.bias_c = self.bias[self.filters * 2:self.filters * 3]
self.bias_o = self.bias[self.filters * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = K.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = K.sum(initial_state, axis=1)
shape = list(self.kernel_shape)
shape[-1] = self.filters
initial_state = self.input_conv(
initial_state, K.zeros(tuple(shape)), padding=self.padding)
initial_states = [initial_state for _ in range(2)]
return initial_states
def reset_states(self):
if not self.stateful:
raise RuntimeError('Layer must be stateful.')
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise ValueError('If a RNN is stateful, a complete '
'input_shape must be provided '
'(including batch size). '
'Got input shape: ' + str(input_shape))
if self.return_state:
output_shape = tuple(self._compute_output_shape(input_shape)[0].as_list())
else:
output_shape = tuple(self._compute_output_shape(input_shape).as_list())
if self.return_sequences:
output_shape = (input_shape[0],) + output_shape[2:]
else:
output_shape = (input_shape[0],) + output_shape[1:]
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros(output_shape))
K.set_value(self.states[1],
np.zeros(output_shape))
else:
self.states = [
K.zeros(output_shape),
K.zeros(output_shape)
]
def get_constants(self, inputs, training=None):
constants = []
if self.implementation == 0 and 0 < self.dropout < 1:
ones = K.zeros_like(inputs)
ones = K.sum(ones, axis=1)
ones += 1
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.recurrent_dropout < 1:
shape = list(self.kernel_shape)
shape[-1] = self.filters
ones = K.zeros_like(inputs)
ones = K.sum(ones, axis=1)
ones = self.input_conv(ones, K.zeros(shape), padding=self.padding)
ones += 1.
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(
x,
w,
strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if b is not None:
conv_out = K.bias_add(conv_out, b, data_format=self.data_format)
return conv_out
def recurrent_conv(self, x, w):
conv_out = K.conv2d(
x, w, strides=(1, 1), padding='same', data_format=self.data_format)
return conv_out
def step(self, inputs, states):
assert len(states) == 4
h_tm1 = states[0]
c_tm1 = states[1]
dp_mask = states[2]
rec_dp_mask = states[3]
x_i = self.input_conv(
inputs * dp_mask[0], self.kernel_i, self.bias_i, padding=self.padding)
x_f = self.input_conv(
inputs * dp_mask[1], self.kernel_f, self.bias_f, padding=self.padding)
x_c = self.input_conv(
inputs * dp_mask[2], self.kernel_c, self.bias_c, padding=self.padding)
x_o = self.input_conv(
inputs * dp_mask[3], self.kernel_o, self.bias_o, padding=self.padding)
h_i = self.recurrent_conv(h_tm1 * rec_dp_mask[0], self.recurrent_kernel_i)
h_f = self.recurrent_conv(h_tm1 * rec_dp_mask[1], self.recurrent_kernel_f)
h_c = self.recurrent_conv(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c)
h_o = self.recurrent_conv(h_tm1 * rec_dp_mask[3], self.recurrent_kernel_o)
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(ConvLSTM2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| apache-2.0 |
wrongtest/nnlight | src/computation_on_java_impl/layers/pooling.py | 1 | 1033 | from layer.basic.pooling import MaxPoolingWithTimeLayer as MaxPoolingWithTimeLayerBase
class MaxPoolingWithTimeLayer(MaxPoolingWithTimeLayerBase):
def get_computation_on_java_code(self, code, binder):
datatype = binder.get_base_type(self.input)
input_var = binder.get_name(self.input)
output_var = binder.get_name(self.output)
code.field("int", "samples", val=input_var + ".length")
code.field("int", "length", val=input_var + "[0].length")
code.field("int", "features", val=input_var + "[0][0].length")
code.begin_for("int i=0; i<samples; i++")
code.begin_for("int j=0; j<features; j++")
code.field(datatype, "maximum", val=input_var + "[i][0][j]")
code.begin_for("int k=1; k<length; k++")
code.begin_if("maximum < %s[i][k][j]" % input_var)
code.assignment("maximum", "%s[i][k][j]" % input_var)
code.end()
code.end()
code.assignment(output_var + "[i][j]", "maximum")
code.end()
code.end()
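# For reference, the Java emitted by the calls above has roughly this shape
# (sketch; the element type and exact formatting come from the code builder):
#
#   for (int i=0; i<samples; i++)
#     for (int j=0; j<features; j++) {
#       float maximum = input[i][0][j];
#       for (int k=1; k<length; k++)
#         if (maximum < input[i][k][j]) { maximum = input[i][k][j]; }
#       output[i][j] = maximum;
#     }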
| gpl-2.0 |
bitpay/bitcoin | qa/rpc-tests/getchaintips.py | 66 | 2133 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips; verify that both are reported.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
def run_test (self):
BitcoinTestFramework.run_test (self)
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| mit |
denys-duchier/Scolar | config/softs/jaxml-3.01/jaxml.py | 2 | 49250 | # Module for XML, HTML and CGI output
# jaxml
# (C) Jerome Alet <[email protected]> 2000-2002
# You're welcome to redistribute this software under the
# terms of the GNU General Public Licence version 2.0
# or, at your option, any higher version.
#
# You can read the complete GNU GPL in the file COPYING
# which should come along with this software, or visit
# the Free Software Foundation's WEB site http://www.fsf.org
#
# $Id: jaxml.py,v 1.43 2003/06/26 06:59:32 jerome Exp $
#
# $Log: jaxml.py,v $
# Revision 1.43 2003/06/26 06:59:32 jerome
# Small fix.
#
# Revision 1.42 2003/02/13 14:36:09 jerome
# Version 3.0
#
# Revision 1.41 2003/02/13 10:33:58 jerome
# Version number changed to 3.0beta
# Named _push() and _pop() possibility (untested)
# Complete namespaces support thanks to Jean Jordaan
#
# Revision 1.40 2002/04/25 09:08:34 jerome
# New copyright strings
#
# Revision 1.39 2002/03/02 09:19:36 jerome
# typo in _do_nothing() in CGI scripts
#
# Revision 1.38 2001/04/23 12:17:08 jerome
# Nothing is output when there's no content to output.
#
# Revision 1.37 2001/02/23 15:02:49 jerome
# Correction of a minor bug which prevented headers to be kept correct
# when adding or multiplying documents
#
# Revision 1.36 2001/02/22 08:27:07 jerome
# The copy module is not needed anymore.
#
# Revision 1.35 2001/02/21 16:26:15 jerome
# Version number changed to 2.21
# The _updatemapping() method now returns the new mapping's content.
#
# Revision 1.34 2001/02/21 11:54:56 jerome
# Typo
#
# Revision 1.33 2001/02/21 11:40:47 jerome
# - version number changed to 2.20
# - basic arithmetic operations can now be made on XML_document
# instances, these constructs are now accepted:
#
# firstdoc + seconddoc
# seconddoc + firstdoc
#
# Where firstdoc is an instance of XML_document
# or one of its subclasses, and seconddoc is
# either an instance of XML_document or one of
# its subclasses or a string of text.
#
# yourdoc * intvalue
# intvalue * yourdoc
#
# Will repeat your document just like the * operator
# works with strings of text.
#
# - an infinite loop problem occured when doing a dir(yourdoc),
# it is now corrected, but as a consequences every method
# name beginning with "__" can't be used as a tag name.
# This shouldn't cause any problem, because tag names
# beginning with "__" are probably a very bad idea, if allowed
# at all.
# - an _updatemapping method was added to allow you to initialise
# or update the internal mapping used for the new templating
# facility.
#
# Revision 1.32 2001/02/19 13:42:10 jerome
# Suppressed a remaining debugging test
#
# Revision 1.31 2001/02/19 13:38:38 jerome
# Version changed to 2.10
# Added a new templating method, using documents as pseudo mappings:
# mydoc["some text"] = "another text"
# will replace all occurences of "some text" with "another text" on
# rendition (only), i.e. when either str() or repr() are called.
# Truth value can now be tested: empty documents return false.
#
# Revision 1.30 2001/02/14 10:49:20 jerome
# Typo
#
# Revision 1.29 2001/02/14 10:48:44 jerome
# Version number changed to 2.10
# Docstrings added to the _TAGGED_document.Tag class
# __repr__ is defined once for all
#
# Revision 1.28 2001/02/06 09:50:30 jerome
# Added documentation for the _template() method
# Added some doc for the HTML_document() and CGI_document() classes
#
# Revision 1.27 2001/02/05 16:03:59 jerome
# The CGI_document() constructor now accepts version and encoding arguments
#
# Revision 1.26 2001/02/05 14:49:55 jerome
# Exit code when using the old Html_document class was set to -1 (unsuccessful) instead of 0 (successful)
#
# Revision 1.25 2001/02/05 14:43:10 jerome
# Version number changed to 2.00beta1
#
# Revision 1.24 2001/02/05 14:31:07 jerome
# Version number changed to 2.00
# jaxml now includes what was in the old jahtml module, and features two new
# classes: HTML_document() and CGI_document().
# jaxml's API hasn't changed.
# jahtml's old API was changed to better match jaxml's one.
#
# ========================================================================
# = You don't need the old jahtml module anymore, but before removing it =
# = you must modify your programs to take care of the new API. =
# ========================================================================
#
# Revision 1.23 2001/01/26 12:43:16 jerome
# Rollback on "speed optimisations"
#
# Revision 1.22 2001/01/26 11:01:44 jerome
# The reduce line is commented out because it is much more slower then string.join + map
#
# Revision 1.21 2001/01/26 10:44:07 jerome
# Another speed optimisation
#
# Revision 1.20 2001/01/26 10:08:29 jerome
# Large scale speed optimisations
#
# Revision 1.19 2001/01/25 15:09:34 jerome
# Another optimisation
#
# Revision 1.18 2001/01/25 15:01:57 jerome
# Small speed optimisation in the _pop() method
#
# Revision 1.17 2001/01/25 13:28:48 jerome
# Version number changed to 1.26
# The notation for naming spaces was introduced:
#
# doc.space.tag(...)
#
# will produce:
#
# <space:tag>
# ...
# </space:tag>
#
# Revision 1.16 2001/01/25 12:22:03 jerome
# A new useful notation was introduced, you can now
# do something like:
#
# doc.onetag("...", attr="yes").othertag().adnauseam("easy tag nesting")
#
# Revision 1.15 2001/01/25 11:25:50 jerome
# Version number changed to 1.24
# Tags which enclose nothing are now handled correctly
# Calls to yourtag("Some text", dummy="DUMMY") will
# now produce:
#
# <yourtag dummy="DUMMY">Some text</yourtag>
#
# instead of :
#
# <yourtag dummy="DUMMY">
# Some text
# </yourtag>
#
# Some changes to the test program to reflect the new behaviour
#
# Revision 1.14 2001/01/23 10:30:24 jerome
# The _output() method now accepts None as its file argument
# Minor changes to the documentation
# Copyright year changed to 2000-2001
#
# Revision 1.13 2000/10/04 11:50:30 jerome
# The license is correctly set to "GNU GPL" in setup.py
# Version number change to 1.23
#
# Revision 1.12 2000/09/29 13:49:36 jerome
# The documentation referenced a non existing file.
#
# Revision 1.11 2000/09/29 13:25:37 jerome
# Small but correction with empty text, use None instead
#
# Revision 1.10 2000/09/29 11:14:18 jerome
# The traceback module is not needed anymore
#
# Revision 1.9 2000/09/29 11:02:26 jerome
# With the help of Kragen Sitaker idea posted on comp.lang.python,
# the speed increase factor is now almost 2.5 compared to the 1.1 version.
# Test made on the test.py program launched 5000 times.
#
# Revision 1.8 2000/09/29 08:55:04 jerome
# Near 13% speed optimisation on the test program launched 5000 times.
#
# Revision 1.7 2000/09/29 08:43:30 jerome
# Optimisations
#
# Revision 1.6 2000/09/29 07:42:52 jerome
# Version number changed to 1.2
#
# Revision 1.5 2000/09/28 10:06:09 jerome
# The frenglish word "imbricated" was replaced by the correct english one "nested",
# thanks to Kragen Sitaker.
# Version number changed to 1.1 because seems stable and want more testers: the
# Freshmeat Version Number Effect ;-)
#
# Revision 1.4 2000/09/15 08:30:41 jerome
# Version string and Documentation string added.
#
# Revision 1.3 2000/09/15 08:27:10 jerome
# Clarification on the licensing issue.
# General documentation changes.
# No code changes but version set to 0.3
#
# Revision 1.2 2000/09/14 07:15:29 jerome
# All tag attributes values are now quoted correctly.
# Using attributes with no value at all is not allowed anymore.
# Now xmllib doesn't complain anymore on sampleXML.py output.
#
#
import sys
import os
import string
import cStringIO
import time
__version__ = "3.01"
__doc__ = """
This python module defines a class named XML_document which will
allow you to generate XML documents (yeah !) more easily than
using print or similar functions.
Here's a list of available methods:
===================================
__init__(version, encoding)
The instance constructor, automatically called
when you create a new instance of XML_document.
You can optionally pass a version and encoding
string, the defaults are "1.0" and "iso-8859-1".
_indentstring(istr)
istr is the new indentation string used
to nicely present your XML documents. By
default istr is equal to 4 space characters.
_output(filearg)
use it to save the XML document to a file.
The optional filearg argument may be:
None, "", or "-" which stands for sys.stdout.
a file name.
any file object.
_text(sometext)
use it to insert plain text at the current position
in the document.
_push()
saves the current position in the XML document.
use it if you're going to create a bunch of nested
XML tags and want to escape from them later to continue
your document at the same indentation level.
you can pass an optional 'name' argument, to mark
a position by its name.
_pop()
restores the latest saved position.
use it to escape from nested tags and continue
your XML document at the same indentation level than
the latest time you called _push().
you can pass an optional 'name' argument, to continue
at the same indentation level as when you called _push()
with the same 'name' argument.
_template(file, **vars)
loads a template file and insert it as plain text at the current
position in the document, replacing ##varname## variables
in the template file with their corresponding value passed
in vars[varname]
_updatemapping(newmap)
updates the internal mapping used for replacing some strings with
others when rendering. This can be used as an easy way to
do templating without the need of an external file.
Pass None or no argument to reset the mapping to an empty one.
This method returns the new mapping's content.
Some more methods are available but not meant to be used directly, they
are: __nonzero__, __getitem__, __setitem__, __delitem__, __coerce__, __add__,
__radd__, __mul__, __rmul__, and __copy__. They are used automatically when doing
special things, read the source for details.
ANY and ALL other method you may call will be treated as an XML
tag, unless it already exists as a method in XML_document or a subclass of it,
or its name begins with "__". I suggest you to only add methods whose names
begin with '_' to keep things simple and clear: "__" is reserved for future
use.
The file test/test.py is an example program which generates
some documents, just play with it (use and modify) and you'll
learn quickly how to use jaxml. Its source code is documented and
attempts at describing and trying all jaxml's possibilities, so reading
it is probably the best way to become powerful with jaxml in less than
10 minutes.
Really, PLEASE READ the file test/test.py to learn all possibilities.
=========================================================================
Since version 2.00, jaxml integrates the full functionalities of the
old jahtml module via the HTML_document and CGI_document classes, however
the API for these two classes has changed to be cleaner and don't use any
predefined set of tags.
The HTML_document() and CGI_document() classes both inherit from XML_document()
and all its methods (see above), but also feature some useful helper methods.
Please read the jaxml module sources and the test/test.py program to learn how
to use them.
=========================================================================
The only difficult things are:
------------------------------
* you have to use the _push() and _pop() methods if you need
to get out of a bunch of nested tags.
* if you call a method (tag) with a string as the first
unnamed parameter, you'll don't need _push() or _pop()
because your tag will be automatically closed immediately.
* if you call a method (tag) with a python mapping as the
first or second unamed parameter, this mapping is used
to correctly handle XML namespaces or attributes
which are python reserved words (e.g. class), please
look at test/test.py to see an example.
"""
class _TAGGED_document :
"""This class defines a tagged document"""
class Tag :
"""This class defines a tag
This is largely inspired from a post in comp.lang.python
by Kragen Sitaker at the end of September 2000. Many
thanks to him !!!
"""
def __init__(self, parent, tagname) :
"""Save a link to the parent and the name of the tag for future reference
parent
The parent object, probably a _TAGGED_document instance.
tagname
The name of this tag
"""
self.__parent = parent
self.__tagname = tagname
def __call__(self, _text_ = None, *nsattributes, **attributes) :
"""Inserts the tag and its attributes in the document
_text_
eventually a string to be enclosed in the tag. the
name _text_ was chosen to not conflict with a probable user's attribute
called 'text'
"""
#
# NameSpace idea from Jean Jordaan
if type(_text_) == type({}) :
nsattributes = (_text_, )
_text_ = None
nsargs = ""
lg = len(nsattributes)
if (lg > 1) :
raise ValueError, "jaxml: Invalid attributes %s" % str(nsattributes[0])
elif lg :
nsattr = nsattributes[0]
try :
for ns in nsattr.keys() :
tags = nsattr[ns]
try :
for tag in tags.keys() :
nsargs = nsargs + ' %s%s%s="%s"' % (ns, (ns and ':'), tag, str(tags[tag]))
except AttributeError :
nsargs = nsargs + ' %s="%s"' % (ns, str(tags))
except AttributeError :
raise ValueError, "jaxml: Invalid attributes %s" % str(nsattr)
# first, we compute the attributes string
# we voluntarily do the test because of the speed optimisation
# it gives when there's no attribute
if attributes :
# interestingly the "reduce" line is much more slower than the "string.join + map" one
# arg = reduce(lambda s,x,a=attributes: '%s %s="%s"' % (s, x, str(a[x])), attributes.keys(), "")
arg = string.join(map(lambda x,a=attributes: ' %s="%s"' % (x, str(a[x])), attributes.keys()), "")
else :
arg = ""
# if a "first" argument was passed, enclose it in the tag
# and just get out of this tag
if _text_ is not None :
self.__parent._text("<%s%s>%s</%s>" % (self.__tagname, arg + nsargs, str(_text_), self.__tagname))
else :
# future tags will be inserted inside this one
self.__parent._tag__(self.__tagname, arg + nsargs)
return self.__parent
def __getattr__(self, name) :
"""Handles naming spaces (Space:Tag)
name
The name of the (sub)tag part
The current tag's name becomes the naming space's name.
name becomes the new tag's name.
"""
return self.__parent.Tag(self.__parent, "%s:%s" % (self.__tagname, name))
def __init__(self) :
"""Initialize local datas"""
# the document itself
self.__page = []
self.__pushed = []
self.__pusheddict = {}
self.__position = 0
# Initialise a mapping to implement another templating
# facility for postprocessing
self._updatemapping()
# sets the default indentation string
self._indentstring()
def __copy__(self) :
"""Creates a copy of the current document"""
# create an instance of the same class
new = self.__class__()
# copy the "private" members
new.__page = self.__page[:]
new.__pushed = self.__pushed[:]
new.__pusheddict = self.__pusheddict.copy()
new.__position = self.__position
new.__indentstring = self.__indentstring
new.__mapping = self.__mapping.copy()
# copy the "public" ones which are not callable (shouldn't occur anyway)
for (key, value) in self.__dict__.items() :
if (key[:2] == "__") and (key[-2:] == "__") and not callable(getattr(self, key)) :
setattr(new, key, value)
return new
def __mul__(self, number) :
"""Allows a document to be repeated
number
The number of times to repeat the document
allows constructs like: mydoc * 3
"""
if type(number) != type(1) :
raise TypeError, "jaxml.py: __mul__ operation not permitted on these operands."
if number < 0 :
raise ValueError, "jaxml.py: can't repeat a document a negative number of times."
if number == 0 :
# returns an empty document
return self.__class__()
else :
# a multiplication is just a big addition...
new = self.__copy__()
for i in range(number - 1) :
new = new + self
return new
def __rmul__(self, number) :
"""Allows a document to be repeated
number
The number of times to repeat the document
allows construts like: 3 * mydoc
"""
return self * number
def __add__(self, other) :
"""Allows two documents to be concatenated
other
The document or string of text to concatenate to self
This is not a real concatenation: the second
document (other) is in fact inserted at the current
position in the first one (self).
Also allows constructs like: mydoc + "some text"
"""
if (not isinstance(other, _TAGGED_document)) and (type(other) != type("")) :
raise TypeError, "jaxml.py: __add__ operation not permitted on these operands."
# first we make a copy of the original
new = self.__copy__()
# we must also "concatenate" our two template mappings
new.__mapping.update(other.__mapping)
# then we insert other as a single string of text
# skipping the last new line character.
# we use the parent class __str__ method to skip
# all the leading garbage like XML or HTTP headers.
# we should insert it as tags + text instead of plain text...
new._text(_TAGGED_document.__str__(other)[:-1])
return new
def __radd__(self, other) :
"""Allows two documents to be concatenated
other
The document or string of text to which self will be concatenated
This is not a real concatenation: the first
document (self) is in fact inserted at the current
position in the second one (other).
Also allows constructs like: "some text" + mydoc
"""
return other + self
def __coerce__(self, other) :
"""Try to convert two documents to a common type"""
if isinstance(other, _TAGGED_document) :
# no problem, compatible types
return (self, other)
elif type(other) == type("") :
# a string of text must be converted
# to self's type
new = self.__class__()
new._text(other)
return (self, new)
elif type(other) == type(1) :
# probably a __mul__ operation
return (self, other)
else :
# conversion is impossible
return None
def __getattr__(self, name) :
"""Here's the magic: we create tags on demand
name
The name of the tag we want to create
"""
# don't accept __xxxxx names
# we reserve them for internal or/and future use
if (name[:2] != "__") :
return self.Tag(self, name)
def __nonzero__(self) :
"""For truth value testing, returns 1 when the document is not empty"""
if self.__page :
return 1
else :
return 0
def __getitem__(self, key) :
"""returns key's value in the internal mapping"""
return self.__mapping[key]
def __setitem__(self, key, value) :
"""sets key's value in the internal mapping"""
self.__mapping[key] = value
def __delitem__(self, key) :
"""deletes this key from the internal mapping"""
del self.__mapping[key]
def __str__(self) :
"""returns the document as a string of text"""
outstr = cStringIO.StringIO()
indentation = ""
lgindent = len(self.__indentstring)
lastopened = None
for (text, arg, offset) in self.__page :
if offset == -1 : # closing tag
indentation = indentation[: -lgindent]
if text != lastopened : # normal case
outstr.write("%s</%s>\n" % (indentation, text))
else : # noting enclosed
outstr.seek(-2, 1)
outstr.write(" />\n")
lastopened = None
elif offset == 1 : # opening tag
outstr.write("%s<%s%s>\n" % (indentation, text, arg))
indentation = indentation + self.__indentstring
lastopened = text
else : # plain text
outstr.write("%s%s\n" % (indentation, text))
lastopened = None
outstr.flush()
retval = outstr.getvalue()
outstr.close()
# and now we use the internal mapping
# to postprocess the document.
# This may prove to be useful for replacing chars with their
# equivalent SGML entities for example, or for templating
# without a template file.
for (key, value) in self.__mapping.items() :
retval = string.replace(retval, key, value)
return retval
def __repr__(self) :
"""Returns a printable representation of the document, same as str() for now"""
# we define it with a 'def' instead of doing __repr__ = __str__ like the previous versions did
# because we may redefine __str__ in subclasses and don't want to
# have to redefine __repr__ too.
#
# This way it is done once for all:
return str(self)
def __adjust_stack(self, offset) :
"""Adjust the stack of pushed positions.
offset
offset by which adjust the stack
"""
if self.__pushed :
pos, oldoffset = self.__pushed.pop()
self.__pushed.append((pos, oldoffset + offset))
def _tag__(self, tag, arg) :
self.__page.insert(self.__position, (tag, arg, 1))
self.__position = self.__position + 1
self.__page.insert(self.__position, (tag, None, -1))
self.__adjust_stack(2)
#
# Callable interface starts here
def _push(self, name=None) :
"""Push the current tag's position.
useful before a block of nested tags
name : can be used to name the pushed position and pop it later directly
"""
if name :
self.__pusheddict[name] = len(self.__pushed)
self.__pushed.append((self.__position, 0))
def _pop(self, name=None) :
"""Restore the latest pushed position.
useful to get out of a block of nested tags
name : can be used to restore a named position, not necessarily the latest.
"""
if self.__pushed :
maxindex = len(self.__pushed) - 1
if name :
try :
index = self.__pusheddict[name]
del self.__pusheddict[name]
except KeyError :
raise KeyError, "jaxml named position %s doesn't exist" % name
else :
index = maxindex
while maxindex >= index :
pos, offset = self.__pushed.pop()
self.__position = pos + offset
self.__adjust_stack(offset) # we report the offset on previously saved tags
maxindex = maxindex - 1
def _text(self, text):
"""Insert plain text in the document
text
text to be inserted
"""
self.__page.insert(self.__position, (str(text), None, 0))
self.__position = self.__position + 1
self.__adjust_stack(1)
def _indentstring(self, newindentstring = " "):
"""Sets the indentation string for the output (default is 4 space characters)"""
self.__indentstring = newindentstring
def _updatemapping(self, newmap = None) :
"""Updates the internal mapping for the new templating facility,
and returns the new mapping's content
newmap
a Python mapping object to initialise or extend the
mapping. If None then the mapping is reset to an empty dictionnary
which is the default value.
"""
if newmap == None :
# clears the template mapping
self.__mapping = {}
return self.__mapping
elif type(newmap) == type({}) :
# update or extend the current mapping
self.__mapping.update(newmap)
return self.__mapping
else :
raise TypeError, "jaxml.py: _updatemapping's parameter must be a Python mapping object."
def _output(self, file = "-") :
"""Ouput the page, with indentation.
file
the optional file object or filename to output to
("-" or None or "" means sys.stdout)
"""
isopen = 0
if (type(file) == type("")) or (file is None) :
if file and (file != "-") :
outf = open(file, "w")
isopen = 1
else :
outf = sys.stdout
else :
outf = file # we assume it's a file object
outf.write("%s" % str(self))
outf.flush()
if isopen :
outf.close()
class XML_document(_TAGGED_document) :
"""This class defines an XML document"""
def __init__(self, version = "1.0", encoding = "iso-8859-1") :
"""Initialize local datas.
arguments:
version: xml version string
encoding: xml encoding language
"""
_TAGGED_document.__init__(self)
self.__version__ = version
self.__encoding__ = encoding
def __str__(self) :
"""returns the XML document as a string of text"""
tagdocstr = _TAGGED_document.__str__(self)
if tagdocstr :
return ("""<?xml version="%s" encoding="%s"?>\n""" % (self.__version__, self.__encoding__)) + tagdocstr
else :
return ""
def __subst_lines(self, lines, **vars):
"""Substitues var names with their values.
parts of this function come from the Whiz package
THANKS TO Neale Pickett ! Here follows the original license terms for Whiz:
## Author: Neale Pickett <[email protected]>
## Time-stamp: <99/02/11 10:45:42 neale>
## This software and ancillary information (herein called "SOFTWARE")
## called html.py made avaiable under the terms described here. The
## SOFTWARE has been approved for release with associated LA-CC Number
## 89-47.
## Unless otherwise indicated, this SOFTWARE has been authored by an
## employee or employees of the University of California, operator of
## the Los Alamos National Laboratory under contract No. W-7405-ENG-36
## with the U.S. Department of Energy. The U.S. Government has rights
## to use, reproduce, and distribute this SOFTWARE. The public may
## copy, distribute, prepare derivative works and publicly display this
## SOFTWARE without charge, provided that this Notice and any statement
## of authorship are reproduced on all copies. Neither the Government
## nor the University makes any warranty, express or implied, or assumes
## any liability or responsibility for the use of this SOFTWARE.
## If SOFTWARE is modified to produce derivative works, such modified
## SOFTWARE should be clearly marked, so as not to confuse it with the
## version available from LANL.
"""
import regex
container = regex.compile('\(<!-- \)?##\([-_A-Za-z0-9]+\)##\( -->\)?')
for line in lines:
while container.search(line) != -1:
try:
replacement = str(vars[container.group(2)])
except KeyError:
replacement = str('<!-- Unmatched variable: ' + container.group(2) + ' -->')
pre = line[:container.regs[0][0]]
post = line[container.regs[0][1]:]
if string.strip(pre) == '':
# pre is just whitespace, so pad our replacement's lines with that space
# use a distinct name so the outer 'lines' loop variable isn't clobbered
rlines = string.split(replacement, '\n')
new = [rlines[0]]
for l in rlines[1:]:
new.append(pre + l)
replacement = string.join(new, '\n')
line = "%s%s%s" % (pre, replacement, post)
self._text(line)
def _template(self, file = "-", **vars) :
"""Include an external file in the current doc
and replaces ##vars## with their values.
Parts of this function come from the Whiz package
THANKS TO Neale Pickett ! Here follows the original license terms for Whiz:
## Author: Neale Pickett <[email protected]>
## Time-stamp: <99/02/11 10:45:42 neale>
## This software and ancillary information (herein called "SOFTWARE")
## called html.py made avaiable under the terms described here. The
## SOFTWARE has been approved for release with associated LA-CC Number
## 89-47.
## Unless otherwise indicated, this SOFTWARE has been authored by an
## employee or employees of the University of California, operator of
## the Los Alamos National Laboratory under contract No. W-7405-ENG-36
## with the U.S. Department of Energy. The U.S. Government has rights
## to use, reproduce, and distribute this SOFTWARE. The public may
## copy, distribute, prepare derivative works and publicly display this
## SOFTWARE without charge, provided that this Notice and any statement
## of authorship are reproduced on all copies. Neither the Government
## nor the University makes any warranty, express or implied, or assumes
## any liability or responsibility for the use of this SOFTWARE.
## If SOFTWARE is modified to produce derivative works, such modified
## SOFTWARE should be clearly marked, so as not to confuse it with the
## version available from LANL.
"""
if (file is None) or (type(file) == type("")) :
if file and (file != "-") :
inf = open(file, "r")
else :
inf = sys.stdin
else :
inf = file
lines = map(lambda l: l[:-1], inf.readlines())
if inf != sys.stdin :
inf.close()
apply(self.__subst_lines, (lines,), vars)
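# Sketch of the ##var## substitution performed above (file name and
# variable names are hypothetical):
#   if template.html contains <h1>##title##</h1>, then
#   doc._template("template.html", title="Hello") inserts <h1>Hello</h1>;
#   unmatched variables become '<!-- Unmatched variable: name -->'.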
class HTML_document(XML_document) :
"""This class defines a useful method to output a default header,
as well as some methods defined for easying the use of this module and
keep porting from the old jahtml module easy too.
"""
def _default_header(self, title = "JAXML Default HTML Document", **modifiers) :
"""Begins a normal document.
title
the title of the document
modifiers
usual meta name= content= tags (keywords, description, etc...)
WARNING: doesn't work with other meta tags
"""
self.html()
self._push()
self.head()
self.title(title)
for mod in modifiers.keys() :
if modifiers[mod] != None :
self._push()
self.meta(name = string.upper(mod), content = modifiers[mod])
self._pop()
self._pop()
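# Illustrative call (assumed values): emits <html><head><title>...</title>
# plus one <meta name="KEYWORDS" content="..."> per keyword argument.
#   doc._default_header("My Page", keywords="a,b", description="demo")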
#
# Here we define some methods for easy porting from the old jahtml module
#
def __fake_input(self, _text_ = None, **args) :
self._push()
retcode = apply(self.input, (None, ), args)
self._pop()
return retcode
def _submit(self, **args) :
"""Submit button input type, beware of the leading underscore"""
args["type"] = "submit"
return apply(self.__fake_input, (None, ), args)
def _reset(self, **args) :
"""Reset button input type, beware of the leading underscore"""
args["type"] = "reset"
return apply(self.__fake_input, (None, ), args)
def _radio(self, **args) :
"""Radio button input type, beware of the leading underscore"""
args["type"] = "radio"
return apply(self.__fake_input, (None, ), args)
def _checkbox(self, **args) :
"""Checkbox input type, beware of the leading underscore"""
args["type"] = "checkbox"
return apply(self.__fake_input, (None, ), args)
def _password(self, **args) :
"""Password input type, beware of the leading underscore"""
args["type"] = "password"
return apply(self.__fake_input, (None, ), args)
def _hidden(self, **args) :
"""Hidden input type, beware of the leading underscore"""
args["type"] = "hidden"
return apply(self.__fake_input, (None, ), args)
def _textinput(self, **args) :
"""Text input type, beware of the leading underscore and the trailing 'input'"""
args["type"] = "text"
return apply(self.__fake_input, (None, ), args)
def _button(self, **args) :
"""Button input type, beware of the leading underscore"""
args["type"] = "button"
return apply(self.__fake_input, (None, ), args)
def _file(self, **args) :
"""File input type, beware of the leading underscore"""
args["type"] = "file"
return apply(self.__fake_input, (None, ), args)
def _image(self, **args) :
"""Image input type, beware of the leading underscore"""
args["type"] = "image"
return apply(self.__fake_input, (None, ), args)
def _meta(self, **args) :
"""The META tag, beware of the leading underscore"""
self._push()
retcode = apply(self.meta, (None, ), args)
self._pop()
return retcode
def _br(self, **args) :
"""The BR tag, beware of the leading underscore"""
self._push()
retcode = apply(self.br, (None, ), args)
self._pop()
return retcode
def _hr(self, **args) :
"""The HR tag, beware of the leading underscore"""
self._push()
retcode = apply(self.hr, (None, ), args)
self._pop()
return retcode
class CGI_document(HTML_document) :
"""
This class defines a CGI document.
It inherits from the HTML_document class and adds CGI-specific methods.
"""
__possibleargs = {"version": "1.0", "encoding": "iso-8859-1", "content_type": "text/html", "content_disposition": "", "expires": "", "pragma": "", "redirect": "", "status": "", "statmes": "", "debug": None}
def __init__(self, **args) :
"""
Initialise local data.
"""
HTML_document.__init__(self)
for key in self.__possibleargs.keys() :
if args.has_key(key) :
value = args[key]
else :
value = self.__possibleargs[key]
setattr(self, "__" + key + "__", value)
def __str__(self) :
"""Returns the CGI output as a string."""
if self.__redirect__ :
return "Location: %s\n\n" % self.__redirect__
else :
val = "Content-type: %s\n" % self.__content_type__
if self.__status__ :
val = val + "Status: %s %s\n" % (self.__status__, self.__statmes__)
if self.__pragma__ :
val = val + "Pragma: %s\n" % self.__pragma__
if self.__expires__ :
val = val + "Expires: %s\n" % self.__expires__
if self.__content_disposition__ :
val = val + "Content-Disposition: %s\n" % self.__content_disposition__
return val + "\n" + HTML_document.__str__(self)
def _set_debug(self, file) :
"""Sets the flag to send the output to a file too."""
self.__debug__ = file
def _set_pragma(self, pragma) :
"""Defines the pragma value.
pragma
The pragma's value
"""
self.__pragma__ = pragma
def _set_expires(self, expires) :
"""Defines the expiration date of the CGI output.
expires
The expiration date
"""
self.__expires__ = expires
def _set_redirect(self, url) :
"""Defines the redirection url.
url
The redirection url to send
"""
self.__redirect__ = url
def _set_content_type(self, content_type = "text/html") :
"""Defines the content type of the CGI output.
content_type
The new content type, default is text/html
"""
self.__content_type__ = content_type
def _set_content_disposition(self, content_disposition = "") :
"""Defines the content disposition of the CGI output.
content_disposition
The new disposition, default is ""
"""
self.__content_disposition__ = content_disposition
def _set_status(self, status, message="") :
"""Defines the status to return.
status
The status value
message
The message following the status value
"""
self.__status__ = status
self.__statmes__ = message
def _do_nothing(self, message = "No response") :
"""Set status to 204 (do nothing)."""
self._set_status("204", message)
def _envvar(self, varname) :
"""Returns the variable value or None."""
if os.environ.has_key(varname) :
return os.environ[varname]
def _server_software(self) :
"""Returns the SERVER_SOFTWARE environment variable value."""
return self._envvar('SERVER_SOFTWARE')
def _server_name(self) :
"""Returns the SERVER_NAME environment variable value."""
return self._envvar('SERVER_NAME')
def _gateway_interface(self) :
"""Returns the GATEWAY_INTERFACE environment variable value."""
return self._envvar('GATEWAY_INTERFACE')
def _server_protocol(self) :
"""Returns the SERVER_PROTOCOL environment variable value."""
return self._envvar('SERVER_PROTOCOL')
def _server_port(self) :
"""Returns the SERVER_PORT environment variable value."""
return self._envvar('SERVER_PORT')
def _request_method(self) :
"""Returns the REQUEST_METHOD environment variable value."""
return self._envvar('REQUEST_METHOD')
def _path_info(self) :
"""Returns the PATH_INFO environment variable value."""
return self._envvar('PATH_INFO')
def _path_translated(self) :
"""Returns the PATH_TRANSLATED environment variable value."""
return self._envvar('PATH_TRANSLATED')
def _document_root(self) :
"""Returns the DOCUMENT_ROOT environment variable value."""
return self._envvar('DOCUMENT_ROOT')
def _script_name(self) :
"""Returns the SCRIPT_NAME environment variable value."""
return self._envvar('SCRIPT_NAME')
def _query_string(self) :
"""Returns the QUERY_STRING environment variable value."""
return self._envvar('QUERY_STRING')
def _remote_host(self) :
"""Returns the REMOTE_HOST environment variable value."""
return self._envvar('REMOTE_HOST')
def _remote_addr(self) :
"""Returns the REMOTE_ADDR environment variable value."""
return self._envvar('REMOTE_ADDR')
def _auth_type(self) :
"""Returns the AUTH_TYPE environment variable value."""
return self._envvar('AUTH_TYPE')
def _remote_user(self) :
"""Returns the REMOTE_USER environment variable value."""
return self._envvar('REMOTE_USER')
def _remote_ident(self) :
"""Returns the REMOTE_IDENT environment variable value."""
return self._envvar('REMOTE_IDENT')
def _content_type(self) :
"""Returns the CONTENT_TYPE environment variable value."""
return self._envvar('CONTENT_TYPE')
def _content_length(self) :
"""Returns the CONTENT_LENGTH environment variable value."""
return self._envvar('CONTENT_LENGTH')
def _http_accept(self) :
"""Returns the HTTP_ACCEPT environment variable value."""
return self._envvar('HTTP_ACCEPT')
def _http_user_agent(self) :
"""Returns the HTTP_USER_AGENT environment variable value."""
return self._envvar('HTTP_USER_AGENT')
def _http_referer(self) :
"""Returns the HTTP_REFERER environment variable value."""
return self._envvar('HTTP_REFERER')
def _log_message(self, msg = "Error in a CGI Script made with jaxml", level = "error") :
"""Logs a message to the HTTP server's error log file (usually on stderr)."""
sys.stderr.write("[%s] [%s] %s\n" % (time.asctime(time.localtime(time.time())), level, msg))
def _log_message_and_exit(self, msg = "Fatal Error in a CGI Script made with jaxml", level = "error") :
"""Logs a message to the HTTP server's error log file (usually on stderr) and exits unsuccessfully."""
self._log_message(msg, level)
sys.exit(-1)
def _output(self, file = "-") :
"""Prints the CGI script output to stdout or file.
If self.__debug__ is defined, it is used as an additional
file to send the output to.
"""
HTML_document._output(self, file)
if self.__debug__ :
HTML_document._output(self, self.__debug__)
class Html_document :
"""This class warns the programmer when used, and exits the program.
This is done to signal that the jahtml module is now obsolete."""
def __init__(self) :
"""Warns and Exit"""
sys.stderr.write("EXITING: The jaxml.Html_document() class shouldn't be used anymore.\nUse jaxml.HTML_document() instead, and modify your programs according to the new API.\n")
sys.exit(-1)
| gpl-2.0 |
arun6582/django | tests/forms_tests/field_tests/test_nullbooleanfield.py | 49 | 3555 | from django.forms import Form, HiddenInput, NullBooleanField, RadioSelect
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class NullBooleanFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_nullbooleanfield_clean(self):
f = NullBooleanField()
self.assertIsNone(f.clean(''))
self.assertTrue(f.clean(True))
self.assertFalse(f.clean(False))
self.assertIsNone(f.clean(None))
self.assertFalse(f.clean('0'))
self.assertTrue(f.clean('1'))
self.assertIsNone(f.clean('2'))
self.assertIsNone(f.clean('3'))
self.assertIsNone(f.clean('hello'))
self.assertTrue(f.clean('true'))
self.assertFalse(f.clean('false'))
def test_nullbooleanfield_2(self):
# The internal value is preserved if using HiddenInput (#7753).
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual(
'<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" />'
'<input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />',
str(f)
)
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
self.assertIsNone(f.full_clean())
self.assertTrue(f.cleaned_data['hidden_nullbool1'])
self.assertFalse(f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its
# boolean values (#9609).
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
self.assertIsNone(f.full_clean())
self.assertTrue(f.cleaned_data['nullbool0'])
self.assertFalse(f.cleaned_data['nullbool1'])
self.assertIsNone(f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f.has_changed(False, None))
self.assertTrue(f.has_changed(None, False))
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(False, False))
self.assertTrue(f.has_changed(True, False))
self.assertTrue(f.has_changed(True, None))
self.assertTrue(f.has_changed(True, False))
# HiddenInput widget sends string values for boolean but doesn't clean them in value_from_datadict
self.assertFalse(f.has_changed(False, 'False'))
self.assertFalse(f.has_changed(True, 'True'))
self.assertFalse(f.has_changed(None, ''))
self.assertTrue(f.has_changed(False, 'True'))
self.assertTrue(f.has_changed(True, 'False'))
self.assertTrue(f.has_changed(None, 'False'))
| bsd-3-clause |
tridao/cvxpy | cvxpy/atoms/affine/sum_entries.py | 5 | 2288 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
from cvxpy.atoms.axis_atom import AxisAtom
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
class sum_entries(AxisAtom, AffAtom):
""" Summing the entries of an expression.
Attributes
----------
expr : CVXPY Expression
The expression to sum the entries of.
"""
def __init__(self, expr, axis=None):
super(sum_entries, self).__init__(expr, axis=axis)
@AffAtom.numpy_numeric
def numeric(self, values):
"""Sums the entries of value.
"""
return np.sum(values[0], axis=self.axis)
@staticmethod
def graph_implementation(arg_objs, size, data=None):
"""Sum the linear expression's entries.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
axis = data[0]
if axis is None:
obj = lu.sum_entries(arg_objs[0])
elif axis == 1:
const_size = (arg_objs[0].size[1], 1)
ones = lu.create_const(np.ones(const_size), const_size)
obj = lu.rmul_expr(arg_objs[0], ones, size)
else: # axis == 0
const_size = (1, arg_objs[0].size[0])
ones = lu.create_const(np.ones(const_size), const_size)
obj = lu.mul_expr(ones, arg_objs[0], size)
return (obj, [])
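# Illustrative usage of the atom above (a minimal sketch; assumes the
# matrix-style Variable API used elsewhere in this cvxpy codebase):
#   x = Variable(3, 4)
#   total = sum_entries(x)              # scalar: sum of all 12 entries
#   row_sums = sum_entries(x, axis=1)   # 3x1: sum across each row
#   col_sums = sum_entries(x, axis=0)   # 1x4: sum down each column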
| gpl-3.0 |
cdsgroup/qcdb | databases/A24.py | 2 | 34755 | #
# @BEGIN LICENSE
#
# QCDB: quantum chemistry common driver and databases
#
# Copyright (c) 2011-2017 The QCDB Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of QCDB.
#
# QCDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# QCDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with QCDB; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for bimolecular complexes.
| Geometries from JCTC 9 2151 (2013).
| Reference interaction energies from Rezac and Hobza, and others (see below).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'`` || ``'on'``
- **benchmark**
- ``'A240'`` original pub, Riley et al. JCTC 9 2151 (2013).
- ``'A24A'`` weighted average CP/unCP, Burns et al. JCTC 10 49 (2014).
- |dl| ``'A24B'`` |dr| highest extrapolated CP CCSD(T) values (aq5z or a56z).
- **subset**
- ``'small'`` few computationally quick systems
- ``'large'`` most computationally expensive systems
- ``'HB'`` hydrogen-bonded systems
- ``'MX'`` mixed-influence systems
- ``'DD'`` dispersion-dominated systems
"""
import re
import qcdb
# <<< A24 Database Module >>>
dbse = 'A24'
# <<< Database Members >>>
HRXN = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
HRXN_SM = [2]
HRXN_LG = []
HB = [1,2,3,4,5]
MX = [6,7,8,9,10,11,12,13,16]
DD = [14,15,17,18,19,20,21,22,23,24]
#weak = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
#weak_hb = [1,2,3,4,5]
#weak_mx = [6,7,8,9,10,11,12,13,16]
#weak_dd = [14,15,17,18,19,20,21,22,23,24]
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
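# Sketch of the convention encoded above: the interaction energy of each
# reaction is assembled from the reagent energies as
#   IE(rxn) = E(dimer) - E(monoA) - E(monoB)
# using the -CP (counterpoise) or -unCP monomers per the RXNM weights.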
# <<< Reference Values [kcal/mol] >>>
BIND = {}
# A240: Original publication JCTC 9 2151 (2013)
BIND_A240 = {}
BIND_A240['%s-%s' % (dbse, 1)] = -6.493
BIND_A240['%s-%s' % (dbse, 2)] = -5.006
BIND_A240['%s-%s' % (dbse, 3)] = -4.745
BIND_A240['%s-%s' % (dbse, 4)] = -4.581
BIND_A240['%s-%s' % (dbse, 5)] = -3.137
BIND_A240['%s-%s' % (dbse, 6)] = -1.654
BIND_A240['%s-%s' % (dbse, 7)] = -0.765
BIND_A240['%s-%s' % (dbse, 8)] = -0.663
BIND_A240['%s-%s' % (dbse, 9)] = -4.554
BIND_A240['%s-%s' % (dbse, 10)] = -2.557
BIND_A240['%s-%s' % (dbse, 11)] = -1.621
BIND_A240['%s-%s' % (dbse, 12)] = -1.524
BIND_A240['%s-%s' % (dbse, 13)] = -1.374
BIND_A240['%s-%s' % (dbse, 14)] = -1.090
BIND_A240['%s-%s' % (dbse, 15)] = -0.502
BIND_A240['%s-%s' % (dbse, 16)] = -1.485
BIND_A240['%s-%s' % (dbse, 17)] = -0.827
BIND_A240['%s-%s' % (dbse, 18)] = -0.607
BIND_A240['%s-%s' % (dbse, 19)] = -0.533
BIND_A240['%s-%s' % (dbse, 20)] = -0.405
BIND_A240['%s-%s' % (dbse, 21)] = -0.364
BIND_A240['%s-%s' % (dbse, 22)] = 0.821
BIND_A240['%s-%s' % (dbse, 23)] = 0.934
BIND_A240['%s-%s' % (dbse, 24)] = 1.115
# A24A: Weighted averaged reference used in JCTC 10 49 (2014)
BIND_A24A = {}
BIND_A24A['%s-%s' % (dbse, 1)] = -6.502
BIND_A24A['%s-%s' % (dbse, 2)] = -5.007
BIND_A24A['%s-%s' % (dbse, 3)] = -4.758
BIND_A24A['%s-%s' % (dbse, 4)] = -4.569
BIND_A24A['%s-%s' % (dbse, 5)] = -3.131
BIND_A24A['%s-%s' % (dbse, 6)] = -1.633
BIND_A24A['%s-%s' % (dbse, 7)] = -0.761
BIND_A24A['%s-%s' % (dbse, 8)] = -0.669
BIND_A24A['%s-%s' % (dbse, 9)] = -4.520
BIND_A24A['%s-%s' % (dbse, 10)] = -2.560
BIND_A24A['%s-%s' % (dbse, 11)] = -1.618
BIND_A24A['%s-%s' % (dbse, 12)] = -1.520
BIND_A24A['%s-%s' % (dbse, 13)] = -1.376
BIND_A24A['%s-%s' % (dbse, 14)] = -1.088
BIND_A24A['%s-%s' % (dbse, 15)] = -0.505
BIND_A24A['%s-%s' % (dbse, 16)] = -1.484
BIND_A24A['%s-%s' % (dbse, 17)] = -0.831
BIND_A24A['%s-%s' % (dbse, 18)] = -0.610
BIND_A24A['%s-%s' % (dbse, 19)] = -0.534
BIND_A24A['%s-%s' % (dbse, 20)] = -0.397
BIND_A24A['%s-%s' % (dbse, 21)] = -0.347
BIND_A24A['%s-%s' % (dbse, 22)] = 0.835
BIND_A24A['%s-%s' % (dbse, 23)] = 0.945
BIND_A24A['%s-%s' % (dbse, 24)] = 1.131
# A24B: Highest extrapolated CP CCSD(T) values (q5 or 56)
BIND_A24B = {}
BIND_A24B['%s-%s' % (dbse, 1)] = -6.506 # 56
BIND_A24B['%s-%s' % (dbse, 2)] = -5.015 # 56
BIND_A24B['%s-%s' % (dbse, 3)] = -4.751 # 56
BIND_A24B['%s-%s' % (dbse, 4)] = -4.592 # 56
BIND_A24B['%s-%s' % (dbse, 5)] = -3.142 # 56
BIND_A24B['%s-%s' % (dbse, 6)] = -1.661 # 56
BIND_A24B['%s-%s' % (dbse, 7)] = -0.767
BIND_A24B['%s-%s' % (dbse, 8)] = -0.665 # 56
BIND_A24B['%s-%s' % (dbse, 9)] = -4.565
BIND_A24B['%s-%s' % (dbse, 10)] = -2.564
BIND_A24B['%s-%s' % (dbse, 11)] = -1.626
BIND_A24B['%s-%s' % (dbse, 12)] = -1.527
BIND_A24B['%s-%s' % (dbse, 13)] = -1.377
BIND_A24B['%s-%s' % (dbse, 14)] = -1.094
BIND_A24B['%s-%s' % (dbse, 15)] = -0.504
BIND_A24B['%s-%s' % (dbse, 16)] = -1.493
BIND_A24B['%s-%s' % (dbse, 17)] = -0.830
BIND_A24B['%s-%s' % (dbse, 18)] = -0.609
BIND_A24B['%s-%s' % (dbse, 19)] = -0.534
BIND_A24B['%s-%s' % (dbse, 20)] = -0.406 # 56
BIND_A24B['%s-%s' % (dbse, 21)] = -0.354 # 56
BIND_A24B['%s-%s' % (dbse, 22)] = 0.818
BIND_A24B['%s-%s' % (dbse, 23)] = 0.930
BIND_A24B['%s-%s' % (dbse, 24)] = 1.115
# A24C: Includes (Q), core, rel corrections PCCP 17 19268 (2015)
BIND_A24C = {}
BIND_A24C['%s-%s' % (dbse, 1)] = -6.546
BIND_A24C['%s-%s' % (dbse, 2)] = -5.036
BIND_A24C['%s-%s' % (dbse, 3)] = -4.769
BIND_A24C['%s-%s' % (dbse, 4)] = -4.585
BIND_A24C['%s-%s' % (dbse, 5)] = -3.169
BIND_A24C['%s-%s' % (dbse, 6)] = -1.662
BIND_A24C['%s-%s' % (dbse, 7)] = -0.779
BIND_A24C['%s-%s' % (dbse, 8)] = -0.681
BIND_A24C['%s-%s' % (dbse, 9)] = -4.515
BIND_A24C['%s-%s' % (dbse, 10)] = -2.586
BIND_A24C['%s-%s' % (dbse, 11)] = -1.634
BIND_A24C['%s-%s' % (dbse, 12)] = -1.538
BIND_A24C['%s-%s' % (dbse, 13)] = -1.396
BIND_A24C['%s-%s' % (dbse, 14)] = -1.110
BIND_A24C['%s-%s' % (dbse, 15)] = -0.518
BIND_A24C['%s-%s' % (dbse, 16)] = -1.522
BIND_A24C['%s-%s' % (dbse, 17)] = -0.845
BIND_A24C['%s-%s' % (dbse, 18)] = -0.618
BIND_A24C['%s-%s' % (dbse, 19)] = -0.542
BIND_A24C['%s-%s' % (dbse, 20)] = -0.405
BIND_A24C['%s-%s' % (dbse, 21)] = -0.356
BIND_A24C['%s-%s' % (dbse, 22)] = 0.801
BIND_A24C['%s-%s' % (dbse, 23)] = 0.909
BIND_A24C['%s-%s' % (dbse, 24)] = 1.097
# Set default
BIND = BIND_A24B
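# Reference lookups are keyed '<dbse>-<rxn>'; an illustrative query:
#   BIND['A24-1']   # -> -6.506 kcal/mol (CP CCSD(T)/a56z, water_ammonia_Cs)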
# Reference information
BINDINFO_A240 = {}
BINDINFO_A24A = {}
BINDINFO_A24B = {}
BINDINFO_A24C = {}
for rxn in HRXN:
# A24-0: HF/aug-cc-pV5Z + D:CCSD(T)/aug-cc-pV[TQ5]Z + D:(Q)/6-31G**(0.25,0.15) + DKH4/aug-cc-pCVQZ-DK + CCSD(T)/aug-cc-pCV[TQ]Z(ae - fc)
BINDINFO_A240['%s-%s' % (dbse, rxn)] = {'citation': 'a240', 'method': 'CCSDTQ'}
if rxn in [1, 2, 3, 4, 5, 6, 8, 20, 21]:
BINDINFO_A24A['%s-%s' % (dbse, rxn)] = {'citation': 'dilabio', 'method': 'CCSDT', 'mode': 'ave', 'basis': 'a56z'}
BINDINFO_A24B['%s-%s' % (dbse, rxn)] = {'citation': 'dilabio', 'method': 'CCSDT', 'mode': 'CP', 'basis': 'a56z'}
else:
BINDINFO_A24A['%s-%s' % (dbse, rxn)] = {'citation': 'dilabio', 'method': 'CCSDT', 'mode': 'ave', 'basis': 'aq5z'}
BINDINFO_A24B['%s-%s' % (dbse, rxn)] = {'citation': 'dilabio', 'method': 'CCSDT', 'mode': 'CP', 'basis': 'aq5z'}
# A24C: A24B + D:(Q)/aTZ (A24-2, 4, 5, 19; could be aDTZ?) /aDZ (1, 6-8, 10-18, 20-24) /None (3, 9) + relativisic(A24-0) + core(A24-0)
BINDINFO_A24C['%s-%s' % (dbse, rxn)] = {'citation': 'a24c', 'method': 'CCSDTQ'}
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = """ water_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 1)] = """Dimer from water_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = """Monomer A water_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = """Monomer B water_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = """Monomer A water_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = """Monomer B water_ammonia_Cs """
TAGL['%s-%s' % (dbse, 2)] = """ water_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 2)] = """Dimer from water_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s' % (dbse, 3)] = """ HCN_HCN_Cxv """
TAGL['%s-%s-dimer' % (dbse, 3)] = """Dimer from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s' % (dbse, 4)] = """ HF_HF_Cs """
TAGL['%s-%s-dimer' % (dbse, 4)] = """Dimer from HF_HF_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s' % (dbse, 5)] = """ ammonia_ammonia_C2h """
TAGL['%s-%s-dimer' % (dbse, 5)] = """Dimer from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s' % (dbse, 6)] = """ methane_HF_C3v """
TAGL['%s-%s-dimer' % (dbse, 6)] = """Dimer from methane_HF_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s' % (dbse, 7)] = """ ammmonia_methane_C3v """
TAGL['%s-%s-dimer' % (dbse, 7)] = """Dimer from ammmonia_methane_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = """Monomer A from ammmonia_methane_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = """Monomer B from ammmonia_methane_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = """Monomer A from ammmonia_methane_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = """Monomer B from ammmonia_methane_C3v """
TAGL['%s-%s' % (dbse, 8)] = """ methane_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 8)] = """Dimer from methane_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s' % (dbse, 9)] = """ formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 9)] = """Dimer from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 9)] = """Monomer B from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 9)] = """Monomer B from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s' % (dbse, 10)] = """ ethene_wat_Cs """
TAGL['%s-%s-dimer' % (dbse, 10)] = """Dimer from ethene_wat_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 10)] = """Monomer A from ethene_wat_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 10)] = """Monomer B from ethene_wat_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 10)] = """Monomer A from ethene_wat_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 10)] = """Monomer B from ethene_wat_Cs """
TAGL['%s-%s' % (dbse, 11)] = """ ethene_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 11)] = """Dimer from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 11)] = """Monomer A from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 11)] = """Monomer B from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 11)] = """Monomer A from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 11)] = """Monomer B from ethene_formaldehyde_Cs """
TAGL['%s-%s' % (dbse, 12)] = """ ethyne_ethyne_C2v """
TAGL['%s-%s-dimer' % (dbse, 12)] = """Dimer from ethyne_ethyne_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 12)] = """Monomer A from ethyne_ethyne_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 12)] = """Monomer B from ethyne_ethyne_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 12)] = """Monomer A from ethyne_ethyne_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 12)] = """Monomer B from ethyne_ethyne_C2v """
TAGL['%s-%s' % (dbse, 13)] = """ ethene_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 13)] = """Dimer from ethene_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 13)] = """Monomer A from ethene_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 13)] = """Monomer B from ethene_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 13)] = """Monomer A from ethene_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 13)] = """Monomer B from ethene_ammonia_Cs """
TAGL['%s-%s' % (dbse, 14)] = """ ethene_ethene_C2v """
TAGL['%s-%s-dimer' % (dbse, 14)] = """Dimer from ethene_ethene_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 14)] = """Monomer A from ethene_ethene_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 14)] = """Monomer B from ethene_ethene_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 14)] = """Monomer A from ethene_ethene_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 14)] = """Monomer B from ethene_ethene_C2v """
TAGL['%s-%s' % (dbse, 15)] = """ methane_ethene_Cs """
TAGL['%s-%s-dimer' % (dbse, 15)] = """Dimer from methane_ethene_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 15)] = """Monomer A from methane_ethene_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 15)] = """Monomer B from methane_ethene_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 15)] = """Monomer A from methane_ethene_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 15)] = """Monomer B from methane_ethene_Cs """
TAGL['%s-%s' % (dbse, 16)] = """ borane_methane_Cs """
TAGL['%s-%s-dimer' % (dbse, 16)] = """Dimer from borane_methane_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 16)] = """Monomer A from borane_methane_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 16)] = """Monomer B from borane_methane_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 16)] = """Monomer A from borane_methane_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 16)] = """Monomer B from borane_methane_Cs """
TAGL['%s-%s' % (dbse, 17)] = """ methane_ethane_Cs """
TAGL['%s-%s-dimer' % (dbse, 17)] = """Dimer from methane_ethane_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 17)] = """Monomer A from methane_ethane_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 17)] = """Monomer B from methane_ethane_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 17)] = """Monomer A from methane_ethane_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 17)] = """Monomer B from methane_ethane_Cs """
TAGL['%s-%s' % (dbse, 18)] = """ methane_ethane_C3 """
TAGL['%s-%s-dimer' % (dbse, 18)] = """Dimer from methane_ethane_C3 """
TAGL['%s-%s-monoA-CP' % (dbse, 18)] = """Monomer A from methane_ethane_C3 """
TAGL['%s-%s-monoB-CP' % (dbse, 18)] = """Monomer B from methane_ethane_C3 """
TAGL['%s-%s-monoA-unCP' % (dbse, 18)] = """Monomer A from methane_ethane_C3 """
TAGL['%s-%s-monoB-unCP' % (dbse, 18)] = """Monomer B from methane_ethane_C3 """
TAGL['%s-%s' % (dbse, 19)] = """ methane_methane_D3d """
TAGL['%s-%s-dimer' % (dbse, 19)] = """Dimer from methane_methane_D3d """
TAGL['%s-%s-monoA-CP' % (dbse, 19)] = """Monomer A from methane_methane_D3d """
TAGL['%s-%s-monoB-CP' % (dbse, 19)] = """Monomer B from methane_methane_D3d """
TAGL['%s-%s-monoA-unCP' % (dbse, 19)] = """Monomer A from methane_methane_D3d """
TAGL['%s-%s-monoB-unCP' % (dbse, 19)] = """Monomer B from methane_methane_D3d """
TAGL['%s-%s' % (dbse, 20)] = """ methane_Ar_C3v """
TAGL['%s-%s-dimer' % (dbse, 20)] = """Dimer from methane_Ar_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 20)] = """Monomer A from methane_Ar_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 20)] = """Monomer B from methane_Ar_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 20)] = """Monomer A from methane_Ar_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 20)] = """Monomer B from methane_Ar_C3v """
TAGL['%s-%s' % (dbse, 21)] = """ ethene_Ar_C2v """
TAGL['%s-%s-dimer' % (dbse, 21)] = """Dimer from ethene_Ar_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 21)] = """Monomer A from ethene_Ar_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 21)] = """Monomer B from ethene_Ar_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 21)] = """Monomer A from ethene_Ar_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 21)] = """Monomer B from ethene_Ar_C2v """
TAGL['%s-%s' % (dbse, 22)] = """ ethene_ethyne_C2v """
TAGL['%s-%s-dimer' % (dbse, 22)] = """Dimer from ethene_ethyne_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 22)] = """Monomer A from ethene_ethyne_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 22)] = """Monomer B from ethene_ethyne_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 22)] = """Monomer A from ethene_ethyne_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 22)] = """Monomer B from ethene_ethyne_C2v """
TAGL['%s-%s' % (dbse, 23)] = """ ethene_ethene_D2h """
TAGL['%s-%s-dimer' % (dbse, 23)] = """Dimer from ethene_ethene_D2h """
TAGL['%s-%s-monoA-CP' % (dbse, 23)] = """Monomer A from ethene_ethene_D2h """
TAGL['%s-%s-monoB-CP' % (dbse, 23)] = """Monomer B from ethene_ethene_D2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 23)] = """Monomer A from ethene_ethene_D2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 23)] = """Monomer B from ethene_ethene_D2h """
TAGL['%s-%s' % (dbse, 24)] = """ ethyne_ethyne_D2h """
TAGL['%s-%s-dimer' % (dbse, 24)] = """Dimer from ethyne_ethyne_D2h """
TAGL['%s-%s-monoA-CP' % (dbse, 24)] = """Monomer A from ethyne_ethyne_D2h """
TAGL['%s-%s-monoB-CP' % (dbse, 24)] = """Monomer B from ethyne_ethyne_D2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 24)] = """Monomer A from ethyne_ethyne_D2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 24)] = """Monomer B from ethyne_ethyne_D2h """
TAGL['dbse'] = 'interaction energies for small bimolecular complexes'
TAGL['default'] = 'entire database'
TAGL['small'] = 'few computationally quick systems'
TAGL['large'] = 'most computationally expensive systems'
TAGL['HB'] = 'hydrogen-bonded systems'
TAGL['MX'] = 'mixed-influence systems'
TAGL['DD'] = 'dispersion-dominated systems'
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
O 0.00000000 -0.05786571 -1.47979303
H 0.00000000 0.82293384 -1.85541474
H 0.00000000 0.07949567 -0.51934253
--
0 1
N 0.00000000 0.01436394 1.46454628
H 0.00000000 -0.98104857 1.65344779
H -0.81348351 0.39876776 1.92934049
H 0.81348351 0.39876776 1.92934049
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
O -0.06699914 0.00000000 1.49435474
H 0.81573427 0.00000000 1.86586639
H 0.06885510 0.00000000 0.53914277
--
0 1
O 0.06254775 0.00000000 -1.42263208
H -0.40696540 -0.76017841 -1.77174450
H -0.40696540 0.76017841 -1.77174450
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
H 0.00000000 0.00000000 3.85521306
C 0.00000000 0.00000000 2.78649976
N 0.00000000 0.00000000 1.63150791
--
0 1
H 0.00000000 0.00000000 -0.59377492
C 0.00000000 0.00000000 -1.66809824
N 0.00000000 0.00000000 -2.82525056
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
H 0.00000000 0.80267982 1.69529329
F 0.00000000 -0.04596666 1.34034818
--
0 1
H 0.00000000 -0.12040787 -0.49082840
F 0.00000000 0.00976945 -1.40424978
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
N -0.04998129 -1.58709323 0.00000000
H 0.12296265 -2.16846018 0.81105976
H 0.12296265 -2.16846018 -0.81105976
H 0.65988580 -0.86235298 0.00000000
--
0 1
N 0.04998129 1.58709323 0.00000000
H -0.12296265 2.16846018 0.81105976
H -0.65988580 0.86235298 0.00000000
H -0.12296265 2.16846018 -0.81105976
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.00000000 1.77071609
H 0.51593378 -0.89362352 1.42025061
H -0.00000000 0.00000000 2.85805859
H 0.51593378 0.89362352 1.42025061
H -1.03186756 0.00000000 1.42025061
--
0 1
H -0.00000000 0.00000000 -0.54877328
F -0.00000000 0.00000000 -1.46803256
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
N -0.00000000 0.00000000 1.84833659
H 0.93730979 -0.00000000 2.23206741
H -0.46865489 -0.81173409 2.23206741
H -0.46865489 0.81173409 2.23206741
--
0 1
H 0.00000000 -0.00000000 -0.94497174
C 0.00000000 -0.00000000 -2.03363752
H 0.51251439 0.88770096 -2.40095125
H 0.51251439 -0.88770096 -2.40095125
H -1.02502878 0.00000000 -2.40095125
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C 0.00069016 0.00000000 -1.99985520
H -0.50741740 0.88759452 -2.37290605
H 1.03052749 0.00000000 -2.35282982
H -0.01314396 0.00000000 -0.91190852
H -0.50741740 -0.88759452 -2.37290605
--
0 1
O -0.00472553 0.00000000 1.71597466
H 0.03211863 0.75755459 2.30172044
H 0.03211863 -0.75755459 2.30172044
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
C 0.00000000 0.60123980 -1.35383976
O 0.00000000 -0.59301814 -1.55209021
H 0.93542250 1.17427624 -1.26515132
H -0.93542250 1.17427624 -1.26515132
--
0 1
C 0.00000000 -0.60200476 1.55228866
O 0.00000000 0.59238638 1.35511328
H 0.00000000 -1.00937982 2.57524635
H 0.00000000 -1.32002906 0.71694997
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
C 0.01058825 -0.66806246 1.29820809
C 0.01058825 0.66806246 1.29820809
H 0.86863216 1.23267933 0.95426815
H -0.84608285 1.23258495 1.64525385
H -0.84608285 -1.23258495 1.64525385
H 0.86863216 -1.23267933 0.95426815
--
0 1
H -0.79685627 0.00000000 -2.50911038
O 0.04347445 0.00000000 -2.04834054
H -0.19067546 0.00000000 -1.11576944
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.59797089 1.47742864
C 0.00000000 0.42131196 2.33957848
H 0.92113351 -1.02957102 1.10653516
H -0.92113351 -1.02957102 1.10653516
H -0.92393815 0.85124826 2.70694633
H 0.92393815 0.85124826 2.70694633
--
0 1
O 0.00000000 -0.51877334 -1.82845679
C 0.00000000 0.68616220 -1.73709412
H 0.00000000 1.33077474 -2.63186355
H 0.00000000 1.18902807 -0.75645498
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
C 0.00000000 0.60356400 -2.18173438
H 0.00000000 1.66847581 -2.18429610
C 0.00000000 -0.60356400 -2.18173438
H 0.00000000 -1.66847581 -2.18429610
--
0 1
C -0.00000000 0.00000000 1.57829513
H -0.00000000 0.00000000 0.51136193
C -0.00000000 0.00000000 2.78576543
H -0.00000000 0.00000000 3.85017859
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.59662248 1.58722206
C 0.00000000 0.68258238 1.20494642
H 0.92312147 1.22423658 1.04062463
H -0.92312147 1.22423658 1.04062463
H -0.92388993 -1.13738548 1.75121281
H 0.92388993 -1.13738548 1.75121281
--
0 1
N 0.00000000 -0.00401379 -2.31096701
H -0.81122549 -0.45983060 -2.71043881
H 0.00000000 -0.22249432 -1.32128161
H 0.81122549 -0.45983060 -2.71043881
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
H 0.92444510 -1.23172221 -1.90619313
H -0.92444510 -1.23172221 -1.90619313
H -0.92444510 1.23172221 -1.90619313
H 0.92444510 1.23172221 -1.90619313
C 0.00000000 0.66728778 -1.90556520
C 0.00000000 -0.66728778 -1.90556520
--
0 1
H -0.00000000 1.23344948 2.82931792
H 0.00000000 1.22547148 0.97776199
H -0.00000000 -1.22547148 0.97776199
H -0.00000000 -1.23344948 2.82931792
C -0.00000000 -0.66711698 1.90601042
C -0.00000000 0.66711698 1.90601042
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
C 0.00000000 0.64634385 -1.60849815
C 0.00000000 -0.67914355 -1.45381675
H -0.92399961 -1.24016223 -1.38784883
H 0.92399961 -1.24016223 -1.38784883
H 0.92403607 1.20737602 -1.67357285
H -0.92403607 1.20737602 -1.67357285
--
0 1
H 0.00000000 0.08295411 1.59016711
C 0.00000000 0.02871509 2.67711785
H 0.88825459 0.52261990 3.06664029
H -0.88825459 0.52261990 3.06664029
H 0.00000000 -1.01394800 2.98955227
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C 0.00346000 0.00000000 1.38045208
H 0.84849635 0.00000000 0.68958651
H 0.39513333 0.00000000 2.39584935
H -0.60268447 -0.88994299 1.22482674
H -0.60268447 0.88994299 1.22482674
--
0 1
B -0.00555317 0.00000000 -1.59887976
H 0.58455128 -1.03051800 -1.67949525
H 0.58455128 1.03051800 -1.67949525
H -1.18903148 0.00000000 -1.47677217
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.06374421 2.42054090
H 0.00000000 1.02169396 2.34238038
H 0.88828307 -0.46131911 1.93307194
H -0.88828307 -0.46131911 1.93307194
H 0.00000000 -0.35363606 3.46945195
--
0 1
C 0.00000000 0.78133572 -1.13543912
H 0.00000000 1.37465349 -2.05114442
H -0.88043002 1.06310554 -0.55580918
C 0.00000000 -0.71332890 -1.44723686
H 0.88043002 1.06310554 -0.55580918
H 0.00000000 -1.30641812 -0.53140693
H -0.88100343 -0.99533072 -2.02587154
H 0.88100343 -0.99533072 -2.02587154
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 -2.85810471
H 0.39304720 -0.94712229 -2.49369739
H 0.62370837 0.81395000 -2.49369739
H -1.01675556 0.13317229 -2.49369739
H 0.00000000 -0.00000000 -3.94634214
--
0 1
C 0.00000000 -0.00000000 0.76143405
C -0.00000000 -0.00000000 2.28821715
H -0.61711193 -0.80824397 0.36571527
H -0.39140385 0.93855659 0.36571527
H 1.00851577 -0.13031262 0.36571527
H -1.00891703 0.13031295 2.68258296
H 0.39160418 -0.93890425 2.68258296
H 0.61731284 0.80859130 2.68258296
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 1.81901457
H 0.51274115 0.88809373 1.45476743
H 0.51274115 -0.88809373 1.45476743
H -1.02548230 0.00000000 1.45476743
H 0.00000000 -0.00000000 2.90722072
--
0 1
C 0.00000000 -0.00000000 -1.81901457
H -0.00000000 0.00000000 -2.90722072
H -0.51274115 0.88809373 -1.45476743
H -0.51274115 -0.88809373 -1.45476743
H 1.02548230 -0.00000000 -1.45476743
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 -2.62458428
H 0.51286762 0.88831278 -2.26110195
H 0.51286762 -0.88831278 -2.26110195
H -0.00000000 0.00000000 -3.71273928
H -1.02573525 0.00000000 -2.26110195
--
0 1
AR -0.00000000 0.00000000 1.05395172
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
C 0.00000000 0.66718073 -2.29024825
C 0.00000000 -0.66718073 -2.29024825
H -0.92400768 1.23202333 -2.28975239
H 0.92400768 1.23202333 -2.28975239
H -0.92400768 -1.23202333 -2.28975239
H 0.92400768 -1.23202333 -2.28975239
--
0 1
AR -0.00000000 0.00000000 1.60829261
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
H -0.92396100 1.23195600 -1.68478123
H 0.92396100 1.23195600 -1.68478123
H 0.92396100 -1.23195600 -1.68478123
H -0.92396100 -1.23195600 -1.68478123
C 0.00000000 0.66717600 -1.68478123
C 0.00000000 -0.66717600 -1.68478123
--
0 1
H -0.00000000 -1.66786500 1.81521877
H -0.00000000 1.66786500 1.81521877
C -0.00000000 -0.60339700 1.81521877
C -0.00000000 0.60339700 1.81521877
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '23')] = qcdb.Molecule("""
0 1
H -0.92396100 1.23195600 -1.75000000
H 0.92396100 1.23195600 -1.75000000
H 0.92396100 -1.23195600 -1.75000000
H -0.92396100 -1.23195600 -1.75000000
C 0.00000000 0.66717600 -1.75000000
C -0.00000000 -0.66717600 -1.75000000
--
0 1
H -0.92396100 1.23195600 1.75000000
H 0.92396100 1.23195600 1.75000000
H 0.92396100 -1.23195600 1.75000000
H -0.92396100 -1.23195600 1.75000000
C 0.00000000 0.66717600 1.75000000
C -0.00000000 -0.66717600 1.75000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '24')] = qcdb.Molecule("""
0 1
H -0.00000000 -1.66786500 -1.75000000
H 0.00000000 1.66786500 -1.75000000
C -0.00000000 -0.60339700 -1.75000000
C 0.00000000 0.60339700 -1.75000000
--
0 1
H -0.00000000 -1.66786500 1.75000000
H 0.00000000 1.66786500 1.75000000
C -0.00000000 -0.60339700 1.75000000
C 0.00000000 0.60339700 1.75000000
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
#########################################################################
| lgpl-3.0 |
codeforamerica/heroku-buildpack-pygeo | vendor/pip-1.3.1/pip/baseparser.py | 63 | 12283 | """Base option parser setup"""
import sys
import optparse
import pkg_resources
import os
import textwrap
from distutils.util import strtobool
from pip.backwardcompat import ConfigParser, string_types, ssl
from pip.locations import default_config_file, default_log_file
from pip.util import get_terminal_size, get_prog
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
"""A prettier/less verbose help formatter for optparse."""
def __init__(self, *args, **kwargs):
# help position must be aligned with __init__.parseopts.description
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = get_terminal_size()[0] - 2
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
def format_heading(self, heading):
if heading == 'Options':
return ''
return heading + ':\n'
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
def format_description(self, description):
# leave full control over description to us
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
#some doc strings have initial newlines, some don't
description = description.lstrip('\n')
#some doc strings have final newlines and spaces, some don't
description = description.rstrip()
#dedent, then reindent
description = self.indent_lines(textwrap.dedent(description), " ")
description = '%s:\n%s\n' % (label, description)
return description
else:
return ''
def format_epilog(self, epilog):
# leave full control over epilog to us
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [indent + line for line in text.split('\n')]
return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
"""Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
@property
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
class ConfigOptionParser(CustomOptionParser):
"""Custom option parser which updates its defaults by by checking the
configuration files and environmental variables"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.name = kwargs.pop('name')
self.files = self.get_config_files()
self.config.read(self.files)
assert self.name
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('PIP_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(self.normalize_keys(self.get_config_section(section)))
# 2. environmental variables
config.update(self.normalize_keys(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
print("An error occurred during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
def get_config_section(self, name):
"""Get a section of a configuration"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='PIP_'):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
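# Illustrative round trip (hypothetical environment): with
# PIP_DEFAULT_TIMEOUT=60 set, get_environ_vars() yields
# ('default_timeout', '60'), which normalize_keys() turns into
# {'--default-timeout': '60'}, matching the long option defined below.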
def get_default_values(self):
"""Overridding to make updating the defaults after instantiation of
the option parser possible, update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(2, "%s\n" % msg)
try:
pip_dist = pkg_resources.get_distribution('pip')
version = '%s from %s (python %s)' % (
pip_dist, pip_dist.location, sys.version[:3])
except pkg_resources.DistributionNotFound:
# when running pip.py without installing
version = None
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
genopt = optparse.OptionGroup(parser, 'General Options')
parser.disable_interspersed_args()
# having a default version action just causes trouble
parser.version = version
for opt in standard_options:
genopt.add_option(opt)
parser.add_option_group(genopt)
return parser
standard_options = [
optparse.make_option(
'-h', '--help',
dest='help',
action='help',
help='Show help.'),
optparse.make_option(
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=optparse.SUPPRESS_HELP),
optparse.make_option(
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'),
optparse.make_option(
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.'),
optparse.make_option(
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.'),
optparse.make_option(
'--log',
dest='log',
metavar='file',
help='Log file where a complete (maximum verbosity) record will be kept.'),
optparse.make_option(
# Writes the log levels explicitly to the log
'--log-explicit-levels',
dest='log_explicit_levels',
action='store_true',
default=False,
help=optparse.SUPPRESS_HELP),
optparse.make_option(
# The default log file
'--local-log', '--log-file',
dest='log_file',
metavar='file',
default=default_log_file,
help=optparse.SUPPRESS_HELP),
optparse.make_option(
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=optparse.SUPPRESS_HELP),
optparse.make_option(
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port."),
optparse.make_option(
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).'),
optparse.make_option(
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=optparse.SUPPRESS_HELP),
optparse.make_option(
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=optparse.SUPPRESS_HELP),
optparse.make_option(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup."),
optparse.make_option(
'--cert',
dest='cert',
type='str',
default='',
metavar='path',
help = "Path to alternate CA bundle."),
]
if not ssl:
standard_options.append(optparse.make_option(
'--insecure',
dest='insecure',
action='store_true',
default=False,
help = "Allow lack of certificate checking when ssl is not installed."))
| mit |
akhilari7/pa-dude | lib/python2.7/site-packages/pyrfc3339/generator.py | 3 | 2170 | import pytz
from pyrfc3339.utils import timezone, timedelta_seconds
def generate(dt, utc=True, accept_naive=False, microseconds=False):
'''
Generate an :RFC:`3339`-formatted timestamp from a
:class:`datetime.datetime`.
>>> from datetime import datetime
>>> generate(datetime(2009,1,1,12,59,59,0,pytz.utc))
'2009-01-01T12:59:59Z'
The timestamp will use UTC unless `utc=False` is specified, in which case
it will use the timezone from the :class:`datetime.datetime`'s
:attr:`tzinfo` parameter.
>>> eastern = pytz.timezone('US/Eastern')
>>> dt = eastern.localize(datetime(2009,1,1,12,59,59))
>>> generate(dt)
'2009-01-01T17:59:59Z'
>>> generate(dt, utc=False)
'2009-01-01T12:59:59-05:00'
Unless `accept_naive=True` is specified, the `datetime` must not be naive.
>>> generate(datetime(2009,1,1,12,59,59,0))
Traceback (most recent call last):
...
ValueError: naive datetime and accept_naive is False
>>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True)
'2009-01-01T12:59:59Z'
If `accept_naive=True` is specified, the `datetime` is assumed to be UTC.
Attempting to generate a local timestamp from a naive datetime will result
in an error.
>>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True, utc=False)
Traceback (most recent call last):
...
ValueError: cannot generate a local timestamp from a naive datetime
'''
if dt.tzinfo is None:
if accept_naive is True:
if utc is True:
dt = dt.replace(tzinfo=pytz.utc)
else:
raise ValueError("cannot generate a local timestamp from " +
"a naive datetime")
else:
raise ValueError("naive datetime and accept_naive is False")
if utc is True:
dt = dt.astimezone(pytz.utc)
timestamp = dt.strftime('%Y-%m-%dT%H:%M:%S')
if microseconds is True:
timestamp += dt.strftime('.%f')
if dt.tzinfo is pytz.utc:
timestamp += 'Z'
else:
timestamp += timezone(timedelta_seconds(dt.tzinfo.utcoffset(dt)))
return timestamp
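# Additional usage sketch (mirrors the doctests above; pytz assumed available):
#
# >>> generate(datetime(2009, 1, 1, 12, 59, 59, 0, pytz.utc), microseconds=True)
# '2009-01-01T12:59:59.000000Z'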
| mit |
sbalde/edx-platform | cms/djangoapps/contentstore/features/signup.py | 111 | 2821 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from nose.tools import assert_true, assert_false # pylint: disable=no-name-in-module
@step('I fill in the registration form$')
def i_fill_in_the_registration_form(step):
def fill_in_reg_form():
register_form = world.css_find('form#register_form')
register_form.find_by_name('email').fill('[email protected]')
register_form.find_by_name('password').fill('test')
register_form.find_by_name('username').fill('robot-studio')
register_form.find_by_name('name').fill('Robot Studio')
register_form.find_by_name('terms_of_service').click()
world.retry_on_exception(fill_in_reg_form)
@step('I press the Create My Account button on the registration form$')
def i_press_the_button_on_the_registration_form(step):
submit_css = 'form#register_form button#submit'
world.css_click(submit_css)
@step('I should see an email verification prompt')
def i_should_see_an_email_verification_prompt(step):
world.css_has_text('h1.page-header', u'Studio Home')
world.css_has_text('div.msg h3.title', u'We need to verify your email address')
@step(u'I fill in and submit the signin form$')
def i_fill_in_the_signin_form(step):
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill('[email protected]')
login_form.find_by_name('password').fill('test')
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
@step(u'I should( not)? see a login error message$')
def i_should_see_a_login_error(step, should_not_see):
if should_not_see:
# the login error may be absent or invisible. Check absence first,
# because css_visible will throw an exception if the element is not present
if world.is_css_present('div#login_error'):
assert_false(world.css_visible('div#login_error'))
else:
assert_true(world.css_visible('div#login_error'))
@step(u'I fill in and submit the signin form incorrectly$')
def i_goof_in_the_signin_form(step):
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill('[email protected]')
login_form.find_by_name('password').fill('oops')
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
@step(u'I edit the password field$')
def i_edit_the_password_field(step):
password_css = 'form#login_form input#password'
world.css_fill(password_css, 'test')
@step(u'I submit the signin form$')
def i_submit_the_signin_form(step):
submit_css = 'form#login_form button#submit'
world.css_click(submit_css)
| agpl-3.0 |
dmsimard/ansible | test/lib/ansible_test/_internal/test.py | 7 | 15062 | """Classes for storing and processing test results."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import re
from . import types as t
from .util import (
display,
get_ansible_version,
)
from .util_common import (
write_text_test_results,
write_json_test_results,
ResultType,
)
from .config import (
TestConfig,
)
def calculate_best_confidence(choices, metadata):
"""
:type choices: tuple[tuple[str, int]]
:type metadata: Metadata
:rtype: int
"""
best_confidence = 0
for path, line in choices:
confidence = calculate_confidence(path, line, metadata)
best_confidence = max(confidence, best_confidence)
return best_confidence
def calculate_confidence(path, line, metadata):
"""
:type path: str
:type line: int
:type metadata: Metadata
:rtype: int
"""
ranges = metadata.changes.get(path)
# no changes were made to the file
if not ranges:
return 0
# changes were made to the same file and line
    if any(r[0] <= line <= r[1] for r in ranges):
return 100
# changes were made to the same file and the line number is unknown
if line == 0:
return 75
# changes were made to the same file and the line number is different
return 50
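# Worked sketch of the scoring above (hypothetical metadata where
# metadata.changes == {'lib/foo.py': [(10, 20)]}):
#
# calculate_confidence('lib/foo.py', 15, metadata)  # -> 100, line in a range
# calculate_confidence('lib/foo.py', 0, metadata)   # -> 75, line unknown
# calculate_confidence('lib/foo.py', 99, metadata)  # -> 50, other line
# calculate_confidence('lib/bar.py', 15, metadata)  # -> 0, file unchanged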
class TestResult:
"""Base class for test results."""
def __init__(self, command, test, python_version=None):
"""
:type command: str
:type test: str
:type python_version: str
"""
self.command = command
self.test = test
self.python_version = python_version
self.name = self.test or self.command
if self.python_version:
self.name += '-python-%s' % self.python_version
try:
import junit_xml
except ImportError:
junit_xml = None
self.junit = junit_xml
def write(self, args):
"""
:type args: TestConfig
"""
self.write_console()
self.write_bot(args)
if args.lint:
self.write_lint()
if args.junit:
if self.junit:
self.write_junit(args)
else:
display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
def write_console(self):
"""Write results to console."""
def write_lint(self):
"""Write lint results to stdout."""
def write_bot(self, args):
"""
:type args: TestConfig
"""
def write_junit(self, args):
"""
:type args: TestConfig
"""
def create_result_name(self, extension):
"""
:type extension: str
:rtype: str
"""
name = 'ansible-test-%s' % self.command
if self.test:
name += '-%s' % self.test
if self.python_version:
name += '-python-%s' % self.python_version
name += extension
return name
def save_junit(self, args, test_case, properties=None):
"""
:type args: TestConfig
:type test_case: junit_xml.TestCase
:type properties: dict[str, str] | None
:rtype: str | None
"""
test_suites = [
self.junit.TestSuite(
name='ansible-test',
test_cases=[test_case],
timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
properties=properties,
),
]
# the junit_xml API is changing in version 2.0.0
# TestSuite.to_xml_string is being replaced with to_xml_report_string
# see: https://github.com/kyrus/python-junit-xml/blob/63db26da353790500642fd02cae1543eb41aab8b/junit_xml/__init__.py#L249-L261
try:
to_xml_string = self.junit.to_xml_report_string
except AttributeError:
# noinspection PyDeprecation
to_xml_string = self.junit.TestSuite.to_xml_string
report = to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
if args.explain:
return
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
class TestTimeout(TestResult):
"""Test timeout."""
def __init__(self, timeout_duration):
"""
:type timeout_duration: int
"""
super(TestTimeout, self).__init__(command='timeout', test='')
self.timeout_duration = timeout_duration
def write(self, args):
"""
:type args: TestConfig
"""
message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
output = '''
One or more of the following situations may be responsible:
- Code changes have resulted in tests that hang or run for an excessive amount of time.
- Tests have been added which exceed the time limit when combined with existing tests.
- Test infrastructure and/or external dependencies are operating slower than normal.'''
if args.coverage:
output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
output += '\n\nConsult the console log for additional details on where the timeout occurred.'
timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
# hack to avoid requiring junit-xml, which may not be pre-installed outside our test containers
xml = '''
<?xml version="1.0" encoding="utf-8"?>
<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
\t\t<testcase classname="timeout" name="timeout">
\t\t\t<error message="%s" type="error">%s</error>
\t\t</testcase>
\t</testsuite>
</testsuites>
''' % (timestamp, message, output)
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), xml.lstrip())
class TestSuccess(TestResult):
"""Test success."""
def write_junit(self, args):
"""
:type args: TestConfig
"""
test_case = self.junit.TestCase(classname=self.command, name=self.name)
self.save_junit(args, test_case)
class TestSkipped(TestResult):
"""Test skipped."""
def write_console(self):
"""Write results to console."""
display.info('No tests applicable.', verbosity=1)
def write_junit(self, args):
"""
:type args: TestConfig
"""
test_case = self.junit.TestCase(classname=self.command, name=self.name)
test_case.add_skipped_info('No tests applicable.')
self.save_junit(args, test_case)
class TestFailure(TestResult):
"""Test failure."""
def __init__(self, command, test, python_version=None, messages=None, summary=None):
"""
:type command: str
:type test: str
:type python_version: str | None
:type messages: list[TestMessage] | None
:type summary: unicode | None
"""
super(TestFailure, self).__init__(command, test, python_version)
if messages:
messages = sorted(messages)
else:
messages = []
self.messages = messages
self.summary = summary
def write(self, args):
"""
:type args: TestConfig
"""
if args.metadata.changes:
self.populate_confidence(args.metadata)
super(TestFailure, self).write(args)
def write_console(self):
"""Write results to console."""
if self.summary:
display.error(self.summary)
else:
if self.python_version:
specifier = ' on python %s' % self.python_version
else:
specifier = ''
display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
for message in self.messages:
display.error(message.format(show_confidence=True))
doc_url = self.find_docs()
if doc_url:
display.info('See documentation for help: %s' % doc_url)
def write_lint(self):
"""Write lint results to stdout."""
if self.summary:
command = self.format_command()
message = 'The test `%s` failed. See stderr output for details.' % command
path = ''
message = TestMessage(message, path)
print(message)
else:
for message in self.messages:
print(message)
def write_junit(self, args):
"""
:type args: TestConfig
"""
title = self.format_title()
output = self.format_block()
test_case = self.junit.TestCase(classname=self.command, name=self.name)
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
test_case.add_failure_info(message=title, output='\n%s' % output)
self.save_junit(args, test_case)
def write_bot(self, args):
"""
:type args: TestConfig
"""
docs = self.find_docs()
message = self.format_title(help_link=docs)
output = self.format_block()
if self.messages:
verified = all((m.confidence or 0) >= 50 for m in self.messages)
else:
verified = False
bot_data = dict(
verified=verified,
docs=docs,
results=[
dict(
message=message,
output=output,
),
],
)
if args.explain:
return
write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
def populate_confidence(self, metadata):
"""
:type metadata: Metadata
"""
for message in self.messages:
if message.confidence is None:
message.confidence = calculate_confidence(message.path, message.line, metadata)
def format_command(self):
"""
:rtype: str
"""
command = 'ansible-test %s' % self.command
if self.test:
command += ' --test %s' % self.test
if self.python_version:
command += ' --python %s' % self.python_version
return command
def find_docs(self):
"""
:rtype: str
"""
if self.command != 'sanity':
return None # only sanity tests have docs links
        # Use the major.minor version for the URL only if this is a release
        # matching the pattern 2.4.0; otherwise use 'devel'.
ansible_version = get_ansible_version()
url_version = 'devel'
if re.search(r'^[0-9.]+$', ansible_version):
url_version = '.'.join(ansible_version.split('.')[:2])
testing_docs_url = 'https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version
url = '%s/%s/' % (testing_docs_url, self.command)
if self.test:
url += '%s.html' % self.test
return url
def format_title(self, help_link=None):
"""
:type help_link: str | None
:rtype: str
"""
command = self.format_command()
if self.summary:
reason = 'the error'
else:
reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
if help_link:
help_link_markup = ' [[explain](%s)]' % help_link
else:
help_link_markup = ''
title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
return title
def format_block(self):
"""
:rtype: str
"""
if self.summary:
block = self.summary
else:
block = '\n'.join(m.format() for m in self.messages)
message = block.strip()
# Hack to remove ANSI color reset code from SubprocessError messages.
message = message.replace(display.clear, '')
return message
class TestMessage:
"""Single test message for one file."""
def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None):
"""
:type message: str
:type path: str
:type line: int
:type column: int
:type level: str
:type code: str | None
:type confidence: int | None
"""
self.__path = path
self.__line = line
self.__column = column
self.__level = level
self.__code = code
self.__message = message
self.confidence = confidence
@property
def path(self): # type: () -> str
"""Return the path."""
return self.__path
@property
def line(self): # type: () -> int
"""Return the line number, or 0 if none is available."""
return self.__line
@property
def column(self): # type: () -> int
"""Return the column number, or 0 if none is available."""
return self.__column
@property
def level(self): # type: () -> str
"""Return the level."""
return self.__level
@property
def code(self): # type: () -> t.Optional[str]
"""Return the code, if any."""
return self.__code
@property
def message(self): # type: () -> str
"""Return the message."""
return self.__message
@property
def tuple(self): # type: () -> t.Tuple[str, int, int, str, t.Optional[str], str]
"""Return a tuple with all the immutable values of this test message."""
return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
def __lt__(self, other):
return self.tuple < other.tuple
def __le__(self, other):
return self.tuple <= other.tuple
def __eq__(self, other):
return self.tuple == other.tuple
def __ne__(self, other):
return self.tuple != other.tuple
def __gt__(self, other):
return self.tuple > other.tuple
def __ge__(self, other):
return self.tuple >= other.tuple
def __hash__(self):
return hash(self.tuple)
def __str__(self):
return self.format()
def format(self, show_confidence=False):
"""
:type show_confidence: bool
:rtype: str
"""
if self.__code:
msg = '%s: %s' % (self.__code, self.__message)
else:
msg = self.__message
if show_confidence and self.confidence is not None:
msg += ' (%d%%)' % self.confidence
return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
| gpl-3.0 |
canwe/NewsBlur | apps/rss_feeds/migrations/0044_favicon_color.py | 18 | 7246 | # encoding: utf-8
import sys
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from apps.rss_feeds.models import Feed
try:
    from apps.rss_feeds.models import MFeedIcon
except ImportError:
pass
class Migration(DataMigration):
def forwards(self, orm):
feeds = Feed.objects.all().order_by('-average_stories_per_month')
feed_count = feeds.count()
i = 0
for feed in feeds:
i += 1
if i % 1000 == 0:
print "%s/%s" % (i, feed_count,)
sys.stdout.flush()
if not feed.favicon_color:
feed_icon = MFeedIcon.objects(feed_id=feed.pk)
if feed_icon:
try:
feed.favicon_color = feed_icon[0].color
feed.favicon_not_found = feed_icon[0].not_found
feed.save()
except Exception, e:
print '\n\n!!! %s\n\n' % e
continue
def backwards(self, orm):
"Write your backwards methods here."
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'favicon_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'favicon_not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "'[Untitled]'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_classifier_counts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedicon': {
'Meta': {'object_name': 'FeedIcon'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'icon'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['rss_feeds.Feed']"}),
'icon_url': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['rss_feeds']
| mit |
MathieuDuponchelle/my_patched_photologue | photologue/tests/test_gallery.py | 3 | 2220 | from .. import models
from .helpers import PhotologueBaseTest
from .factories import GalleryFactory, PhotoFactory
class GalleryTest(PhotologueBaseTest):
def setUp(self):
"""Create a test gallery with 2 photos."""
super(GalleryTest, self).setUp()
self.test_gallery = GalleryFactory()
self.pl2 = PhotoFactory()
self.test_gallery.photos.add(self.pl)
self.test_gallery.photos.add(self.pl2)
def tearDown(self):
super(GalleryTest, self).tearDown()
self.pl2.delete()
def test_public(self):
"""Method 'public' should only return photos flagged as public."""
self.assertEqual(self.test_gallery.public().count(), 2)
self.pl.is_public = False
self.pl.save()
self.assertEqual(self.test_gallery.public().count(), 1)
def test_photo_count(self):
"""Method 'photo_count' should return the count of the photos in this
gallery."""
self.assertEqual(self.test_gallery.photo_count(), 2)
self.pl.is_public = False
self.pl.save()
self.assertEqual(self.test_gallery.photo_count(), 1)
# Method takes an optional 'public' kwarg.
self.assertEqual(self.test_gallery.photo_count(public=False), 2)
def test_sample(self):
"""Method 'sample' should return a random queryset of photos from the
gallery."""
# By default we return all photos from the gallery (but ordered at random).
_current_sample_size = models.SAMPLE_SIZE
models.SAMPLE_SIZE = 5
self.assertEqual(len(self.test_gallery.sample()), 2)
# We can state how many photos we want.
self.assertEqual(len(self.test_gallery.sample(count=1)), 1)
# If only one photo is public then the sample cannot have more than one
# photo.
self.pl.is_public = False
self.pl.save()
self.assertEqual(len(self.test_gallery.sample(count=2)), 1)
self.pl.is_public = True
self.pl.save()
# We can limit the number of photos by changing settings.
models.SAMPLE_SIZE = 1
self.assertEqual(len(self.test_gallery.sample()), 1)
models.SAMPLE_SIZE = _current_sample_size
| bsd-3-clause |
ansrivas/pylogging | pylogging/formatters.py | 1 | 1932 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Bunch of log formatters to be used."""
import logging
try:
import ujson as json
except ImportError:
import json
class TextFormatter(logging.Formatter):
"""Format the meta data in the log message to fix string length."""
datefmt = '%Y-%m-%d %H:%M:%S'
def __init__(self, context=None):
self.context = context
super(TextFormatter, self).__init__()
def format(self, record):
"""Default formatter."""
error_location = "%s.%s" % (record.name, record.funcName)
line_number = "%s" % (record.lineno)
location_line = error_location[:32] + ":" + line_number
s = "%.19s [%-8s] [%-36s] %s" % (self.formatTime(record, TextFormatter.datefmt),
record.levelname, location_line, record.getMessage())
if self.context:
s = "%.19s [%s] [%-8s] [%-36s] %s" % (self.formatTime(record, TextFormatter.datefmt), self.context,
record.levelname, location_line, record.getMessage())
return s
class JsonFormatter(logging.Formatter):
"""Format the meta data in the json log message and fix string length."""
datefmt = '%Y-%m-%d %H:%M:%S'
def format(self, record):
"""Default json formatter."""
error_location = "%s.%s" % (record.name, record.funcName)
line_number = "%s" % (record.lineno)
location_line = error_location[:32] + ":" + line_number
output = {'log_time': self.formatTime(record, TextFormatter.datefmt),
'log_location': location_line,
'log_level': record.levelname,
'message': record.getMessage()}
return json.dumps(output)
class Formatters(object):
"""Define a common class for Formatters."""
TextFormatter = TextFormatter()
JsonFormatter = JsonFormatter()
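# Usage sketch (standard library only; handler names are illustrative): attach
# one of the shared formatter instances to a logging handler.
#
# import logging
# handler = logging.StreamHandler()
# handler.setFormatter(Formatters.JsonFormatter)
# logging.getLogger(__name__).addHandler(handler)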
| mit |
xchenum/quantum | quantum/plugins/cisco/client/cli.py | 6 | 6937 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial structure and framework of this CLI has been borrowed from Quantum,
# written by the following authors
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Salvatore Orlando, Citrix
#
# Cisco adaptation for extensions
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Ying Liu, Cisco Systems, Inc.
import logging
import logging.handlers
from optparse import OptionParser
import os
import sys
import quantumclient.cli as qcli
from quantumclient import Client
LOG = logging.getLogger('quantum')
FORMAT = 'json'
#ACTION_PREFIX_EXT = '/v1.0'
#ACTION_PREFIX_CSCO = ACTION_PREFIX_EXT + \
# '/extensions/csco/tenants/{tenant_id}'
VERSION = '1.0'
URI_PREFIX_EXT = ''
URI_PREFIX_CSCO = '/extensions/csco/tenants/{tenant_id}'
TENANT_ID = 'nova'
CSCO_EXT_NAME = 'Cisco Nova Tenant'
DEFAULT_QUANTUM_VERSION = '1.1'
def help():
"""Help for CLI"""
print "\nCisco Extension Commands:"
for key in COMMANDS.keys():
print " %s %s" % (
key, " ".join(["<%s>" % y for y in COMMANDS[key]["args"]]))
def build_args(cmd, cmdargs, arglist):
"""Building the list of args for a particular CLI"""
args = []
orig_arglist = arglist[:]
try:
for cmdarg in cmdargs:
args.append(arglist[0])
del arglist[0]
    except IndexError:
LOG.error("Not enough arguments for \"%s\" (expected: %d, got: %d)" % (
cmd, len(cmdargs), len(orig_arglist)))
print "Usage:\n %s %s" % (
cmd, " ".join(["<%s>" % y for y in COMMANDS[cmd]["args"]]))
sys.exit()
if len(arglist) > 0:
LOG.error("Too many arguments for \"%s\" (expected: %d, got: %d)" % (
cmd, len(cmdargs), len(orig_arglist)))
print "Usage:\n %s %s" % (
cmd, " ".join(["<%s>" % y for y in COMMANDS[cmd]["args"]]))
sys.exit()
return args
def list_extensions(*args):
"""Invoking the action to get the supported extensions"""
request_url = "/extensions"
client = Client(HOST, PORT, USE_SSL, format='json',
version=VERSION, uri_prefix=URI_PREFIX_EXT, tenant="dummy")
data = client.do_request('GET', request_url)
print("Obtained supported extensions from Quantum: %s" % data)
def schedule_host(tenant_id, instance_id, user_id=None):
"""Gets the host name from the Quantum service"""
project_id = tenant_id
instance_data_dict = {
'novatenant': {
'instance_id': instance_id,
'instance_desc': {
'user_id': user_id,
'project_id': project_id,
},
},
}
request_url = "/novatenants/" + project_id + "/schedule_host"
client = Client(HOST, PORT, USE_SSL, format='json', tenant=TENANT_ID,
version=VERSION, uri_prefix=URI_PREFIX_CSCO)
data = client.do_request('PUT', request_url, body=instance_data_dict)
hostname = data["host_list"]["host_1"]
if not hostname:
print("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?")
print("Quantum service returned host: %s" % hostname)
def create_multiport(tenant_id, net_id_list, *args):
"""Creates ports on a single host"""
net_list = net_id_list.split(",")
ports_info = {'multiport':
{'status': 'ACTIVE',
'net_id_list': net_list,
'ports_desc': {'key': 'value'}}}
request_url = "/multiport"
client = Client(HOST, PORT, USE_SSL, format='json', tenant=tenant_id,
version=VERSION, uri_prefix=URI_PREFIX_CSCO)
data = client.do_request('POST', request_url, body=ports_info)
print("Created ports: %s" % data)
COMMANDS = {
"create_multiport": {
"func": create_multiport,
"args": ["tenant-id",
"net-id-list (comma separated list of netword IDs)"],
},
"list_extensions": {
"func": list_extensions,
"args": [],
},
"schedule_host": {
"func": schedule_host,
"args": ["tenant-id", "instance-id"],
},
}
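# Invocation sketch (hypothetical shell usage; the -H and -p flags are defined
# in main() below):
#
#   python cli.py -H 127.0.0.1 -p 9696 list_extensions
#   python cli.py schedule_host <tenant-id> <instance-id>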
def main():
import cli
usagestr = "Usage: %prog [OPTIONS] <command> [args]"
PARSER = OptionParser(usage=usagestr)
PARSER.add_option("-H", "--host", dest="host",
type="string", default="127.0.0.1",
help="ip address of api host")
PARSER.add_option("-p", "--port", dest="port",
type="int", default=9696, help="api poort")
PARSER.add_option("-s", "--ssl", dest="ssl",
action="store_true", default=False, help="use ssl")
PARSER.add_option("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="turn on verbose logging")
PARSER.add_option("-f", "--logfile", dest="logfile",
type="string", default="syslog", help="log file path")
PARSER.add_option(
'--version', default=DEFAULT_QUANTUM_VERSION,
help='Accepts 1.1 and 1.0, defaults to env[QUANTUM_VERSION].')
options, args = PARSER.parse_args()
if options.verbose:
LOG.setLevel(logging.DEBUG)
else:
LOG.setLevel(logging.WARN)
if options.logfile == "syslog":
LOG.addHandler(logging.handlers.SysLogHandler(address='/dev/log'))
else:
LOG.addHandler(logging.handlers.WatchedFileHandler(options.logfile))
os.chmod(options.logfile, 0644)
version = options.version
if len(args) < 1:
PARSER.print_help()
qcli.help(version)
help()
sys.exit(1)
CMD = args[0]
if CMD in qcli.commands['1.1'].keys():
qcli.main()
sys.exit(1)
if CMD not in COMMANDS.keys():
LOG.error("Unknown command: %s" % CMD)
qcli.help(version)
help()
sys.exit(1)
args = build_args(CMD, COMMANDS[CMD]["args"], args[1:])
LOG.info("Executing command \"%s\" with args: %s" % (CMD, args))
HOST = options.host
PORT = options.port
USE_SSL = options.ssl
COMMANDS[CMD]["func"](*args)
LOG.info("Command execution completed")
sys.exit(0)
if __name__ == "__main__":
main()
| apache-2.0 |
anomitra/articleScraper | PyQt-gpl-5.4.1/examples/qml/referenceexamples/methods.py | 2 | 4326 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PyQt5.QtCore import (pyqtProperty, pyqtSlot, QCoreApplication, QObject,
QUrl)
from PyQt5.QtQml import (qmlRegisterType, QQmlComponent, QQmlEngine,
QQmlListProperty)
QML = b'''
import QtQuick 2.0
import People 1.0
BirthdayParty {
host: Person {
name: "Bob Jones"
shoeSize: 12
}
guests: [
Person { name: "Leo Hodges" },
Person { name: "Jack Smith" },
Person { name: "Anne Brown" }
]
Component.onCompleted: invite("William Green")
}
'''
class Person(QObject):
def __init__(self, parent=None):
super(Person, self).__init__(parent)
self._name = ''
self._shoeSize = 0
@pyqtProperty(str)
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@pyqtProperty(int)
def shoeSize(self):
return self._shoeSize
@shoeSize.setter
def shoeSize(self, shoeSize):
self._shoeSize = shoeSize
class BirthdayParty(QObject):
def __init__(self, parent=None):
super(BirthdayParty, self).__init__(parent)
self._host = None
self._guests = []
@pyqtProperty(Person)
def host(self):
return self._host
@host.setter
def host(self, host):
self._host = host
@pyqtProperty(QQmlListProperty)
def guests(self):
return QQmlListProperty(Person, self, self._guests)
def guestCount(self):
return len(self._guests)
def guest(self, idx):
return self._guests[idx]
@pyqtSlot(str)
def invite(self, name):
person = Person(self)
person.name = name
self._guests.append(person)
app = QCoreApplication(sys.argv)
qmlRegisterType(BirthdayParty, "People", 1, 0, "BirthdayParty")
qmlRegisterType(Person, "People", 1, 0, "Person")
engine = QQmlEngine()
component = QQmlComponent(engine)
component.setData(QML, QUrl())
party = component.create()
if party is not None and party.host is not None:
print("\"%s\" is having a birthday!" % party.host.name)
print("They are inviting:")
for ii in range(party.guestCount()):
print(" \"%s\"" % party.guest(ii).name)
else:
for e in component.errors():
print("Error:", e.toString());
| gpl-2.0 |
liucode/tempest-master | tempest/api/compute/volumes/test_volume_snapshots.py | 5 | 3040 | # Copyright 2015 Fujitsu(fnst) Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesSnapshotsTestJSON(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(VolumesSnapshotsTestJSON, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(VolumesSnapshotsTestJSON, cls).setup_clients()
cls.volumes_client = cls.volumes_extensions_client
cls.snapshots_client = cls.snapshots_extensions_client
@test.idempotent_id('cd4ec87d-7825-450d-8040-6e2068f2da8f')
def test_volume_snapshot_create_get_list_delete(self):
v_name = data_utils.rand_name('Volume')
volume = self.volumes_client.create_volume(
size=CONF.volume.volume_size,
display_name=v_name)['volume']
self.addCleanup(self.delete_volume, volume['id'])
waiters.wait_for_volume_status(self.volumes_client, volume['id'],
'available')
s_name = data_utils.rand_name('Snapshot')
# Create snapshot
snapshot = self.snapshots_client.create_snapshot(
volume['id'],
display_name=s_name)['snapshot']
def delete_snapshot(snapshot_id):
waiters.wait_for_snapshot_status(self.snapshots_client,
snapshot_id,
'available')
# Delete snapshot
self.snapshots_client.delete_snapshot(snapshot_id)
self.snapshots_client.wait_for_resource_deletion(snapshot_id)
self.addCleanup(delete_snapshot, snapshot['id'])
self.assertEqual(volume['id'], snapshot['volumeId'])
# Get snapshot
fetched_snapshot = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
self.assertEqual(s_name, fetched_snapshot['displayName'])
self.assertEqual(volume['id'], fetched_snapshot['volumeId'])
# Fetch all snapshots
snapshots = self.snapshots_client.list_snapshots()['snapshots']
self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))
| apache-2.0 |
PHSCRC/phsled | nfc/clf/rcs380.py | 4 | 38231 | # -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2012-2015 Stephen Tiedemann <[email protected]>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
"""Driver module for contactless devices based on the Sony NFC Port-100
chipset. The only product known to use this chipset is the PaSoRi
RC-S380. The RC-S380 connects to the host as a native USB device.
The RC-S380 has been the first NFC Forum certified device. It supports
reading and writing of all NFC Forum tags as well as peer-to-peer
mode. In addition, the NFC Port-100 also supports card emulation Type
A and Type F Technology. A notable restriction is that peer-to-peer
active communication mode (not required for NFC Forum certification)
is not supported.
========== ======= ============
function support remarks
========== ======= ============
sense_tta yes
sense_ttb yes
sense_ttf yes
sense_dep no
listen_tta yes Type F responses can not be disabled
listen_ttb no
listen_ttf yes
listen_dep yes Only passive communication mode
========== ======= ============
"""
import logging
log = logging.getLogger(__name__)
import os
import time
import errno
import struct
import operator
from binascii import hexlify
import nfc.clf
from . import device
class Frame():
def __init__(self, data):
self._data = None
self._frame = None
if data[0:3] == bytearray("\x00\x00\xff"):
frame = bytearray(data)
if frame == bytearray("\x00\x00\xff\x00\xff\x00"):
self._type = "ack"
elif frame == bytearray("\x00\x00\xFF\xFF\xFF"):
self._type = "err"
elif frame[3:5] == bytearray("\xff\xff"):
self._type = "data"
if self.type == "data":
length = struct.unpack("<H", str(frame[5:7]))[0]
self._data = frame[8:8+length]
else:
frame = bytearray([0, 0, 255, 255, 255])
frame += bytearray(struct.pack("<H", len(data)))
frame += bytearray(struct.pack("B", (256 - sum(frame[5:7])) % 256))
frame += bytearray(data)
frame += bytearray([(256 - sum(frame[8:])) % 256, 0])
self._frame = frame
def __str__(self):
return str(self._frame)
@property
def type(self):
return self._type
@property
def data(self):
return self._data
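# Round-trip sketch for the framing above (example command D6 20 is
# GetFirmwareVersion; checksums follow the formulas in __init__):
#
# frame = Frame(bytearray([0xD6, 0x20]))
# str(frame)  # '\x00\x00\xff\xff\xff\x02\x00\xfe\xd6\x20\x0a\x00'
# Frame(bytearray(str(frame))).type  # 'data'
# Frame(bytearray(str(frame))).data  # bytearray(b'\xd6\x20')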
class CommunicationError:
err2str = {0x00000000: "NO_ERROR",
0x00000001: "PROTOCOL_ERROR",
0x00000002: "PARITY_ERROR",
0x00000004: "CRC_ERROR",
0x00000008: "COLLISION_ERROR",
0x00000010: "OVERFLOW_ERROR",
0x00000040: "TEMPERATURE_ERROR",
0x00000080: "RECEIVE_TIMEOUT_ERROR",
0x00000100: "CRYPTO1_ERROR",
0x00000200: "RFCA_ERROR",
0x00000400: "RF_OFF_ERROR",
0x00000800: "TRANSMIT_TIMEOUT_ERROR",
0x80000000: "RECEIVE_LENGTH_ERROR"
}
str2err = dict([(v, k) for k, v in err2str.iteritems()])
def __init__(self, status_bytes):
self.errno = struct.unpack('<L', str(status_bytes))[0]
def __eq__(self, strerr):
return self.errno & CommunicationError.str2err[strerr]
def __ne__(self, strerr):
return not self.__eq__(strerr)
def __str__(self):
return self.__class__.__name__ + ' ' + CommunicationError.err2str.get(
self.errno, "{0:08x}".format(self.errno))
class StatusError:
err2str = ("SUCCESS", "PARAMETER_ERROR", "PB_ERROR", "RFCA_ERROR",
"TEMPERATURE_ERROR", "PWD_ERROR", "RECEIVE_ERROR",
"COMMANDTYPE_ERROR")
def __init__(self, status):
self.errno = status
def __str__(self):
try:
return StatusError.err2str[self.errno]
except IndexError:
return "UNKNOWN STATUS ERROR {0:02x}".format(self.errno)
class Chipset(object):
ACK = bytearray.fromhex('0000FF00FF00')
CMD = {
# RF Communication
0x00: "InSetRF",
0x02: "InSetProtocol",
0x04: "InCommRF",
0x06: "SwitchRF",
0x10: "MaintainFlash",
0x12: "ResetDevice",
0x20: "GetFirmwareVersion",
0x22: "GetPDDataVersion",
0x24: "GetProperty",
0x26: "InGetProtocol",
0x28: "GetCommandType",
0x2A: "SetCommandType",
0x30: "InSetRCT",
0x32: "InGetRCT",
0x34: "GetPDData",
0x36: "ReadRegister",
0x40: "TgSetRF",
0x42: "TgSetProtocol",
0x44: "TgSetAuto",
0x46: "TgSetRFOff",
0x48: "TgCommRF",
0x50: "TgGetProtocol",
0x60: "TgSetRCT",
0x62: "TgGetRCT",
0xF0: "Diagnose",
}
def __init__(self, transport, logger):
self.transport = transport
self.log = logger
# write ack to perform a soft reset
# raises IOError(EACCES) if we're second
self.transport.write(Chipset.ACK)
# do some basic initialization and deactivate rf
self.set_command_type(1)
self.get_firmware_version()
self.get_pd_data_version()
self.switch_rf("off")
def close(self):
self.switch_rf('off')
self.transport.write(Chipset.ACK)
self.transport.close()
self.transport = None
def send_command(self, cmd_code, cmd_data, timeout):
cmd_data = bytearray(cmd_data)
log.log(logging.DEBUG-1, self.CMD[cmd_code]+" "+hexlify(cmd_data))
if self.transport is not None:
cmd = bytearray([0xD6, cmd_code]) + cmd_data
self.transport.write(str(Frame(cmd)))
if Frame(self.transport.read(timeout=100)).type == "ack":
rsp = Frame(self.transport.read(timeout)).data
if rsp and rsp[0] == 0xD7 and rsp[1] == cmd_code + 1:
return rsp[2:]
else:
log.debug("transport closed in send_command")
def in_set_rf(self, brty_send, brty_recv=None):
settings = {
"212F": (1, 1, 15, 1), "424F": (1, 2, 15, 2),
"106A": (2, 3, 15, 3), "212A": (4, 4, 15, 4),
"424A": (5, 5, 15, 5), "106B": (3, 7, 15, 7),
"212B": (3, 8, 15, 8), "424B": (3, 9, 15, 9),
}
if brty_recv is None: brty_recv = brty_send
data = settings[brty_send][0:2] + settings[brty_recv][2:4]
data = self.send_command(0x00, data, 100)
if data and data[0] != 0:
raise StatusError(data[0])
in_set_protocol_defaults = bytearray.fromhex(
"0018 0101 0201 0300 0400 0500 0600 0708 0800 0900"
"0A00 0B00 0C00 0E04 0F00 1000 1100 1200 1306")
def in_set_protocol(self, data=None, **kwargs):
data = bytearray() if data is None else bytearray(data)
KEYS = ("initial_guard_time", "add_crc", "check_crc", "multi_card",
"add_parity", "check_parity", "bitwise_anticoll",
"last_byte_bit_count", "mifare_crypto", "add_sof",
"check_sof", "add_eof", "check_eof", "rfu", "deaf_time",
"continuous_receive_mode", "min_len_for_crm",
"type_1_tag_rrdd", "rfca", "guard_time")
for key, value in kwargs.iteritems():
data.extend(bytearray([KEYS.index(key), int(value)]))
data = self.send_command(0x02, data, 100)
if data and data[0] != 0:
raise StatusError(data[0])
def in_comm_rf(self, data, timeout):
to = struct.pack("<H", timeout*10) if timeout <= 6553 else '\xFF\xFF'
data = self.send_command(0x04, to + str(data), timeout+500)
if data and tuple(data[0:4]) != (0, 0, 0, 0):
raise CommunicationError(data[0:4])
return data[5:] if data else None
def switch_rf(self, switch):
switch = ("off", "on").index(switch)
data = self.send_command(0x06, [switch], 100)
if data and data[0] != 0:
raise StatusError(data[0])
def tg_set_rf(self, comm_type):
tg_comm_type = {"106A": (8, 11), "212F": (8, 12), "424F": (8, 13),
"212A": (8, 14), "424A": (8, 15)}
comm_type = tg_comm_type[comm_type]
data = self.send_command(0x40, comm_type, 100)
if data and data[0] != 0:
raise StatusError(data[0])
tg_set_protocol_defaults = bytearray.fromhex("0001 0101 0207")
def tg_set_protocol(self, data=None, **kwargs):
data = bytearray() if data is None else bytearray(data)
KEYS = ("send_timeout_time_unit", "rf_off_error",
"continuous_receive_mode")
for key, value in kwargs.iteritems():
data.extend(bytearray([KEYS.index(key), int(value)]))
data = self.send_command(0x42, bytearray(data), 100)
if data and data[0] != 0:
raise StatusError(data[0])
def tg_set_auto(self, data):
data = self.send_command(0x44, data, 100)
if data and data[0] != 0:
raise StatusError(data[0])
def tg_comm_rf(self, guard_time=0, send_timeout=0xFFFF,
mdaa=False, nfca_params='', nfcf_params='',
mf_halted=False, arae=False, recv_timeout=0,
transmit_data=None):
# Send a response packet and receive the next request. If
# *transmit_data* is None skip sending. If *recv_timeout* is
# zero skip receiving. Data is sent only between *guard_time*
# and *send_timeout*, measured from the end of the last
# received data. If *mdaa* is True, reply to Type A and Type F
# activation commands with *nfca_params* (sens_res, nfcid1-3,
# sel_res) and *nfcf_params* (idm, pmm, system_code).
data = struct.pack("<HH?6s18s??H", guard_time, send_timeout,
mdaa, str(nfca_params), str(nfcf_params),
mf_halted, arae, recv_timeout)
if transmit_data:
data = data + str(transmit_data)
data = self.send_command(0x48, data, timeout=None)
if data and tuple(data[3:7]) != (0, 0, 0, 0):
raise CommunicationError(data[3:7])
return data
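    # Call sketch for tg_comm_rf (values hypothetical): listen for up to one
    # second without answering, then reply to the next request with raw bytes.
    #
    # data = chipset.tg_comm_rf(recv_timeout=1000)
    # data = chipset.tg_comm_rf(transmit_data=response, recv_timeout=1000)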
def reset_device(self, startup_delay=0):
self.send_command(0x12, struct.pack("<H", startup_delay), 100)
self.transport.write(Chipset.ACK)
time.sleep(float(startup_delay + 500)/1000)
def get_firmware_version(self, option=None):
assert option in (None, 0x60, 0x61, 0x80)
data = self.send_command(0x20, [option] if option else [], 100)
log.debug("firmware version {1:x}.{0:02x}".format(*data))
return data
def get_pd_data_version(self):
data = self.send_command(0x22, [], 100)
log.debug("package data format {1:x}.{0:02x}".format(*data))
def get_command_type(self):
data = self.send_command(0x28, [], 100)
return struct.unpack(">Q", str(data[0:8]))
def set_command_type(self, command_type):
data = self.send_command(0x2A, [command_type], 100)
if data and data[0] != 0:
raise StatusError(data[0])
class Device(device.Device):
# Device driver for the Sony NFC Port-100 chipset.
def __init__(self, chipset, logger):
self.chipset = chipset
self.log = logger
minor, major = self.chipset.get_firmware_version()
self._chipset_name = "NFC Port-100 v{0:x}.{1:02x}".format(major, minor)
def close(self):
self.chipset.close()
self.chipset = None
def mute(self):
self.chipset.switch_rf("off")
def sense_tta(self, target):
"""Sense for a Type A Target is supported for 106, 212 and 424
kbps. However, there may not be any target that understands the
activation commands in other than 106 kbps.
"""
log.debug("polling for NFC-A technology")
if target.brty not in ("106A", "212A", "424A"):
message = "unsupported bitrate {0}".format(target.brty)
raise nfc.clf.UnsupportedTargetError(message)
self.chipset.in_set_rf(target.brty)
self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
self.chipset.in_set_protocol(initial_guard_time=6, add_crc=0,
check_crc=0, check_parity=1,
last_byte_bit_count=7)
sens_req = (target.sens_req if target.sens_req else
bytearray.fromhex("26"))
try:
sens_res = self.chipset.in_comm_rf(sens_req, 30)
if len(sens_res) != 2: return None
except CommunicationError as error:
if error != "RECEIVE_TIMEOUT_ERROR": log.debug(error)
return None
log.debug("rcvd SENS_RES " + hexlify(sens_res))
if sens_res[0] & 0x1F == 0:
log.debug("type 1 tag target found")
self.chipset.in_set_protocol(last_byte_bit_count=8, add_crc=2,
check_crc=2, type_1_tag_rrdd=2)
target = nfc.clf.RemoteTarget(target.brty, sens_res=sens_res)
if sens_res[1] & 0x0F == 0b1100:
rid_cmd = bytearray.fromhex("78 0000 00000000")
log.debug("send RID_CMD " + hexlify(rid_cmd))
try:
target.rid_res = self.chipset.in_comm_rf(rid_cmd, 30)
except CommunicationError as error:
log.debug(error)
return None
return target
# other than type 1 tag
try:
self.chipset.in_set_protocol(last_byte_bit_count=8, add_parity=1)
if target.sel_req:
uid = target.sel_req
if len(uid) > 4: uid = "\x88" + uid
if len(uid) > 8: uid = uid[0:4] + "\x88" + uid[4:]
self.chipset.in_set_protocol(add_crc=1, check_crc=1)
for i, sel_cmd in zip(range(0,len(uid),4),"\x93\x95\x97"):
sel_req = sel_cmd + "\x70" + uid[i:i+4]
sel_req.append(reduce(operator.xor, sel_req[2:6])) # BCC
log.debug("send SEL_REQ " + hexlify(sel_req))
sel_res = self.chipset.in_comm_rf(sel_req, 30)
log.debug("rcvd SEL_RES " + hexlify(sel_res))
uid = target.sel_req
else:
uid = bytearray()
for sel_cmd in "\x93\x95\x97":
self.chipset.in_set_protocol(add_crc=0, check_crc=0)
sdd_req = sel_cmd + "\x20"
log.debug("send SDD_REQ " + hexlify(sdd_req))
sdd_res = self.chipset.in_comm_rf(sdd_req, 30)
log.debug("rcvd SDD_RES " + hexlify(sdd_res))
self.chipset.in_set_protocol(add_crc=1, check_crc=1)
sel_req = sel_cmd + "\x70" + sdd_res
log.debug("send SEL_REQ " + hexlify(sel_req))
sel_res = self.chipset.in_comm_rf(sel_req, 30)
log.debug("rcvd SEL_RES " + hexlify(sel_res))
if sel_res[0] & 0b00000100: uid = uid + sdd_res[1:4]
else: uid = uid + sdd_res[0:4]; break
if sel_res[0] & 0b00000100 == 0:
return nfc.clf.RemoteTarget(target.brty, sens_res=sens_res,
sel_res=sel_res, sdd_res=uid)
except CommunicationError as error:
log.debug(error)
def sense_ttb(self, target):
"""Sense for a Type B Target is supported for 106, 212 and 424
kbps. However, there may not be any target that understands the
activation command in other than 106 kbps.
"""
log.debug("polling for NFC-B technology")
if target.brty not in ("106B", "212B", "424B"):
message = "unsupported bitrate {0}".format(target.brty)
raise nfc.clf.UnsupportedTargetError(message)
self.chipset.in_set_rf(target.brty)
self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
self.chipset.in_set_protocol(initial_guard_time=20, add_sof=1,
check_sof=1, add_eof=1, check_eof=1)
sensb_req = (target.sensb_req if target.sensb_req else
bytearray.fromhex("050010"))
log.debug("send SENSB_REQ " + hexlify(sensb_req))
try:
sensb_res = self.chipset.in_comm_rf(sensb_req, 30)
except CommunicationError as error:
if error != "RECEIVE_TIMEOUT_ERROR": log.debug(error)
return None
if len(sensb_res) >= 12 and sensb_res[0] == 0x50:
log.debug("rcvd SENSB_RES " + hexlify(sensb_res))
return nfc.clf.RemoteTarget(target.brty, sensb_res=sensb_res)
def sense_ttf(self, target):
"""Sense for a Type F Target is supported for 212 and 424 kbps.
"""
log.debug("polling for NFC-F technology")
if target.brty not in ("212F", "424F"):
message = "unsupported bitrate {0}".format(target.brty)
raise nfc.clf.UnsupportedTargetError(message)
self.chipset.in_set_rf(target.brty)
self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
self.chipset.in_set_protocol(initial_guard_time=24)
sensf_req = (target.sensf_req if target.sensf_req else
bytearray.fromhex("00FFFF0100"))
log.debug("send SENSF_REQ " + hexlify(sensf_req))
try:
frame = chr(len(sensf_req)+1) + sensf_req
frame = self.chipset.in_comm_rf(frame, 10)
except CommunicationError as error:
if error != "RECEIVE_TIMEOUT_ERROR": log.debug(error)
return None
if len(frame) >= 18 and frame[0] == len(frame) and frame[1] == 1:
log.debug("rcvd SENSF_RES " + hexlify(frame[1:]))
return nfc.clf.RemoteTarget(target.brty, sensf_res=frame[1:])
def sense_dep(self, target):
"""Sense for an active DEP Target is not supported. The device only
supports passive activation via sense_tta/sense_ttf.
"""
message = "{device} does not support sense for active DEP Target"
raise nfc.clf.UnsupportedTargetError(message.format(device=self))
def listen_tta(self, target, timeout):
"""Listen as Type A Target in 106 kbps.
Restrictions:
* It is not possible to send short frames that are required
for ACK and NAK responses. This means that a Type 2 Tag
emulation can only implement a single sector memory model.
* It can not be avoided that the chipset responds to SENSF_REQ
commands. The driver configures the SENSF_RES response to
all zero and ignores all Type F communication but eventually
it depends on the remote device whether Type A Target
activation will still be attempted.
"""
if not target.brty == '106A':
info = "unsupported target bitrate: %r" % target.brty
raise nfc.clf.UnsupportedTargetError(info)
if target.rid_res:
info = "listening for type 1 tag activation is not supported"
raise nfc.clf.UnsupportedTargetError(info)
try:
assert target.sens_res is not None, "sens_res is required"
assert target.sdd_res is not None, "sdd_res is required"
assert target.sel_res is not None, "sel_res is required"
assert len(target.sens_res) == 2, "sens_res must be 2 byte"
assert len(target.sdd_res) == 4, "sdd_res must be 4 byte"
assert len(target.sel_res) == 1, "sel_res must be 1 byte"
assert target.sdd_res[0] == 0x08, "sdd_res[0] must be 08h"
except AssertionError as error:
raise ValueError(str(error))
nfca_params = target.sens_res + target.sdd_res[1:4] + target.sel_res
log.debug("nfca_params %s", hexlify(nfca_params))
self.chipset.tg_set_rf("106A")
self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults)
self.chipset.tg_set_protocol(rf_off_error=False)
time_to_return = time.time() + timeout
tg_comm_rf_args = {'mdaa': True, 'nfca_params': nfca_params}
tg_comm_rf_args['recv_timeout'] = min(int(1000 * timeout), 0xFFFF)
def listen_tta_tt2():
recv_timeout = tg_comm_rf_args['recv_timeout']
while recv_timeout > 0:
log.debug("wait %d ms for Type 2 Tag activation", recv_timeout)
try:
data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
except CommunicationError as error:
log.debug(error)
else:
brty = ('106A', '212F', '424F')[data[0]-11]
log.debug("%s rcvd %s", brty, hexlify(buffer(data, 7)))
if brty == "106A" and data[2] & 0x03 == 3:
self.chipset.tg_set_protocol(rf_off_error=True)
return nfc.clf.LocalTarget(
"106A", sens_res=nfca_params[0:2],
sdd_res='\x08'+nfca_params[2:5],
sel_res=nfca_params[5:6], tt2_cmd=data[7:])
else:
log.debug("not a 106A Type 2 Tag command")
finally:
recv_timeout = int(1000 * (time_to_return - time.time()))
tg_comm_rf_args['recv_timeout'] = recv_timeout
def listen_tta_tt4():
rats_cmd = rats_res = None
recv_timeout = tg_comm_rf_args['recv_timeout']
while recv_timeout > 0:
log.debug("wait %d ms for 106A TT4 command", recv_timeout)
try:
data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
tg_comm_rf_args['transmit_data'] = None
except CommunicationError as error:
tg_comm_rf_args['transmit_data'] = None
                    rats_cmd = rats_res = None
log.debug(error)
else:
brty = ('106A', '212F', '424F')[data[0]-11]
log.debug("%s rcvd %s", brty, hexlify(buffer(data, 7)))
if brty=="106A" and data[2]==3 and data[7]==0xE0:
(rats_cmd, rats_res) = (data[7:], target.rats_res)
log.debug("rcvd RATS_CMD %s", hexlify(rats_cmd))
if rats_res is None:
rats_res = bytearray.fromhex("05 78 80 70 02")
log.debug("send RATS_RES %s", hexlify(rats_res))
tg_comm_rf_args['transmit_data'] = rats_res
elif brty=="106A" and data[7]!=0xF0 and rats_cmd:
(did, cmd) = (rats_cmd[1] & 0x0F, data[7:])
ta_tb_tc = rats_res[2:]
ta = ta_tb_tc.pop(0) if rats_res[1]&0x10 else None
tb = ta_tb_tc.pop(0) if rats_res[1]&0x20 else None
tc = ta_tb_tc.pop(0) if rats_res[1]&0x40 else None
did_supported = tc is None or bool(tc & 0x02)
cmd_with_did = bool(cmd[0] & 0x08)
if ((cmd_with_did and did_supported and cmd[1]==did)
or (did==0 and not cmd_with_did)):
if cmd[0] in (0xC2, 0xCA):
log.debug("rcvd S(DESELECT) %s", hexlify(cmd))
tg_comm_rf_args['transmit_data'] = cmd
log.debug("send S(DESELECT) %s", hexlify(cmd))
rats_cmd = rats_res = None
else:
log.debug("rcvd TT4_CMD %s", hexlify(cmd))
self.chipset.tg_set_protocol(rf_off_error=True)
return nfc.clf.LocalTarget(
"106A", sens_res=nfca_params[0:2],
sdd_res='\x08'+nfca_params[2:5],
sel_res=nfca_params[5:6], tt4_cmd=cmd,
rats_cmd=rats_cmd, rats_res=rats_res)
else: log.debug("skip TT4_CMD %s (DID)", hexlify(cmd))
else: log.debug("not a 106A TT4 command")
finally:
recv_timeout = int(1000 * (time_to_return - time.time()))
tg_comm_rf_args['recv_timeout'] = recv_timeout
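        # Dispatch on SEL_RES bits 6..5 (NFC Forum convention): 00 indicates
        # a Type 2 Tag platform and a set bit 5 indicates ISO-DEP support
        # (Type 4A Tag); bit 6 alone would mean NFC-DEP, which is handled by
        # listen_dep instead.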
if target.sel_res[0] & 0x60 == 0x00:
return listen_tta_tt2()
if target.sel_res[0] & 0x20 == 0x20:
return listen_tta_tt4()
reason = "sel_res does not indicate any tag target support"
raise nfc.clf.UnsupportedTargetError(reason)
def listen_ttb(self, target, timeout):
"""Listen as Type B Target is not supported."""
message = "{device} does not support listen as Type A Target"
raise nfc.clf.UnsupportedTargetError(message.format(device=self))
def listen_ttf(self, target, timeout):
"""Listen as Type F Target is supported for either 212 or 424 kbps."""
assert target.sensf_res is not None
assert len(target.sensf_res) == 19
if target.brty not in ('212F', '424F'):
info = "unsupported target bitrate: %r" % target.brty
raise nfc.clf.UnsupportedTargetError(info)
self.chipset.tg_set_rf(target.brty)
self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults)
self.chipset.tg_set_protocol(rf_off_error=False)
recv_timeout = min(int(1000 * timeout), 0xFFFF)
time_to_return = time.time() + timeout
transmit_data = sensf_req = sensf_res = None
while recv_timeout > 0:
if transmit_data:
log.debug("%s send %s", target.brty, hexlify(transmit_data))
log.debug("%s wait recv %d ms", target.brty, recv_timeout)
try:
data = self.chipset.tg_comm_rf(recv_timeout=recv_timeout,
transmit_data=transmit_data)
except CommunicationError as error:
log.debug(error); continue
finally:
recv_timeout = int((time_to_return - time.time()) * 1E3)
transmit_data = None
assert target.brty == ('106A', '212F', '424F')[data[0]-11]
log.debug("%s rcvd %s", target.brty, hexlify(buffer(data, 7)))
if len(data) > 7 and len(data)-7 == data[7]:
if sensf_req and data[9:17] == target.sensf_res[1:9]:
self.chipset.tg_set_protocol(rf_off_error=True)
target = nfc.clf.LocalTarget(target.brty)
target.sensf_req = sensf_req
target.sensf_res = sensf_res
target.tt3_cmd = data[8:]
return target
if len(data) == 13 and data[7] == 6 and data[8] == 0:
(sensf_req, sensf_res) = (data[8:], target.sensf_res[:])
if ((sensf_req[1]==255 or sensf_req[1]==sensf_res[17]) and
(sensf_req[2]==255 or sensf_req[2]==sensf_res[18])):
transmit_data = sensf_res[0:17]
if sensf_req[3] == 1:
transmit_data += sensf_res[17:19]
if sensf_req[3] == 2:
transmit_data += "\x00" + chr(1<<(target.brty=="424F"))
transmit_data = chr(len(transmit_data)+1) + transmit_data
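    # In the SENSF_RES assembly above, sensf_req[3] is the request code sent
    # by the initiator: RC=1 appends the system code bytes (sensf_res[17:19])
    # and RC=2 appends two communication capability bytes whose second byte
    # advertises the bitrate (the shift yields 02h on 424F and 01h on 212F).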
def listen_dep(self, target, timeout):
log.debug("listen_dep for {0:.3f} sec".format(timeout))
assert target.sensf_res is not None
assert target.sens_res is not None
assert target.sdd_res is not None
assert target.sel_res is not None
assert target.atr_res is not None
nfca_params = target.sens_res + target.sdd_res[1:4] + target.sel_res
nfcf_params = target.sensf_res[1:19]
log.debug("nfca_params %s", hexlify(nfca_params))
log.debug("nfcf_params %s", hexlify(nfcf_params))
assert len(nfca_params) == 6
assert len(nfcf_params) == 18
self.chipset.tg_set_rf("106A")
self.chipset.tg_set_protocol(self.chipset.tg_set_protocol_defaults)
self.chipset.tg_set_protocol(rf_off_error=False)
tg_comm_rf_args = {'mdaa': True}
tg_comm_rf_args['nfca_params'] = nfca_params
tg_comm_rf_args['nfcf_params'] = nfcf_params
recv_timeout = min(int(1000 * timeout), 0xFFFF)
time_to_return = time.time() + timeout
while recv_timeout > 0:
tg_comm_rf_args['recv_timeout'] = recv_timeout
log.debug("wait %d ms for activation", recv_timeout)
try:
data = self.chipset.tg_comm_rf(**tg_comm_rf_args)
except CommunicationError as error:
if error != "RECEIVE_TIMEOUT_ERROR": log.warning(error)
else:
brty = ('106A', '212F', '424F')[data[0]-11]
log.debug("%s %s", brty, hexlify(data))
if data[2] & 0x03 == 3: data = data[7:]; break
else: log.debug("not a passive mode activation")
recv_timeout = int(1000 * (time_to_return - time.time()))
else:
return None
# further tg_comm_rf commands return RF_OFF_ERROR when field is gone
self.chipset.tg_set_protocol(rf_off_error=True)
if brty == "106A" and len(data)>1 and data[0] != 0xF0:
# We received a Type A card activation, probably because
# sel_res has indicated Type 2 or Type 4A Tag support.
target = nfc.clf.LocalTarget("106A", tag_cmd=data[:])
target.sens_res = nfca_params[0:2]
target.sdd_res = '\x08' + nfca_params[2:5]
target.sel_res = nfca_params[5:6]
return target
try:
if brty == "106A": assert data.pop(0) == 0xF0
assert len(data) == data.pop(0)
assert data.startswith("\xD4\x00")
except (IndexError, AssertionError):
return None
activation_params = nfca_params if brty=='106A' else nfcf_params
def send_res_recv_req(brty, data, timeout):
if data: data = ("", "\xF0")[brty=="106A"] + chr(len(data)) + data
args = {'transmit_data': data, 'recv_timeout': timeout}
data = self.chipset.tg_comm_rf(**args)[7:]
if timeout > 0:
try:
if brty == "106A":
assert data.pop(0) == 0xF0, "invalid start byte"
assert len(data) == data.pop(0), "incorrect length byte"
assert data[0] == 0xD4, "invalid command byte 1"
assert data[1] in (0,4,6,8,10), "invalid command byte 2"
except IndexError:
raise AssertionError("insufficient receive data")
return data
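        # Framing note: NFC-DEP frames at 106A carry an F0h start byte before
        # the length byte, while at 212F/424F the length byte comes first;
        # the helper above adds and strips that prefix accordingly.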
while data and data[1] == 0:
try:
(atr_req, atr_res) = (data[:], target.atr_res)
log.debug("%s rcvd ATR_REQ %s", brty, hexlify(atr_req))
assert len(atr_req) >= 16, "ATR_REQ has less than 16 byte"
assert len(atr_req) <= 64, "ATR_REQ has more than 64 byte"
log.debug("%s send ATR_RES %s", brty, hexlify(atr_res))
data = send_res_recv_req(brty, atr_res, 1000)
except (CommunicationError, AssertionError) as error:
log.warning(str(error))
return None
psl_req = dep_req = None
while data and data[1] in (4,6,8,10):
did = atr_req[12] if atr_req[12]>0 else None
cmd = ("PSL", "DEP", "DSL", "RLS")[(data[1]-4)//2] + "_REQ"
log.debug("%s rcvd %s %s", brty, cmd, hexlify(data))
try:
if cmd=="DEP_REQ" and did==(data[3] if data[2]>>2&1 else None):
dep_req = data[:]
break
if cmd=="DSL_REQ" and did==(data[2] if len(data)>2 else None):
data = "\xD5\x09" + data[2:3]
log.debug("%s send DSL_RES %s", brty, hexlify(data))
send_res_recv_req(brty, data, 0)
return None
if cmd=="RLS_REQ" and did==(data[2] if len(data)>2 else None):
data = "\xD5\x0B" + data[2:3]
log.debug("%s send RLS_RES %s", brty, hexlify(data))
send_res_recv_req(brty, data, 0)
return None
if cmd=="PSL_REQ" and did==(data[2] if data[2]>0 else None):
(dsi, dri) = ((data[3] >> 3) & 7, data[3] & 7)
if dsi != dri:
log.warning("DSI != DRI is not supported")
return None
(psl_req, psl_res) = (data[:], "\xD5\x05"+data[2:3])
log.debug("%s send PSL_RES %s", brty, hexlify(psl_res))
send_res_recv_req(brty, psl_res, 0)
brty = ('106A', '212F', '424F')[dsi]
self.chipset.tg_set_rf(brty)
log.debug("%s wait recv 1000 ms", brty)
data = send_res_recv_req(brty, None, 1000)
except (CommunicationError, AssertionError) as error:
log.warning(str(error))
return None
else: # while data and data[1] in (4,6,8,10)
return None
target = nfc.clf.LocalTarget(brty, atr_req=atr_req, dep_req=dep_req)
if psl_req: target.psl_req = psl_req
if activation_params == nfca_params:
target.sens_res = nfca_params[0:2]
target.sdd_res = '\x08' + nfca_params[2:5]
target.sel_res = nfca_params[5:6]
else:
target.sensf_res = "\x01" + nfcf_params
return target
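    # The 290 octets returned below are presumably the chipset's transfer
    # buffer limit for a single command/response exchange.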
def get_max_send_data_size(self, target):
return 290
def get_max_recv_data_size(self, target):
return 290
def send_cmd_recv_rsp(self, target, data, timeout):
timeout_msec = min(int(timeout * 1000), 0xFFFF) if timeout else 0
self.chipset.in_set_rf(target.brty_send, target.brty_recv)
self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
in_set_protocol_settings = {
'add_parity': 1 if target.brty_send.endswith('A') else 0,
'check_parity': 1 if target.brty_recv.endswith('A') else 0
}
try:
if (target.brty == '106A' and target.sel_res and
target.sel_res[0] & 0x60 == 0x00):
# Driver must check TT2 CRC to get ACK/NAK
in_set_protocol_settings['check_crc'] = 0
self.chipset.in_set_protocol(**in_set_protocol_settings)
return self._tt2_send_cmd_recv_rsp(data, timeout_msec)
else:
self.chipset.in_set_protocol(**in_set_protocol_settings)
return self.chipset.in_comm_rf(data, timeout_msec)
except CommunicationError as error:
log.debug(error)
if error == "RECEIVE_TIMEOUT_ERROR":
raise nfc.clf.TimeoutError
raise nfc.clf.TransmissionError
def _tt2_send_cmd_recv_rsp(self, data, timeout_msec):
        # The Type2Tag implementation needs to receive the Mifare
        # ACK/NAK responses but the chipset reports them as a CRC error
        # (indistinguishable from a real CRC error). We thus had to
        # switch off the CRC check and do it here.
data = self.chipset.in_comm_rf(data, timeout_msec)
if len(data) > 2 and self.check_crc_a(data) is False:
raise nfc.clf.TransmissionError("crc_a check error")
return data[:-2] if len(data) > 2 else data
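    # For reference, a minimal sketch of the CRC_A algorithm that the base
    # class check_crc_a used above is assumed to implement (ISO/IEC 14443-3
    # Type A: LSB first, reversed polynomial 1021h, initial value 6363h, no
    # final XOR; the last two frame bytes carry the CRC little-endian):
    #
    #     def crc_a(data):
    #         crc = 0x6363
    #         for byte in bytearray(data):
    #             byte = (byte ^ crc) & 0xFF
    #             byte = (byte ^ (byte << 4)) & 0xFF
    #             crc = ((crc >> 8) ^ (byte << 8) ^ (byte << 3) ^ (byte >> 4)) & 0xFFFF
    #         return crc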
def send_rsp_recv_cmd(self, target, data, timeout):
assert timeout is None or timeout >= 0
timeout_msec = min(int(timeout * 1000), 0xFFFF) if timeout else 0
kwargs = {
'guard_time': 500,
'transmit_data': data,
'recv_timeout': 0xFFFF if timeout is None else int(timeout*1E3),
}
try:
data = self.chipset.tg_comm_rf(**kwargs)
return data[7:] if data else None
except CommunicationError as error:
log.debug(error)
if error == "RF_OFF_ERROR":
raise nfc.clf.BrokenLinkError(str(error))
if error == "RECEIVE_TIMEOUT_ERROR":
raise nfc.clf.TimeoutError(str(error))
raise nfc.clf.TransmissionError(str(error))
def init(transport):
chipset = Chipset(transport, logger=log)
device = Device(chipset, logger=log)
device._vendor_name = transport.manufacturer_name
device._device_name = transport.product_name
return device
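# A minimal usage sketch (the 'usb' path is illustrative; nfcpy resolves the
# concrete driver module from the USB vendor and product id):
#
#     import nfc
#     clf = nfc.ContactlessFrontend('usb')
#     tag = clf.connect(rdwr={'on-connect': lambda tag: False})
#     clf.close()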
| mit |
nlholdem/icodoom | .venv/lib/python2.7/site-packages/google/protobuf/internal/text_encoding_test.py | 126 | 2903 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.text_encoding."""
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import text_encoding
TEST_VALUES = [
("foo\\rbar\\nbaz\\t",
"foo\\rbar\\nbaz\\t",
b"foo\rbar\nbaz\t"),
("\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
"\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
b"'full of \"sound\" and \"fury\"'"),
("signi\\\\fying\\\\ nothing\\\\",
"signi\\\\fying\\\\ nothing\\\\",
b"signi\\fying\\ nothing\\"),
("\\010\\t\\n\\013\\014\\r",
"\x08\\t\\n\x0b\x0c\\r",
b"\010\011\012\013\014\015")]
class TextEncodingTestCase(unittest.TestCase):
def testCEscape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEqual(escaped,
text_encoding.CEscape(unescaped, as_utf8=False))
self.assertEqual(escaped_utf8,
text_encoding.CEscape(unescaped, as_utf8=True))
def testCUnescape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEqual(unescaped, text_encoding.CUnescape(escaped))
self.assertEqual(unescaped, text_encoding.CUnescape(escaped_utf8))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
hynekcer/django | django/db/migrations/operations/base.py | 356 | 4370 | from __future__ import unicode_literals
from django.db import router
class Operation(object):
"""
Base class for migration operations.
    It's responsible both for mutating the in-memory model state
    (see db/migrations/state.py) to represent what it performs and
    for actually performing it against a live database.
    Note that some operations won't modify memory state at all (e.g. data
    copying operations), and some will need their modifications to be
    optionally specified by the user (e.g. custom Python code snippets).
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
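    # For illustration (operation name and arguments are hypothetical): an
    # operation constructed as AddField("author", name="rating") would
    # deconstruct to ("AddField", ("author",), {"name": "rating"}).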
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
def describe(self):
"""
Outputs a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
        Returns whether we're allowed to migrate the model.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
)
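# A minimal sketch of a concrete subclass (hypothetical; the real operations
# such as CreateModel or RunPython live in the sibling modules of this
# package):
#
#     class NoopOperation(Operation):
#         reversible = True
#
#         def state_forwards(self, app_label, state):
#             pass  # nothing to mutate in the project state
#
#         def database_forwards(self, app_label, schema_editor,
#                               from_state, to_state):
#             pass  # nothing to run against the database
#
#         def database_backwards(self, app_label, schema_editor,
#                                from_state, to_state):
#             pass
#
#         def describe(self):
#             return "Does nothing"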
| bsd-3-clause |
jose51197/Infernal | Documentation/target/tcm_mod_builder.py | 3119 | 42754 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
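# tcm_mod_build_configfs() below assembles <mod>_configfs.c as one large
# string: the generated file wires the fabric's make/drop callbacks into the
# target_core_fabric_configfs machinery and registers the module on load.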
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!(se_nacl_new))\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!(tpg)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (!(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return -ENOMEM;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
buf += "#endif\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
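# tcm_mod_scan_fabric_ops() collects every function-pointer member of struct
# target_core_fabric_ops (any line containing "(*"); tcm_mod_dump_fabric_ops()
# below then emits a stub definition and a matching prototype for each
# collected entry.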
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
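# Example invocation (the module name is hypothetical); both -m and -p are
# enforced as mandatory by the optparse block above:
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This emits the fabric, configfs, Makefile and Kconfig skeletons under
# drivers/target/tcm_nab5000/ and optionally wires them into the target
# core Makefile/Kconfig.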
| gpl-2.0 |
Gitlab11/odoo | addons/calendar/controllers/main.py | 329 | 3390 | import simplejson
import openerp
import openerp.http as http
from openerp.http import request
import openerp.addons.web.controllers.main as webmain
import json
class meeting_invitation(http.Controller):
@http.route('/calendar/meeting/accept', type='http', auth="calendar")
def accept(self, db, token, action, id, **kwargs):
registry = openerp.modules.registry.RegistryManager.get(db)
attendee_pool = registry.get('calendar.attendee')
with registry.cursor() as cr:
attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token), ('state', '!=', 'accepted')])
if attendee_id:
attendee_pool.do_accept(cr, openerp.SUPERUSER_ID, attendee_id)
return self.view(db, token, action, id, view='form')
@http.route('/calendar/meeting/decline', type='http', auth="calendar")
def declined(self, db, token, action, id):
registry = openerp.modules.registry.RegistryManager.get(db)
attendee_pool = registry.get('calendar.attendee')
with registry.cursor() as cr:
attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token), ('state', '!=', 'declined')])
if attendee_id:
attendee_pool.do_decline(cr, openerp.SUPERUSER_ID, attendee_id)
return self.view(db, token, action, id, view='form')
@http.route('/calendar/meeting/view', type='http', auth="calendar")
def view(self, db, token, action, id, view='calendar'):
registry = openerp.modules.registry.RegistryManager.get(db)
meeting_pool = registry.get('calendar.event')
attendee_pool = registry.get('calendar.attendee')
partner_pool = registry.get('res.partner')
with registry.cursor() as cr:
attendee = attendee_pool.search_read(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)], [])
if attendee and attendee[0] and attendee[0].get('partner_id'):
partner_id = int(attendee[0].get('partner_id')[0])
tz = partner_pool.read(cr, openerp.SUPERUSER_ID, partner_id, ['tz'])['tz']
else:
tz = False
attendee_data = meeting_pool.get_attendee(cr, openerp.SUPERUSER_ID, id, dict(tz=tz))
if attendee:
attendee_data['current_attendee'] = attendee[0]
values = dict(init="s.calendar.event('%s', '%s', '%s', '%s' , '%s');" % (db, action, id, 'form', json.dumps(attendee_data)))
return request.render('web.webclient_bootstrap', values)
# Called via RPC every 5 minutes to check whether a notification is due for any event
@http.route('/calendar/notify', type='json', auth="none")
def notify(self):
registry = request.registry
uid = request.session.uid
context = request.session.context
with registry.cursor() as cr:
res = registry.get("calendar.alarm_manager").get_next_notif(cr, uid, context=context)
return res
@http.route('/calendar/notify_ack', type='json', auth="none")
def notify_ack(self, type=''):
registry = request.registry
uid = request.session.uid
context = request.session.context
with registry.cursor() as cr:
res = registry.get("res.partner")._set_calendar_last_notif_ack(cr, uid, context=context)
return res
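# Sketch of how these routes are reached (parameter values are illustrative):
# invitation e-mails embed links of the form
#   /calendar/meeting/accept?db=mydb&token=<access_token>&action=<action_id>&id=<event_id>
# and the handlers above flip the attendee state via do_accept()/do_decline()
# before rendering the meeting through view().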
| agpl-3.0 |
toddpalino/kafka-tools | kafka/tools/protocol/requests/update_metadata_v3.py | 1 | 2339 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.protocol.requests.update_metadata_v0 import UpdateMetadataV0Request
from kafka.tools.protocol.responses.update_metadata_v3 import UpdateMetadataV3Response
class UpdateMetadataV3Request(UpdateMetadataV0Request):
api_version = 3
response = UpdateMetadataV3Response
help_string = ''
schema = [
{'name': 'controller_id', 'type': 'int32'},
{'name': 'controller_epoch', 'type': 'int32'},
{'name': 'partition_states',
'type': 'array',
'item_type': [
{'name': 'topic', 'type': 'string'},
{'name': 'partition', 'type': 'int32'},
{'name': 'controller_epoch', 'type': 'int32'},
{'name': 'leader', 'type': 'int32'},
{'name': 'leader_epoch', 'type': 'int32'},
{'name': 'isr', 'type': 'array', 'item_type': 'int32'},
{'name': 'zk_version', 'type': 'int32'},
{'name': 'replicas', 'type': 'array', 'item_type': 'int32'},
]},
{'name': 'live_leaders',
'type': 'array',
'item_type': [
{'name': 'id', 'type': 'int32'},
{'name': 'end_points',
'type': 'array',
'item_type': [
{'name': 'port', 'type': 'int32'},
{'name': 'host', 'type': 'string'},
{'name': 'listener_name', 'type': 'string'},
{'name': 'security_protocol_type', 'type': 'int16'},
]},
{'name': 'rack', 'type': 'string'},
]},
]
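# A minimal value matching the schema above, for orientation only (ids,
# hosts and epochs are made up):
#
# {'controller_id': 1, 'controller_epoch': 7,
#  'partition_states': [{'topic': 'test', 'partition': 0, 'controller_epoch': 7,
#                        'leader': 1, 'leader_epoch': 3, 'isr': [1, 2],
#                        'zk_version': 9, 'replicas': [1, 2]}],
#  'live_leaders': [{'id': 1, 'rack': 'rack-a',
#                    'end_points': [{'port': 9092, 'host': 'broker-1',
#                                    'listener_name': 'PLAINTEXT',
#                                    'security_protocol_type': 0}]}]}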
| apache-2.0 |
gauribhoite/personfinder | env/site-packages/pygments/lexers/data.py | 72 | 18277 | # -*- coding: utf-8 -*-
"""
pygments.lexers.data
~~~~~~~~~~~~~~~~~~~~
Lexers for data file formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
include, bygroups, inherit
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Punctuation, Literal
__all__ = ['YamlLexer', 'JsonLexer', 'JsonLdLexer']
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
.. versionadded:: 0.11
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?!\s|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
# the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![\w-]*!)'
r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
# whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
# the beginning a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors': [
# a full-form tag
(r'!<[\w;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[\w-]+)?'
r'(?:![\w;/?:@&=+$,.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[\w-]+', Name.Label),
# an alias
(r'\*[\w-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[\S\t ]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^\s\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^\s"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
],
# a plain scalar is the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^\s,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
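# Minimal usage sketch via the standard pygments API (the formatter choice
# is arbitrary):
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   highlight('key: [1, 2]\n', YamlLexer(), HtmlFormatter())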
class JsonLexer(RegexLexer):
"""
For JSON data structures.
.. versionadded:: 1.5
"""
name = 'JSON'
aliases = ['json']
filenames = ['*.json']
mimetypes = ['application/json']
flags = re.DOTALL
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
tokens = {
'whitespace': [
(r'\s+', Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
Number.Float),
(int_part, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', Punctuation),
# comma terminates the attribute but expects more
(r',', Punctuation, '#pop'),
# a closing bracket terminates the entire object, so pop twice
(r'\}', Punctuation, ('#pop', '#pop')),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
(r'\}', Punctuation, '#pop'),
],
# json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', Punctuation),
(r'\]', Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'\{', Punctuation, 'objectvalue'),
(r'\[', Punctuation, 'arrayvalue'),
],
# the root of a json document should be a value
'root': [
include('value'),
],
}
class JsonLdLexer(JsonLexer):
"""
For `JSON-LD <http://json-ld.org/>`_ linked data.
.. versionadded:: 2.0
"""
name = 'JSON-LD'
aliases = ['jsonld', 'json-ld']
filenames = ['*.jsonld']
mimetypes = ['application/ld+json']
tokens = {
'objectvalue': [
(r'"@(context|id|value|language|type|container|list|set|'
r'reverse|index|base|vocab|graph)"', Name.Decorator,
'objectattribute'),
inherit,
],
}
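# Usage sketch, again through the standard pygments entry point; the JSON-LD
# document is illustrative:
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   highlight('{"@context": "http://schema.org/"}', JsonLdLexer(),
#             TerminalFormatter())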
| apache-2.0 |
travisreed-wf/PyGithub | github/PaginatedList.py | 23 | 7707 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Bill Mill <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 davidbrai <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class PaginatedListBase:
def __init__(self):
self.__elements = list()
def __getitem__(self, index):
assert isinstance(index, (int, slice))
if isinstance(index, (int, long)):
self.__fetchToIndex(index)
return self.__elements[index]
else:
return self._Slice(self, index)
def __iter__(self):
for element in self.__elements:
yield element
while self._couldGrow():
newElements = self._grow()
for element in newElements:
yield element
def _isBiggerThan(self, index):
return len(self.__elements) > index or self._couldGrow()
def __fetchToIndex(self, index):
while len(self.__elements) <= index and self._couldGrow():
self._grow()
def _grow(self):
newElements = self._fetchNextPage()
self.__elements += newElements
return newElements
class _Slice:
def __init__(self, theList, theSlice):
self.__list = theList
self.__start = theSlice.start or 0
self.__stop = theSlice.stop
self.__step = theSlice.step or 1
def __iter__(self):
index = self.__start
while not self.__finished(index):
if self.__list._isBiggerThan(index):
yield self.__list[index]
index += self.__step
else:
return
def __finished(self, index):
return self.__stop is not None and index >= self.__stop
class PaginatedList(PaginatedListBase):
"""
This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.
You can simply enumerate through instances of this class::
for repo in user.get_repos():
print repo.name
You can also index them or take slices::
second_repo = user.get_repos()[1]
first_repos = user.get_repos()[:10]
If you want to iterate in reversed order, just do::
for repo in user.get_repos().reversed:
print repo.name
And if you really need it, you can explicitly access a specific page::
some_repos = user.get_repos().get_page(0)
some_other_repos = user.get_repos().get_page(3)
"""
def __init__(self, contentClass, requester, firstUrl, firstParams):
PaginatedListBase.__init__(self)
self.__requester = requester
self.__contentClass = contentClass
self.__firstUrl = firstUrl
self.__firstParams = firstParams or ()
self.__nextUrl = firstUrl
self.__nextParams = firstParams or {}
if self.__requester.per_page != 30:
self.__nextParams["per_page"] = self.__requester.per_page
self._reversed = False
self.__totalCount = None
@property
def totalCount(self):
if not self.__totalCount:
self._grow()
return self.__totalCount
def _getLastPageUrl(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=self.__nextParams
)
links = self.__parseLinkHeader(headers)
lastUrl = links.get("last")
return lastUrl
@property
def reversed(self):
r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
r.__reverse()
return r
def __reverse(self):
self._reversed = True
lastUrl = self._getLastPageUrl()
if lastUrl:
self.__nextUrl = lastUrl
def _couldGrow(self):
return self.__nextUrl is not None
def _fetchNextPage(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__nextUrl,
parameters=self.__nextParams
)
data = data if data else []
self.__nextUrl = None
if len(data) > 0:
links = self.__parseLinkHeader(headers)
if self._reversed:
if "prev" in links:
self.__nextUrl = links["prev"]
elif "next" in links:
self.__nextUrl = links["next"]
self.__nextParams = None
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
content = [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data if element is not None
]
if self._reversed:
return content[::-1]
return content
def __parseLinkHeader(self, headers):
links = {}
if "link" in headers:
linkHeaders = headers["link"].split(", ")
for linkHeader in linkHeaders:
(url, rel) = linkHeader.split("; ")
url = url[1:-1]
rel = rel[5:-1]
links[rel] = url
return links
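# For orientation, a GitHub-style header value such as (example URLs only)
#   <https://api.github.com/user/repos?page=3>; rel="next", <https://api.github.com/user/repos?page=50>; rel="last"
# parses to {'next': '...?page=3', 'last': '...?page=50'}, which feeds
# _fetchNextPage() and __reverse() above.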
def get_page(self, page):
params = dict(self.__firstParams)
if page != 0:
params["page"] = page + 1
if self.__requester.per_page != 30:
params["per_page"] = self.__requester.per_page
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=params
)
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
return [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data
]
| gpl-3.0 |
katyhuff/moose | python/MooseDocs/extensions/MooseCSS.py | 1 | 2028 | from markdown.blockprocessors import BlockProcessor
from MooseCommonExtension import MooseCommonExtension
import re
from markdown.util import etree
class MooseCSS(BlockProcessor, MooseCommonExtension):
"""
Markdown extension for applying CSS styles to paragraph
Markdown syntax is:
!css <options>
Paragraph text here
Where <options> are key=value pairs.
"""
RE = re.compile(r'^!\ ?css(.*)')
# If there are multiple css blocks on the same page then
# they need to have different ids
MATCHES_FOUND = 0
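# Hypothetical markdown input that this block processor would match:
#
#   !css color=red font-size=14px
#       This indented paragraph is emitted inside a styled <p> element.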
def __init__(self, parser, root=None, **kwargs):
MooseCommonExtension.__init__(self)
BlockProcessor.__init__(self, parser, **kwargs)
def test(self, parent, block):
"""
Test to see if we should process this block of markdown.
Inherited from BlockProcessor.
"""
return self.RE.search(block)
def run(self, parent, blocks):
"""
Called when it is determined that we can process this block.
This will convert the markdown into HTML
"""
sibling = self.lastChild(parent)
block = blocks.pop(0)
m = self.RE.search(block)
if m:
# Parse out the options on the css line
options, styles = self.getSettings(m.group(1))
block = block[m.end() + 1:] # removes the css line
block, paragraph = self.detab(block)
if m:
top_div = etree.SubElement(parent, 'div')
self.createCSS(top_div, styles, paragraph)
else:
top_div = sibling
self.parser.parseChunk(top_div, block)
def createCSS(self, top_div, styles, paragraph):
"""
Creates the actual HTML required for the CSS paragraph to work.
Input:
top_div: div element that will contain the paragraph element
styles[dict]: The CSS style attributes
paragraph: the actual text within the <p></p> element
"""
p_el = self.addStyle(etree.SubElement(top_div, 'p'), **styles)
p_el.text = paragraph
| lgpl-2.1 |
karrtikr/ete | ete3/test/test_treeview/item_faces.py | 1 | 4140 | # We will need to create Qt4 items
from PyQt4 import QtCore
from PyQt4.QtGui import QGraphicsRectItem, QGraphicsSimpleTextItem, \
QGraphicsEllipseItem, QColor, QPen, QBrush
from ... import Tree, faces, TreeStyle, NodeStyle
# To play with random colors
import colorsys
import random
class InteractiveItem(QGraphicsRectItem):
def __init__(self, *arg, **karg):
QGraphicsRectItem.__init__(self, *arg, **karg)
self.node = None
self.label = None
self.setCursor(QtCore.Qt.PointingHandCursor)
self.setAcceptsHoverEvents(True)
def hoverEnterEvent (self, e):
# There are many ways of adding interactive elements. With the
# following code, I show/hide a text item over my custom
# DynamicItemFace
if not self.label:
self.label = QGraphicsRectItem()
self.label.setParentItem(self)
# This is to ensure that the label is rendered over the
# rest of item children (default ZValue for items is 0)
self.label.setZValue(1)
self.label.setBrush(QBrush(QColor("white")))
self.label.text = QGraphicsSimpleTextItem()
self.label.text.setParentItem(self.label)
self.label.text.setText(self.node.name)
self.label.setRect(self.label.text.boundingRect())
self.label.setVisible(True)
def hoverLeaveEvent(self, e):
if self.label:
self.label.setVisible(False)
def random_color(h=None):
"""Generates a random color in RGB format."""
if not h:
h = random.random()
s = 0.5
l = 0.5
return _hls2hex(h, l, s)
def _hls2hex(h, l, s):
return '#%02x%02x%02x' %tuple(map(lambda x: int(x*255),
colorsys.hls_to_rgb(h, l, s)))
def ugly_name_face(node, *args, **kargs):
""" This is my item generator. It must receive a node object, and
returns a Qt4 graphics item that can be used as a node face.
"""
# receive an arbitrary number of arguments, in this case width and
# height of the faces
width = args[0][0]
height = args[0][1]
## Creates a main master Item that will contain all other elements
## Items can be standard QGraphicsItem
# masterItem = QGraphicsRectItem(0, 0, width, height)
# Or your custom Items, in which you can re-implement interactive
# functions, etc. Check QGraphicsItem doc for details.
masterItem = InteractiveItem(0, 0, width, height)
# Keep a link within the item to access node info
masterItem.node = node
# I dont want a border around the masterItem
masterItem.setPen(QPen(QtCore.Qt.NoPen))
# Add ellipse around text
ellipse = QGraphicsEllipseItem(masterItem.rect())
ellipse.setParentItem(masterItem)
# Change ellipse color
ellipse.setBrush(QBrush(QColor( random_color())))
# Add node name within the ellipse
text = QGraphicsSimpleTextItem(node.name)
text.setParentItem(ellipse)
text.setPen(QPen(QPen(QColor("white"))))
# Center text according to masterItem size
tw = text.boundingRect().width()
th = text.boundingRect().height()
center = masterItem.boundingRect().center()
text.setPos(center.x()-tw/2, center.y()-th/2)
return masterItem
def master_ly(node):
if node.is_leaf():
# Create an ItemFAce. First argument must be the pointer to
# the constructor function that returns a QGraphicsItem. It
# will be used to draw the Face. Next arguments are arbitrary,
# and they will be forwarded to the constructor Face function.
F = faces.DynamicItemFace(ugly_name_face, 100, 50)
faces.add_face_to_node(F, node, 0, position="aligned")
def get_example_tree():
t = Tree()
t.populate(8, reuse_names=False)
ts = TreeStyle()
ts.layout_fn = master_ly
ts.title.add_face(faces.TextFace("Drawing your own Qt Faces", fsize=15), 0)
return t, ts
if __name__ == "__main__":
t, ts = get_example_tree()
#t.render("item_faces.png", h=400, tree_style=ts)
# The interactive features are only available using the GUI
t.show(tree_style=ts)
| gpl-3.0 |
alexandrucoman/vbox-neutron-agent | neutron/db/api.py | 6 | 1693 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslo_db.sqlalchemy import session
from sqlalchemy import exc
_FACADE = None
MAX_RETRIES = 10
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)
return _FACADE
def get_engine():
"""Helper method to grab engine."""
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session."""
facade = _create_facade_lazily()
return facade.get_session(autocommit=autocommit,
expire_on_commit=expire_on_commit)
@contextlib.contextmanager
def autonested_transaction(sess):
"""This is a convenience method to not bother with 'nested' parameter."""
try:
session_context = sess.begin_nested()
except exc.InvalidRequestError:
session_context = sess.begin(subtransactions=True)
finally:
with session_context as tx:
yield tx
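# Usage sketch (caller side, using only names from this module):
#
#   sess = get_session()
#   with autonested_transaction(sess) as tx:
#       pass  # ORM work; begin_nested() is tried first, then subtransactions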
| apache-2.0 |
MrLoick/python-for-android | python-modules/twisted/twisted/python/dispatch.py | 64 | 1187 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import warnings
warnings.warn(
"Create your own event dispatching mechanism, "
"twisted.python.dispatch will soon be no more.",
DeprecationWarning, 2)
class EventDispatcher:
"""
A global event dispatcher for events.
I'm used for any events that need to span disparate objects in the client.
I should only be used when one object needs to signal an object that it's
not got a direct reference to (unless you really want to pass it through
here, in which case I won't mind).
I'm mainly useful for complex GUIs.
"""
def __init__(self, prefix="event_"):
self.prefix = prefix
self.callbacks = {}
def registerHandler(self, name, meth):
self.callbacks.setdefault(name, []).append(meth)
def autoRegister(self, obj):
from twisted.python import reflect
d = {}
reflect.accumulateMethods(obj, d, self.prefix)
for k,v in d.items():
self.registerHandler(k, v)
def publishEvent(self, name, *args, **kwargs):
for cb in self.callbacks[name]:
cb(*args, **kwargs)
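# Minimal usage sketch:
#
#   d = EventDispatcher()
#   d.registerHandler("saved", lambda path: None)
#   d.publishEvent("saved", "/tmp/doc.txt")
#
# autoRegister(obj) instead collects obj's methods named with the configured
# prefix (default "event_") and registers each one.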
| apache-2.0 |
0xc0170/pyOCD | pyOCD/test/test_utility/test_cmdline.py | 11 | 1600 | """
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyOCD.utility.cmdline import split_command_line
class TestSplitCommandLine:
def test_split(self):
assert split_command_line('foo') == ['foo']
assert split_command_line(['foo']) == ['foo']
assert split_command_line('foo bar') == ['foo', 'bar']
assert split_command_line(['foo bar']) == ['foo', 'bar']
def test_split_strings(self):
assert split_command_line('"foo"') == ['foo']
assert split_command_line('"foo bar"') == ['foo bar']
assert split_command_line(['"foo"']) == ['foo']
assert split_command_line('a "b c" d') == ['a', "b c", 'd']
assert split_command_line("'foo bar'") == ['foo bar']
def test_split_whitespace(self):
assert split_command_line('a b') == ['a', 'b']
assert split_command_line('a\tb') == ['a', 'b']
assert split_command_line('a\rb') == ['a', 'b']
assert split_command_line('a\nb') == ['a', 'b']
assert split_command_line('a \tb') == ['a', 'b']
| apache-2.0 |
mementum/backtrader | samples/sigsmacross/sigsmacross.py | 1 | 3835 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import backtrader as bt
class SmaCross(bt.SignalStrategy):
params = dict(sma1=10, sma2=20)
def notify_order(self, order):
if not order.alive():
print('{} {} {}@{}'.format(
bt.num2date(order.executed.dt),
'buy' if order.isbuy() else 'sell',
order.executed.size,
order.executed.price)
)
def notify_trade(self, trade):
if trade.isclosed:
print('profit {}'.format(trade.pnlcomm))
def __init__(self):
sma1 = bt.ind.SMA(period=self.params.sma1)
sma2 = bt.ind.SMA(period=self.params.sma2)
crossover = bt.ind.CrossOver(sma1, sma2)
self.signal_add(bt.SIGNAL_LONG, crossover)
def runstrat(pargs=None):
args = parse_args(pargs)
cerebro = bt.Cerebro()
cerebro.broker.set_cash(args.cash)
data0 = bt.feeds.YahooFinanceData(
dataname=args.data,
fromdate=datetime.datetime.strptime(args.fromdate, '%Y-%m-%d'),
todate=datetime.datetime.strptime(args.todate, '%Y-%m-%d'))
cerebro.adddata(data0)
cerebro.addstrategy(SmaCross, **(eval('dict(' + args.strat + ')')))
cerebro.addsizer(bt.sizers.FixedSize, stake=args.stake)
cerebro.run()
if args.plot:
cerebro.plot(**(eval('dict(' + args.plot + ')')))
def parse_args(pargs=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='sigsmacross')
parser.add_argument('--data', required=False, default='YHOO',
help='Yahoo Ticker')
parser.add_argument('--fromdate', required=False, default='2011-01-01',
help='Starting date in YYYY-MM-DD format')
parser.add_argument('--todate', required=False, default='2012-12-31',
help='Ending date in YYYY-MM-DD format')
parser.add_argument('--cash', required=False, action='store', type=float,
default=10000, help=('Starting cash'))
parser.add_argument('--stake', required=False, action='store', type=int,
default=1, help=('Stake to apply'))
parser.add_argument('--strat', required=False, action='store', default='',
help=('Arguments for the strategy'))
parser.add_argument('--plot', '-p', nargs='?', required=False,
metavar='kwargs', const='{}',
help=('Plot the read data applying any kwargs passed\n'
'\n'
'For example:\n'
'\n'
' --plot style="candle" (to plot candles)\n'))
return parser.parse_args(pargs)
if __name__ == '__main__':
runstrat()
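# Example command line (all values illustrative); --strat is eval'd into
# dict(...) above, so keyword pairs are passed straight to the strategy:
#
#   python sigsmacross.py --data YHOO --fromdate 2011-01-01 \
#       --todate 2012-12-31 --cash 10000 --strat sma1=5,sma2=15 --plot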
| gpl-3.0 |
cchanning/Impala | tests/common/failure_injector.py | 16 | 3806 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Failure injection module for the Impala service. There are two main ways this module
# can be used - the first is to initialize the failure injector and then call start()
# which will kick off a timer that chooses a random impalad/state store process
# to fail each time timer fires.
# The second way this module can be used to to initialize it and call the actions
# directly (ex. kill_random_impalad()). This provides a bit more control over exactly
# when a failure will happen and is useful for targeted test scenarios.
import logging
import os
import sys
import time
from tests.common.impala_cluster import *
from random import choice
from threading import Timer
logging.basicConfig(level=logging.INFO, format='%(threadName)s: %(message)s')
LOG = logging.getLogger('failure-injector')
# This class is used for injecting failures for the Impala service.
class FailureInjector(object):
def __init__(self, impala_cluster, failure_frequency, impalad_exclude_list=None):
"""
Initializes the FailureInjector object.
impala_cluster - An ImpalaCluster object (see the impala_cluster module)
failure_frequency - Interval to fire timer (in seconds)
impalad_exclude_list - A list of impalad host:port name to not inject failures
on. Useful to filter out the coordinator.
"""
self.cluster = impala_cluster
self.cluster.get_impala_service().set_process_auto_restart_config(value=True)
# TODO: Do we need to restart the impala service to apply this?
# self.cluster.get_impala_service().restart()
self.failure_frequency = failure_frequency
num_impalad_procs = len(self.cluster.get_impala_service().get_all_impalad_processes())
self.impalad_exclude_list = impalad_exclude_list
# Build a weighted list of possible actions. This is done using a trivial approach
# where we just add the item multiple times (weight value) into the action list.
# TODO: Provide a way to dynamically configure the weights
actions_with_weights = {self.kill_random_impalad: num_impalad_procs * 2,
self.kill_state_store: 1}
self.possible_actions = list()
for key, value in actions_with_weights.items():
self.possible_actions.extend([key] * value)
def start(self):
""" Starts the timer, triggering failures for the specified interval """
self.__start_timer()
def cancel(self):
""" Stops the timer, canceling any additional failures from occurring """
if self.__timer is not None:
self.__timer.cancel()
def kill_random_impalad(self):
""" Kills a randomly selected impalad instance not in the exlude list """
filtered_impalad = \
filter(lambda impalad: '%s:%d' % (impalad.hostname, impalad.be_port)\
not in self.impalad_exclude_list,
self.cluster.get_impala_service().get_all_impalad_processes())
self.kill_impalad(choice(filtered_impalad))
def kill_impalad(self, impalad):
""" Kills the specified impalad instance """
LOG.info('Chose impalad on "%s" to kill' % impalad.hostname)
impalad.kill()
def kill_state_store(self):
""" Kills the statestore process """
state_store = self.cluster.get_impala_service().get_state_store_process()
LOG.info('Chose statestore on "%s" to kill' % state_store.hostname)
state_store.kill()
def __start_timer(self):
""" Starts a new timer, cancelling the previous timer if it is running """
self.cancel()
self.__timer = Timer(self.failure_frequency, self.__choose_action)
self.__timer.start()
def __choose_action(self):
""" Chooses a failure action to perform """
action = choice(self.possible_actions)
LOG.info('Executing action: %s' % action)
action()
self.__start_timer()
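# Usage sketch (the cluster object comes from tests.common.impala_cluster;
# the excluded host:port is made up):
#
#   injector = FailureInjector(cluster, failure_frequency=60,
#                              impalad_exclude_list=['host-0:22000'])
#   injector.start()    # random impalad/statestore kills every 60s
#   ...
#   injector.cancel()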
| apache-2.0 |
parthea/pydatalab | google/datalab/bigquery/_query.py | 4 | 13135 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements Query BigQuery API."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
import google.datalab
import google.datalab.data
import google.datalab.utils
from ._query_output import QueryOutput
from . import _api
from . import _query_job
from . import _udf
from . import _utils
from . import _external_data_source
class Query(object):
"""Represents a Query object that encapsulates a BigQuery SQL query.
This object can be used to execute SQL queries and retrieve results.
"""
def __init__(self, sql, env=None, udfs=None, data_sources=None, subqueries=None):
"""Initializes an instance of a Query object.
Args:
sql: the BigQuery SQL query string to execute
env: a dictionary containing objects from the query execution context, used to get references
to UDFs, subqueries, and external data sources referenced by the query
udfs: list of UDFs names referenced in the SQL, or dictionary of names and UDF objects
data_sources: list of external data sources names referenced in the SQL, or dictionary of
names and data source objects
subqueries: list of subqueries names referenced in the SQL, or dictionary of names and
Query objects
Raises:
Exception if expansion of any variables failed.
"""
self._sql = sql
self._udfs = []
self._subqueries = []
self._data_sources = []
self._env = env or {}
# Validate given list or dictionary of objects that they are of correct type
# and add them to the target dictionary
def _expand_objects(obj_container, obj_type, target_list):
for item in obj_container:
# for a list of objects, we should find these objects in the given environment
if isinstance(obj_container, list):
value = self._env.get(item)
if value is None:
raise Exception('Cannot find object %s' % item)
# for a dictionary of objects, each pair must be a string and object of the expected type
elif isinstance(obj_container, dict):
value = obj_container[item]
if not isinstance(value, obj_type):
raise Exception('Expected type: %s, found: %s.' % (obj_type, type(value)))
else:
raise Exception('Unexpected container for type %s. Expected a list or dictionary'
% obj_type)
target_list.append((item, value))
if subqueries:
_expand_objects(subqueries, Query, self._subqueries)
if udfs:
_expand_objects(udfs, _udf.UDF, self._udfs)
if data_sources:
_expand_objects(data_sources, _external_data_source.ExternalDataSource, self._data_sources)
if len(self._data_sources) > 1:
raise Exception('Only one temporary external datasource is supported in queries.')
@staticmethod
def from_view(view):
""" Return a Query for the given View object
Args:
view: the View object to construct a Query out of
Returns:
A Query object with the same sql as the given View object
"""
return Query('SELECT * FROM %s' % view._repr_sql_())
@staticmethod
def from_table(table, fields=None):
""" Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return Query('SELECT %s FROM %s' % (fields, table._repr_sql_()))
def _expanded_sql(self, sampling=None):
"""Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources
Returns:
The expanded SQL string of this object
"""
# use lists to preserve the order of subqueries, bigquery will not like listing subqueries
# out of order if they depend on each other. For example, the following will be rejected:
# WITH q2 as (SELECT * FROM q1),
# q1 as (SELECT * FROM mytable),
# SELECT * FROM q2
# so when we're getting the dependencies, use recursion into a list to maintain the order
udfs = []
subqueries = []
expanded_sql = ''
def _recurse_subqueries(query):
"""Recursively scan subqueries and add their pieces to global scope udfs and subqueries
"""
if query._subqueries:
for subquery in query._subqueries:
_recurse_subqueries(subquery[1])
subqueries.extend([s for s in query._subqueries if s not in subqueries])
if query._udfs:
# query._udfs is a list of (name, UDF) tuples; we just want the UDF.
udfs.extend([u[1] for u in query._udfs if u[1] not in udfs])
_recurse_subqueries(self)
if udfs:
expanded_sql += '\n'.join([udf._expanded_sql() for udf in udfs])
expanded_sql += '\n'
def _indent_query(subquery):
return ' ' + subquery._sql.replace('\n', '\n ')
if subqueries:
expanded_sql += 'WITH ' + \
'\n),\n'.join(['%s AS (\n%s' % (sq[0], _indent_query(sq[1]))
for sq in subqueries])
expanded_sql += '\n)\n\n'
expanded_sql += sampling(self._sql) if sampling else self._sql
return expanded_sql
def _repr_sql_(self):
"""Creates a SQL representation of this object.
Returns:
The SQL representation to use when embedding this object into other SQL.
"""
return '(%s)' % self.sql
def __repr__(self):
"""Creates a friendly representation of this object.
Returns:
The friendly representation of this object (the unmodified SQL).
"""
return 'BigQuery Query - %s' % self._sql
@property
def sql(self):
""" Get the SQL for the query. """
return self._expanded_sql()
@property
def udfs(self):
""" Get a dictionary of UDFs referenced by the query."""
return dict(self._udfs)
@property
def subqueries(self):
""" Get a dictionary of subqueries referenced by the query."""
return dict(self._subqueries)
@property
def data_sources(self):
""" Get a dictionary of external data sources referenced by the query."""
return dict(self._data_sources)
def dry_run(self, context=None, query_params=None):
"""Dry run a query, to check the validity of the query and return some useful statistics.
Args:
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A dict with 'cacheHit' and 'totalBytesProcessed' fields.
Raises:
An exception if the query was malformed.
"""
context = context or google.datalab.Context.default()
api = _api.Api(context)
try:
query_result = api.jobs_insert_query(self.sql, dry_run=True,
table_definitions=self.data_sources,
query_params=query_params)
except Exception as e:
raise e
return query_result['statistics']['query']
def execute_async(self, output_options=None, sampling=None, context=None, query_params=None):
""" Initiate the query and return a QueryJob.
Args:
output_options: a QueryOutput object describing how to execute the query
sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A Job object that can wait on creating a table or exporting to a file.
If the output is a table, the Job object additionally has run statistics
and query results
Raises:
Exception if query could not be executed.
"""
# Default behavior is to execute to a table
if output_options is None:
output_options = QueryOutput.table()
# First, execute the query into a table, using a temporary one if no name is specified
batch = output_options.priority == 'low'
append = output_options.table_mode == 'append'
overwrite = output_options.table_mode == 'overwrite'
table_name = output_options.table_name
context = context or google.datalab.Context.default()
api = _api.Api(context)
if table_name is not None:
table_name = _utils.parse_table_name(table_name, api.project_id)
sql = self._expanded_sql(sampling)
try:
query_result = api.jobs_insert_query(sql, table_name=table_name,
append=append, overwrite=overwrite, batch=batch,
use_cache=output_options.use_cache,
allow_large_results=output_options.allow_large_results,
table_definitions=self.data_sources,
query_params=query_params)
except Exception as e:
raise e
if 'jobReference' not in query_result:
raise Exception('Unexpected response from server')
job_id = query_result['jobReference']['jobId']
if not table_name:
try:
destination = query_result['configuration']['query']['destinationTable']
table_name = (destination['projectId'], destination['datasetId'], destination['tableId'])
except KeyError:
# The query was in error
raise Exception(_utils.format_query_errors(query_result['status']['errors']))
execute_job = _query_job.QueryJob(job_id, table_name, sql, context=context)
# If all we need is to execute the query to a table, we're done
if output_options.type == 'table':
return execute_job
# Otherwise, build an async Job that waits on the query execution then carries out
# the specific export operation
else:
export_args = export_kwargs = None
if output_options.type == 'file':
if output_options.file_path.startswith('gs://'):
export_func = execute_job.result().extract
export_args = [output_options.file_path]
export_kwargs = {
'format': output_options.file_format,
'csv_delimiter': output_options.csv_delimiter,
'csv_header': output_options.csv_header,
'compress': output_options.compress_file
}
else:
export_func = execute_job.result().to_file
export_args = [output_options.file_path]
export_kwargs = {
'format': output_options.file_format,
'csv_delimiter': output_options.csv_delimiter,
'csv_header': output_options.csv_header
}
elif output_options.type == 'dataframe':
export_func = execute_job.result().to_dataframe
export_args = []
export_kwargs = {
'start_row': output_options.dataframe_start_row,
'max_rows': output_options.dataframe_max_rows
}
# Perform the export operation with the specified parameters
export_func = google.datalab.utils.async_function(export_func)
return export_func(*export_args, **export_kwargs)
def execute(self, output_options=None, sampling=None, context=None, query_params=None):
""" Initiate the query and return a QueryJob.
Args:
output_options: a QueryOutput object describing how to execute the query
sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
Returns:
A Job object that can be used to get the query results, or export to a file or dataframe
Raises:
Exception if query could not be executed.
"""
return self.execute_async(output_options, sampling=sampling, context=context,
query_params=query_params).wait()
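# Usage sketch (the query text is illustrative); with no output_options the
# query runs into a temporary table, as in the default branch above:
#
#   q = Query('SELECT 17 AS n')
#   job = q.execute()  # QueryOutput.table() by default; blocks until done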
| apache-2.0 |
kmacinnis/sympy | sympy/assumptions/tests/test_context.py | 126 | 1153 | from sympy.assumptions import ask, Q
from sympy.assumptions.assume import assuming, global_assumptions
from sympy.abc import x, y
def test_assuming():
with assuming(Q.integer(x)):
assert ask(Q.integer(x))
assert not ask(Q.integer(x))
def test_assuming_nested():
assert not ask(Q.integer(x))
assert not ask(Q.integer(y))
with assuming(Q.integer(x)):
assert ask(Q.integer(x))
assert not ask(Q.integer(y))
with assuming(Q.integer(y)):
assert ask(Q.integer(x))
assert ask(Q.integer(y))
assert ask(Q.integer(x))
assert not ask(Q.integer(y))
assert not ask(Q.integer(x))
assert not ask(Q.integer(y))
def test_finally():
try:
with assuming(Q.integer(x)):
1/0
except ZeroDivisionError:
pass
assert not ask(Q.integer(x))
def test_remove_safe():
global_assumptions.add(Q.integer(x))
with assuming():
assert ask(Q.integer(x))
global_assumptions.remove(Q.integer(x))
assert not ask(Q.integer(x))
assert ask(Q.integer(x))
global_assumptions.clear() # for the benefit of other tests
| bsd-3-clause |
lodemo/CATANA | src/face_recognition/youtube_dl/extractor/ina.py | 55 | 1062 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class InaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ina\.fr/video/(?P<id>I?[A-Z0-9]+)'
_TEST = {
'url': 'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
'md5': 'a667021bf2b41f8dc6049479d9bb38a3',
'info_dict': {
'id': 'I12055569',
'ext': 'mp4',
'title': 'François Hollande "Je crois que c\'est clair"',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
mrss_url = 'http://player.ina.fr/notices/%s.mrss' % video_id
info_doc = self._download_xml(mrss_url, video_id)
self.report_extraction(video_id)
video_url = info_doc.find('.//{http://search.yahoo.com/mrss/}player').attrib['url']
return {
'id': video_id,
'url': video_url,
'title': info_doc.find('.//title').text,
}
| mit |
cmyr/keras | keras/preprocessing/sequence.py | 4 | 4010 | from __future__ import absolute_import
# -*- coding: utf-8 -*-
import numpy as np
import random
from six.moves import range
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre'):
"""
Pad each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Supports post-padding and pre-padding (default).
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
x = np.zeros((nb_samples, maxlen)).astype(dtype)
for idx, s in enumerate(sequences):
if padding == 'post':
x[idx, :lengths[idx]] = s[:maxlen]
else:
x[idx, -min(maxlen, lengths[idx]):] = s[:maxlen]
return x
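# Worked example of the pre-padding/truncation above:
#   pad_sequences([[1, 2, 3], [4]], maxlen=2) -> array([[1, 2], [0, 4]])
# (a long sequence keeps its first maxlen items; a short one is left-padded
# with zeros)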
def make_sampling_table(size, sampling_factor=1e-5):
'''
This generates an array where the ith element
is the probability that a word of rank i would be sampled,
according to the sampling distribution used in word2vec.
The word2vec formula is:
p(word) = min(1, sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor))
We assume that the word frequencies follow Zipf's law (s=1) to derive
a numerical approximation of frequency(rank):
frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
where gamma is the Euler-Mascheroni constant.
'''
gamma = 0.577
rank = np.array(list(range(size)))
rank[0] = 1
inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1./(12.*rank)
f = sampling_factor * inv_fq
return np.minimum(1., f / np.sqrt(f))
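# Example sketch: frequent (low-rank) words get a small sampling probability
# while rare words saturate at 1.0, e.g.:
#
#     >>> table = make_sampling_table(10)
#     >>> bool(table[1] < table[9] <= 1.0)
#     True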
def skipgrams(sequence, vocabulary_size,
window_size=4, negative_samples=1., shuffle=True,
categorical=False, sampling_table=None):
'''
    Takes a sequence (a list of word indexes) and
    returns couples of [word_index, other_word_index] and labels (1s or 0s),
    where label = 1 if 'other_word' belongs to the context of 'word',
    and label = 0 if 'other_word' is randomly sampled
@param vocabulary_size: int. maximum possible word index + 1
@param window_size: int. actually half-window. The window of a word wi will be [i-window_size, i+window_size+1]
@param negative_samples: float >= 0. 0 for no negative (=random) samples. 1 for same number as positive samples. etc.
@param categorical: bool. if False, labels will be integers (eg. [0, 1, 1 .. ]),
if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ]
Note: by convention, index 0 in the vocabulary is a non-word and will be skipped.
'''
couples = []
labels = []
for i, wi in enumerate(sequence):
if not wi:
continue
if sampling_table is not None:
if sampling_table[wi] < random.random():
continue
window_start = max(0, i-window_size)
window_end = min(len(sequence), i+window_size+1)
for j in range(window_start, window_end):
if j != i:
wj = sequence[j]
if not wj:
continue
couples.append([wi, wj])
if categorical:
labels.append([0,1])
else:
labels.append(1)
if negative_samples > 0:
nb_negative_samples = int(len(labels) * negative_samples)
words = [c[0] for c in couples]
random.shuffle(words)
couples += [[words[i%len(words)], random.randint(1, vocabulary_size-1)] for i in range(nb_negative_samples)]
if categorical:
labels += [[1,0]]*nb_negative_samples
else:
labels += [0]*nb_negative_samples
if shuffle:
        seed = random.randint(0, int(10e6))
random.seed(seed)
random.shuffle(couples)
random.seed(seed)
random.shuffle(labels)
return couples, labels
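# Usage sketch (illustrative): a window of 1 over [1, 2, 3] yields 4 positive
# pairs plus, with negative_samples=1., an equal number of random negatives:
#
#     >>> random.seed(0)
#     >>> couples, labels = skipgrams([1, 2, 3], vocabulary_size=5,
#     ...                             window_size=1, shuffle=False)
#     >>> len(couples), sorted(set(labels))
#     (8, [0, 1])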
| mit |
GenericMappingTools/gmt-python | pygmt/tests/test_helpers.py | 1 | 3568 | """
Tests the helper functions/classes/etc used in wrapping GMT.
"""
import os
import numpy as np
import pytest
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
GMTTempFile,
args_in_kwargs,
data_kind,
kwargs_to_strings,
unique_name,
)
@pytest.mark.parametrize(
"data,x,y",
[
(None, None, None),
("data.txt", np.array([1, 2]), np.array([4, 5])),
("data.txt", np.array([1, 2]), None),
("data.txt", None, np.array([4, 5])),
(None, np.array([1, 2]), None),
(None, None, np.array([4, 5])),
],
)
def test_data_kind_fails(data, x, y):
"""
Make sure data_kind raises exceptions when it should.
"""
with pytest.raises(GMTInvalidInput):
data_kind(data=data, x=x, y=y)
def test_unique_name():
"""
Make sure the names are really unique.
"""
names = [unique_name() for i in range(100)]
assert len(names) == len(set(names))
def test_kwargs_to_strings_fails():
"""
Make sure it fails for invalid conversion types.
"""
with pytest.raises(GMTInvalidInput):
kwargs_to_strings(bla="blablabla")
def test_gmttempfile():
"""
Check that file is really created and deleted.
"""
with GMTTempFile() as tmpfile:
assert os.path.exists(tmpfile.name)
# File should be deleted when leaving the with block
assert not os.path.exists(tmpfile.name)
def test_gmttempfile_unique():
"""
Check that generating multiple files creates unique names.
"""
with GMTTempFile() as tmp1:
with GMTTempFile() as tmp2:
with GMTTempFile() as tmp3:
assert tmp1.name != tmp2.name != tmp3.name
def test_gmttempfile_prefix_suffix():
"""
Make sure the prefix and suffix of temporary files are user specifiable.
"""
with GMTTempFile() as tmpfile:
assert os.path.basename(tmpfile.name).startswith("pygmt-")
assert os.path.basename(tmpfile.name).endswith(".txt")
with GMTTempFile(prefix="user-prefix-") as tmpfile:
assert os.path.basename(tmpfile.name).startswith("user-prefix-")
assert os.path.basename(tmpfile.name).endswith(".txt")
with GMTTempFile(suffix=".log") as tmpfile:
assert os.path.basename(tmpfile.name).startswith("pygmt-")
assert os.path.basename(tmpfile.name).endswith(".log")
with GMTTempFile(prefix="user-prefix-", suffix=".log") as tmpfile:
assert os.path.basename(tmpfile.name).startswith("user-prefix-")
assert os.path.basename(tmpfile.name).endswith(".log")
def test_gmttempfile_read():
"""
Make sure GMTTempFile.read() works.
"""
with GMTTempFile() as tmpfile:
with open(tmpfile.name, "w") as ftmp:
ftmp.write("in.dat: N = 2\t<1/3>\t<2/4>\n")
assert tmpfile.read() == "in.dat: N = 2 <1/3> <2/4>\n"
assert tmpfile.read(keep_tabs=True) == "in.dat: N = 2\t<1/3>\t<2/4>\n"
def test_args_in_kwargs():
"""
Test that args_in_kwargs function returns correct Boolean responses.
"""
kwargs = {"A": 1, "B": 2, "C": 3}
# Passing list of arguments with passing values in the beginning
passing_args_1 = ["B", "C", "D"]
assert args_in_kwargs(args=passing_args_1, kwargs=kwargs)
# Passing list of arguments that starts with failing arguments
passing_args_2 = ["D", "X", "C"]
assert args_in_kwargs(args=passing_args_2, kwargs=kwargs)
# Failing list of arguments
failing_args = ["D", "E", "F"]
assert not args_in_kwargs(args=failing_args, kwargs=kwargs)
| bsd-3-clause |
linktlh/Toontown-journey | toontown/dna/DNALandmarkBuilding.py | 3 | 2519 | from panda3d.core import LVector4f
import DNANode
import DNAUtil
import DNAError
class DNALandmarkBuilding(DNANode.DNANode):
COMPONENT_CODE = 13
def __init__(self, name):
DNANode.DNANode.__init__(self, name)
self.code = ''
self.wallColor = LVector4f(1, 1, 1, 1)
self.title = ''
self.article = ''
self.buildingType = ''
self.door = None
def setArticle(self, article):
self.article = article
def getArticle(self):
return self.article
def setBuildingType(self, buildingType):
self.buildingType = buildingType
def getBuildingType(self):
return self.buildingType
def setTitle(self, title):
self.title = title
def getTitle(self):
return self.title
def getCode(self):
return self.code
def setCode(self, code):
self.code = code
def setWallColor(self, color):
self.wallColor = color
def getWallColor(self):
return self.wallColor
def setupSuitBuildingOrigin(self, nodePathA, nodePathB):
if (self.getName()[:2] == 'tb') and (self.getName()[3].isdigit()) and (self.getName().find(':') != -1):
name = self.getName()
name = 's' + name[1:]
node = nodePathB.find('**/*suit_building_origin')
if node.isEmpty():
node = nodePathA.attachNewNode(name)
node.setPosHprScale(self.getPos(), self.getHpr(), self.getScale())
else:
node.wrtReparentTo(nodePathA, 0)
node.setName(name)
def makeFromDGI(self, dgi):
DNANode.DNANode.makeFromDGI(self, dgi)
self.code = DNAUtil.dgiExtractString8(dgi)
self.wallColor = DNAUtil.dgiExtractColor(dgi)
self.title = DNAUtil.dgiExtractString8(dgi)
self.article = DNAUtil.dgiExtractString8(dgi)
self.buildingType = DNAUtil.dgiExtractString8(dgi)
def traverse(self, nodePath, dnaStorage):
node = dnaStorage.findNode(self.code)
if node is None:
raise DNAError.DNAError('DNALandmarkBuilding code ' + self.code + ' not found in DNAStorage')
npA = nodePath
nodePath = node.copyTo(nodePath, 0)
nodePath.setName(self.getName())
nodePath.setPosHprScale(self.getPos(), self.getHpr(), self.getScale())
self.setupSuitBuildingOrigin(npA, nodePath)
for child in self.children:
child.traverse(nodePath, dnaStorage)
        nodePath.flattenStrong()
| apache-2.0 |
mvaled/gunicorn | gunicorn/selectors.py | 107 | 18997 | """Selectors module.
This module allows high-level and efficient I/O multiplexing, built upon the
`select` module primitives.
The following code adapted from trollius.selectors.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
try:
    from collections.abc import Mapping  # Python 3.3+
except ImportError:
    from collections import Mapping  # Python 2 fallback
import math
import select
import sys
from gunicorn._compat import wrap_error, InterruptedError
from gunicorn import six
# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
def _fileobj_to_fd(fileobj):
"""Return a file descriptor from a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
corresponding file descriptor
Raises:
ValueError if the object is invalid
"""
if isinstance(fileobj, six.integer_types):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: "
"{0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
"""Object used to associate a file object to its backing file descriptor,
selected event mask and attached data."""
class _SelectorMapping(Mapping):
"""Mapping of file objects to selector keys."""
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(six.with_metaclass(ABCMeta)):
"""Selector abstract base class.
A selector supports registering file objects to be monitored for specific
I/O events.
A file object is a file descriptor or any object with a `fileno()` method.
An arbitrary object can be attached to the file object, which can be used
for example to store context information, a callback, etc.
A selector can use various implementations (select(), poll(), epoll()...)
depending on the platform. The default `Selector` class uses the most
efficient implementation on the current platform.
"""
@abstractmethod
def register(self, fileobj, events, data=None):
"""Register a file object.
Parameters:
fileobj -- file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
ValueError if events is invalid
KeyError if fileobj is already registered
OSError if fileobj is closed or otherwise is unacceptable to
the underlying system call (if a system call is made)
Note:
OSError may or may not be raised
"""
raise NotImplementedError
@abstractmethod
def unregister(self, fileobj):
"""Unregister a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
SelectorKey instance
Raises:
KeyError if fileobj is not registered
Note:
If fileobj is registered but has since been closed this does
*not* raise OSError (even if the wrapped syscall does)
"""
raise NotImplementedError
def modify(self, fileobj, events, data=None):
"""Change a registered file object monitored events or attached data.
Parameters:
fileobj -- file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
Anything that unregister() or register() raises
"""
self.unregister(fileobj)
return self.register(fileobj, events, data)
@abstractmethod
def select(self, timeout=None):
"""Perform the actual selection, until some monitored file objects are
ready or a timeout expires.
Parameters:
timeout -- if timeout > 0, this specifies the maximum wait time, in
seconds
if timeout <= 0, the select() call won't block, and will
report the currently ready file objects
if timeout is None, select() will block until a monitored
file object becomes ready
Returns:
list of (key, events) for ready file objects
`events` is a bitwise mask of EVENT_READ|EVENT_WRITE
"""
raise NotImplementedError
def close(self):
"""Close the selector.
This must be called to make sure that any underlying resource is freed.
"""
pass
def get_key(self, fileobj):
"""Return the key associated to a registered file object.
Returns:
SelectorKey for this file object
"""
mapping = self.get_map()
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
@abstractmethod
def get_map(self):
"""Return a mapping of file objects to selector keys."""
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
class _BaseSelectorImpl(BaseSelector):
"""Base selector implementation."""
def __init__(self):
# this maps file descriptors to keys
self._fd_to_key = {}
# read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
"""Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive search in case
the object is invalid but we still have it in our map. This
is used by unregister() so we can unregister an object that
was previously registered even if it is closed. It is also
used by _SelectorMapping.
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Do an exhaustive search.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
# TODO: Subclasses can probably optimize this even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def close(self):
self._fd_to_key.clear()
def get_map(self):
return self._map
def _key_from_fd(self, fd):
"""Return the key associated to a given file descriptor.
Parameters:
fd -- file descriptor
Returns:
corresponding key, or None if not found
"""
try:
return self._fd_to_key[fd]
except KeyError:
return None
class SelectSelector(_BaseSelectorImpl):
"""Select-based selector."""
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
if sys.platform == 'win32':
def _select(self, r, w, _, timeout=None):
r, w, x = select.select(r, w, w, timeout)
return r, w + x, []
else:
_select = select.select
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
ready = []
try:
r, w, _ = wrap_error(self._select,
self._readers, self._writers, [], timeout)
except InterruptedError:
return ready
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, 'poll'):
class PollSelector(_BaseSelectorImpl):
"""Poll-based selector."""
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._poll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def select(self, timeout=None):
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# poll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = int(math.ceil(timeout * 1e3))
ready = []
try:
fd_event_list = wrap_error(self._poll.poll, timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.POLLIN:
events |= EVENT_WRITE
if event & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, 'epoll'):
class EpollSelector(_BaseSelectorImpl):
"""Epoll-based selector."""
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
epoll_events = 0
if events & EVENT_READ:
epoll_events |= select.EPOLLIN
if events & EVENT_WRITE:
epoll_events |= select.EPOLLOUT
self._epoll.register(key.fd, epoll_events)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
self._epoll.unregister(key.fd)
except OSError:
# This can happen if the FD was closed since it
# was registered.
pass
return key
def select(self, timeout=None):
if timeout is None:
timeout = -1
elif timeout <= 0:
timeout = 0
else:
# epoll_wait() has a resolution of 1 millisecond, round away
# from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3) * 1e-3
max_ev = len(self._fd_to_key)
ready = []
try:
fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.EPOLLIN:
events |= EVENT_WRITE
if event & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
if hasattr(select, 'devpoll'):
class DevpollSelector(_BaseSelectorImpl):
"""Solaris /dev/poll selector."""
def __init__(self):
super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def select(self, timeout=None):
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# devpoll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
ready = []
try:
fd_event_list = self._devpoll.poll(timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.POLLIN:
events |= EVENT_WRITE
if event & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super(DevpollSelector, self).close()
if hasattr(select, 'kqueue'):
class KqueueSelector(_BaseSelectorImpl):
"""Kqueue-based selector."""
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_ADD)
self._kqueue.control([kev], 0, 0)
if events & EVENT_WRITE:
kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
self._kqueue.control([kev], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
self._kqueue.control([kev], 0, 0)
except OSError:
# This can happen if the FD was closed since it
# was registered.
pass
if key.events & EVENT_WRITE:
kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
self._kqueue.control([kev], 0, 0)
except OSError:
# See comment above.
pass
return key
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
max_ev = len(self._fd_to_key)
ready = []
try:
kev_list = wrap_error(self._kqueue.control,
None, max_ev, timeout)
except InterruptedError:
return ready
for kev in kev_list:
fd = kev.ident
flag = kev.filter
events = 0
if flag == select.KQ_FILTER_READ:
events |= EVENT_READ
if flag == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals():
DefaultSelector = KqueueSelector
elif 'EpollSelector' in globals():
DefaultSelector = EpollSelector
elif 'DevpollSelector' in globals():
DefaultSelector = DevpollSelector
elif 'PollSelector' in globals():
DefaultSelector = PollSelector
else:
DefaultSelector = SelectSelector
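# Minimal usage sketch (assumes `sock` is a connected, non-blocking socket;
# illustrative only):
#
#     sel = DefaultSelector()
#     sel.register(sock, EVENT_READ | EVENT_WRITE, data='conn')
#     for key, events in sel.select(timeout=1.0):
#         if events & EVENT_READ:
#             pass  # key.fileobj is readable
#         if events & EVENT_WRITE:
#             pass  # key.fileobj is writable
#     sel.unregister(sock)
#     sel.close()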
| mit |
xiangel/hue | desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/data/setup.py | 42 | 1045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: setup.py 114 2007-06-14 21:17:14Z palgarvio $
# =============================================================================
# $URL: http://svn.edgewall.org/repos/babel/tags/0.9.6/babel/messages/tests/data/setup.py $
# $LastChangedDate: 2007-06-14 23:17:14 +0200 (do, 14 jun 2007) $
# $Rev: 114 $
# $LastChangedBy: palgarvio $
# =============================================================================
# Copyright (C) 2006 Ufsoft.org - Pedro Algarvio <[email protected]>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
# THIS IS A BOGUS PROJECT
from setuptools import setup, find_packages
setup(
name = 'TestProject',
version = '0.1',
license = 'BSD',
author = 'Foo Bar',
author_email = '[email protected]',
packages = find_packages(),
)
| apache-2.0 |
rabitt/mir_eval | evaluators/beat_eval.py | 4 | 1856 | #!/usr/bin/env python
'''
CREATED:2014-01-24 12:42:43 by Brian McFee <[email protected]>
Compute beat evaluation metrics
Usage:
./beat_eval.py REFERENCE.TXT ESTIMATED.TXT
'''
from __future__ import print_function
import argparse
import sys
import os
import eval_utilities
import mir_eval
def process_arguments():
'''Argparse function to get the program parameters'''
parser = argparse.ArgumentParser(description='mir_eval beat detection '
'evaluation')
parser.add_argument('-o',
dest='output_file',
default=None,
type=str,
action='store',
help='Store results in json format')
parser.add_argument('reference_file',
action='store',
help='path to the reference annotation file')
parser.add_argument('estimated_file',
action='store',
help='path to the estimated annotation file')
return vars(parser.parse_args(sys.argv[1:]))
if __name__ == '__main__':
# Get the parameters
parameters = process_arguments()
# Load in data
reference_beats = mir_eval.io.load_events(parameters['reference_file'])
estimated_beats = mir_eval.io.load_events(parameters['estimated_file'])
# Compute all the scores
scores = mir_eval.beat.evaluate(reference_beats, estimated_beats)
print("{} vs. {}".format(os.path.basename(parameters['reference_file']),
os.path.basename(parameters['estimated_file'])))
eval_utilities.print_evaluation(scores)
if parameters['output_file']:
print('Saving results to: ', parameters['output_file'])
eval_utilities.save_results(scores, parameters['output_file'])
| mit |
LukeM12/samba | source4/scripting/devel/speedtest.py | 31 | 8527 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Unix SMB/CIFS implementation.
# This speed test aims to show difference in execution time for bulk
# creation of user objects. This will help us compare
# Samba4 vs MS Active Directory performance.
# Copyright (C) Zahari Zahariev <[email protected]> 2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import optparse
import sys
import time
import base64
from decimal import Decimal
sys.path.insert(0, "bin/python")
import samba
samba.ensure_external_module("testtools", "testtools")
samba.ensure_external_module("subunit", "subunit/python")
import samba.getopt as options
from ldb import (
SCOPE_BASE, SCOPE_SUBTREE, LdbError, ERR_NO_SUCH_OBJECT,
ERR_UNWILLING_TO_PERFORM, ERR_INSUFFICIENT_ACCESS_RIGHTS)
from samba.ndr import ndr_pack, ndr_unpack
from samba.dcerpc import security
from samba.auth import system_session
from samba import gensec, sd_utils
from samba.samdb import SamDB
from samba.credentials import Credentials
import samba.tests
from samba.tests import delete_force
from subunit.run import SubunitTestRunner
import unittest
parser = optparse.OptionParser("speedtest.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args[0]
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
#
# Tests start here
#
class SpeedTest(samba.tests.TestCase):
def find_domain_sid(self, ldb):
res = ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_BASE)
return ndr_unpack(security.dom_sid,res[0]["objectSid"][0])
def setUp(self):
super(SpeedTest, self).setUp()
self.ldb_admin = ldb
self.base_dn = ldb.domain_dn()
self.domain_sid = security.dom_sid(ldb.get_domain_sid())
self.user_pass = "samba123@"
print "baseDN: %s" % self.base_dn
def create_user(self, user_dn):
ldif = """
dn: """ + user_dn + """
sAMAccountName: """ + user_dn.split(",")[0][3:] + """
objectClass: user
unicodePwd:: """ + base64.b64encode(("\"%s\"" % self.user_pass).encode('utf-16-le')) + """
url: www.example.com
"""
self.ldb_admin.add_ldif(ldif)
def create_group(self, group_dn, desc=None):
ldif = """
dn: """ + group_dn + """
objectClass: group
sAMAccountName: """ + group_dn.split(",")[0][3:] + """
groupType: 4
url: www.example.com
"""
self.ldb_admin.add_ldif(ldif)
def create_bundle(self, count):
for i in range(count):
self.create_user("cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn))
def remove_bundle(self, count):
for i in range(count):
delete_force(self.ldb_admin, "cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn))
def remove_test_users(self):
res = ldb.search(base="cn=Users,%s" % self.base_dn, expression="(objectClass=user)", scope=SCOPE_SUBTREE)
dn_list = [item.dn for item in res if "speedtestuser" in str(item.dn)]
for dn in dn_list:
delete_force(self.ldb_admin, dn)
class SpeedTestAddDel(SpeedTest):
def setUp(self):
super(SpeedTestAddDel, self).setUp()
def run_bundle(self, num):
print "\n=== Test ADD/DEL %s user objects ===\n" % num
avg_add = Decimal("0.0")
avg_del = Decimal("0.0")
for x in [1, 2, 3]:
start = time.time()
self.create_bundle(num)
res_add = Decimal( str(time.time() - start) )
avg_add += res_add
print " Attempt %s ADD: %.3fs" % ( x, float(res_add) )
#
start = time.time()
self.remove_bundle(num)
res_del = Decimal( str(time.time() - start) )
avg_del += res_del
print " Attempt %s DEL: %.3fs" % ( x, float(res_del) )
print "Average ADD: %.3fs" % float( Decimal(avg_add) / Decimal("3.0") )
print "Average DEL: %.3fs" % float( Decimal(avg_del) / Decimal("3.0") )
print ""
def test_00000(self):
""" Remove possibly undeleted test users from previous test
"""
self.remove_test_users()
def test_00010(self):
self.run_bundle(10)
def test_00100(self):
self.run_bundle(100)
def test_01000(self):
self.run_bundle(1000)
def _test_10000(self):
""" This test should be enabled preferably against MS Active Directory.
It takes quite the time against Samba4 (1-2 days).
"""
self.run_bundle(10000)
class AclSearchSpeedTest(SpeedTest):
def setUp(self):
super(AclSearchSpeedTest, self).setUp()
self.ldb_admin.newuser("acltestuser", "samba123@")
self.sd_utils = sd_utils.SDUtils(self.ldb_admin)
self.ldb_user = self.get_ldb_connection("acltestuser", "samba123@")
self.user_sid = self.sd_utils.get_object_sid(self.get_user_dn("acltestuser"))
def tearDown(self):
super(AclSearchSpeedTest, self).tearDown()
delete_force(self.ldb_admin, self.get_user_dn("acltestuser"))
def run_search_bundle(self, num, _ldb):
print "\n=== Creating %s user objects ===\n" % num
self.create_bundle(num)
mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid))
for i in range(num):
self.sd_utils.dacl_add_ace("cn=speedtestuser%d,cn=Users,%s" %
(i+1, self.base_dn), mod)
print "\n=== %s user objects created ===\n" % num
print "\n=== Test search on %s user objects ===\n" % num
avg_search = Decimal("0.0")
for x in [1, 2, 3]:
start = time.time()
res = _ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_SUBTREE)
res_search = Decimal( str(time.time() - start) )
avg_search += res_search
print " Attempt %s SEARCH: %.3fs" % ( x, float(res_search) )
print "Average Search: %.3fs" % float( Decimal(avg_search) / Decimal("3.0") )
self.remove_bundle(num)
def get_user_dn(self, name):
return "CN=%s,CN=Users,%s" % (name, self.base_dn)
def get_ldb_connection(self, target_username, target_password):
creds_tmp = Credentials()
creds_tmp.set_username(target_username)
creds_tmp.set_password(target_password)
creds_tmp.set_domain(creds.get_domain())
creds_tmp.set_realm(creds.get_realm())
creds_tmp.set_workstation(creds.get_workstation())
creds_tmp.set_gensec_features(creds_tmp.get_gensec_features()
| gensec.FEATURE_SEAL)
ldb_target = SamDB(url=host, credentials=creds_tmp, lp=lp)
return ldb_target
def test_search_01000(self):
self.run_search_bundle(1000, self.ldb_admin)
def test_search2_01000(self):
# allow the user to see objects but not attributes, all attributes will be filtered out
mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid))
self.sd_utils.dacl_add_ace("CN=Users,%s" % self.base_dn, mod)
self.run_search_bundle(1000, self.ldb_user)
# Important unit running information
if not "://" in host:
host = "ldap://%s" % host
ldb_options = ["modules:paged_searches"]
ldb = SamDB(host, credentials=creds, session_info=system_session(), lp=lp, options=ldb_options)
runner = SubunitTestRunner()
rc = 0
if not runner.run(unittest.makeSuite(SpeedTestAddDel)).wasSuccessful():
rc = 1
if not runner.run(unittest.makeSuite(AclSearchSpeedTest)).wasSuccessful():
rc = 1
sys.exit(rc)
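# Usage sketch (hypothetical DC and credentials; -U is provided by the shared
# samba credential options):
#
#     ./speedtest.py -U 'Administrator%Passw0rd' ldap://dc1.example.com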
| gpl-3.0 |
jakar/odoo-bank-statement-reconcile | __unported__/account_statement_completion_label/partner.py | 18 | 1385 | # -*- coding: utf-8 -*-
###############################################################################
#
# account_statement_completion_label for OpenERP
# Copyright (C) 2013 Akretion (http://www.akretion.com). All Rights Reserved
# @author Benoît GUILLOT <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp.osv import fields, orm
class res_partner(orm.Model):
_inherit = "res.partner"
_columns = {
'bank_statement_label': fields.one2many('account.statement.label',
'partner_id',
'Bank Statement Label'),
}
| agpl-3.0 |
tvibliani/odoo | addons/document/report/__init__.py | 444 | 1068 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kgullikson88/IGRINS_Scripts | Search_Fast.py | 1 | 2751 | import sys
import os
import GenericSearch
import pandas
# Define regions contaminated by telluric residuals or other defects. We will not use those regions in the cross-correlation
badregions = [[0, 1510], # Blue end of H band (lots of water absorption)
#[1561, 1615], # CO2 band that is often poorly corrected (for now at least...)
[1740, 2090], #In between H and K bands (lots of water absorption)
[2348, 2500], #Red end of K band (lots of water absorption)
[1510, 1520], #Temporary...
[1688,1740],
[2313, 2350]]
if "darwin" in sys.platform:
modeldir = "/Volumes/DATADRIVE/Stellar_Models/Sorted/Stellar/NearIR/"
elif "linux" in sys.platform:
modeldir = "/media/FreeAgent_Drive/SyntheticSpectra/Sorted/Stellar/NearIR/"
else:
modeldir = raw_input("sys.platform not recognized. Please enter model directory below: ")
if not modeldir.endswith("/"):
modeldir = modeldir + "/"
def add_oh_lines(oh_file, badregions=[], minstrength=1.0, tol=0.05):
    oh_data = pandas.read_csv(oh_file, header=None, sep=" ", skipinitialspace=True, names=['wave', 'strength'])
oh = oh_data[oh_data['strength'] > minstrength]
n = 1.0 + 2.735182e-4 + 131.4182 / oh['wave'] ** 2 + 2.76249e8 / oh['wave'] ** 4
oh['wave'] = oh['wave'] / (n * 10.0)
for wave in oh['wave'].values:
badregions.append([wave - tol, wave + tol])
return badregions
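# Illustrative sketch (hypothetical file contents): add_oh_lines() converts the
# catalog's vacuum wavelengths in Angstroms to air wavelengths in nm (via the
# refractive-index formula above), then masks +/- `tol` nm around each line
# stronger than `minstrength`:
#
#     >>> regions = add_oh_lines("ohlines.dat", badregions=[], minstrength=1.0)
#     >>> regions[0]  # e.g. [1712.45, 1712.55] for a line near 1712.5 nm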
if __name__ == "__main__":
#Parse command line arguments:
fileList = []
interp_regions = []
extensions = True
tellurics = False
trimsize = 100
for arg in sys.argv[1:]:
if "-e" in arg:
extensions = False
if "-t" in arg:
tellurics = True #telluric lines modeled but not removed
else:
fileList.append(arg)
# Add strong oh lines to interp_regions
oh_file = "{}/School/Research/IGRINS_data/plp/master_calib/ohlines.dat".format(os.environ['HOME'])
interp_regions = add_oh_lines(oh_file, badregions=interp_regions)
GenericSearch.CompanionSearch(fileList,
extensions=extensions,
resolution=45000.0,
trimsize=trimsize,
vsini_values=[1.0, 10.0, 20.0, 30.0, 40.0],
observatory="McDonald",
vbary_correct=True,
debug=False,
badregions=badregions,
interp_regions=interp_regions,
modeldir=modeldir,
addmode="weighted")
| mit |
RubenKelevra/rethinkdb | external/v8_3.30.33.16/build/gyp/PRESUBMIT.py | 496 | 3373 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
# Needs style fix.
'pylib/gyp/generator/xcode.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
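  # For reference, the regex above accepts headers of the form:
  #   Copyright (c) 2012 Google Inc. All rights reserved.
  #   Use of this source code is governed by a BSD-style license that can be
  #   found in the LICENSE file.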
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
def GetPreferredTrySlaves():
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac', 'gyp-android']
| agpl-3.0 |
jarvys/django-1.7-jdb | django/middleware/common.py | 52 | 7351 | import hashlib
import logging
import re
import warnings
from django.conf import settings
from django.core.mail import mail_managers
from django.core import urlresolvers
from django import http
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils import six
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
logger.warning('Forbidden (User agent): %s', request.path,
extra={
'status_code': 403,
'request': request
}
)
return http.HttpResponseForbidden('<h1>Forbidden</h1>')
# Check for a redirect based on settings.APPEND_SLASH
# and settings.PREPEND_WWW
host = request.get_host()
old_url = [host, request.path]
new_url = old_url[:]
if (settings.PREPEND_WWW and old_url[0] and
not old_url[0].startswith('www.')):
new_url[0] = 'www.' + old_url[0]
# Append a slash if APPEND_SLASH is set and the URL doesn't have a
# trailing slash and there is no pattern for the current path
if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
urlconf = getattr(request, 'urlconf', None)
if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
new_url[1] = new_url[1] + '/'
if settings.DEBUG and request.method == 'POST':
raise RuntimeError((""
"You called this URL via POST, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining POST data. "
"Change your form to point to %s%s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django "
"settings.") % (new_url[0], new_url[1]))
if new_url == old_url:
# No redirects required.
return
if new_url[0]:
newurl = "%s://%s%s" % (
request.scheme,
new_url[0], urlquote(new_url[1]))
else:
newurl = urlquote(new_url[1])
if request.META.get('QUERY_STRING', ''):
if six.PY3:
newurl += '?' + request.META['QUERY_STRING']
else:
# `query_string` is a bytestring. Appending it to the unicode
# string `newurl` will fail if it isn't ASCII-only. This isn't
# allowed; only broken software generates such query strings.
# Better drop the invalid query string than crash (#15152).
try:
newurl += '?' + request.META['QUERY_STRING'].decode()
except UnicodeDecodeError:
pass
return http.HttpResponsePermanentRedirect(newurl)
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
"""
if settings.SEND_BROKEN_LINK_EMAILS:
warnings.warn("SEND_BROKEN_LINK_EMAILS is deprecated. "
"Use BrokenLinkEmailsMiddleware instead.",
RemovedInDjango18Warning, stacklevel=2)
BrokenLinkEmailsMiddleware().process_response(request, response)
if settings.USE_ETAGS:
if response.has_header('ETag'):
etag = response['ETag']
elif response.streaming:
etag = None
else:
etag = '"%s"' % hashlib.md5(response.content).hexdigest()
if etag is not None:
if (200 <= response.status_code < 300
and request.META.get('HTTP_IF_NONE_MATCH') == etag):
cookies = response.cookies
response = http.HttpResponseNotModified()
response.cookies = cookies
else:
response['ETag'] = etag
return response
class BrokenLinkEmailsMiddleware(object):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Returns True if the given request *shouldn't* notify the site managers.
"""
# '?' in referer is identified as search engine source
if (not referer or
(not self.is_internal_request(domain, referer) and '?' in referer)):
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| bsd-3-clause |
waseem18/oh-mainline | mysite/search/migrations/0041_add_created_and_modified_timestamps_to_all_models.py | 17 | 14730 | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Adding field 'BugAnswer.created_date'
db.add_column('search_buganswer', 'created_date', orm['search.buganswer:created_date'])
# Adding field 'ProjectInvolvementQuestion.created_date'
db.add_column('search_projectinvolvementquestion', 'created_date', orm['search.projectinvolvementquestion:created_date'])
# Adding field 'Bug.modified_date'
db.add_column('search_bug', 'modified_date', orm['search.bug:modified_date'])
# Adding field 'HitCountCache.created_date'
db.add_column('search_hitcountcache', 'created_date', orm['search.hitcountcache:created_date'])
# Adding field 'HitCountCache.modified_date'
db.add_column('search_hitcountcache', 'modified_date', orm['search.hitcountcache:modified_date'])
# Adding field 'Answer.modified_date'
db.add_column('search_answer', 'modified_date', orm['search.answer:modified_date'])
# Adding field 'Answer.created_date'
db.add_column('search_answer', 'created_date', orm['search.answer:created_date'])
# Adding field 'Bug.created_date'
db.add_column('search_bug', 'created_date', orm['search.bug:created_date'])
# Adding field 'ProjectInvolvementQuestion.modified_date'
db.add_column('search_projectinvolvementquestion', 'modified_date', orm['search.projectinvolvementquestion:modified_date'])
# Adding field 'Project.created_date'
db.add_column('search_project', 'created_date', orm['search.project:created_date'])
# Adding field 'Project.modified_date'
db.add_column('search_project', 'modified_date', orm['search.project:modified_date'])
# Adding field 'BugAnswer.modified_date'
db.add_column('search_buganswer', 'modified_date', orm['search.buganswer:modified_date'])
# Adding field 'Bug.as_appears_in_distribution'
#db.add_column('search_bug', 'as_appears_in_distribution', orm['search.bug:as_appears_in_distribution'])
# Changing field 'Bug.last_polled'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0)))
db.alter_column('search_bug', 'last_polled', orm['search.bug:last_polled'])
def backwards(self, orm):
# Deleting field 'BugAnswer.created_date'
db.delete_column('search_buganswer', 'created_date')
# Deleting field 'ProjectInvolvementQuestion.created_date'
db.delete_column('search_projectinvolvementquestion', 'created_date')
# Deleting field 'Bug.modified_date'
db.delete_column('search_bug', 'modified_date')
# Deleting field 'HitCountCache.created_date'
db.delete_column('search_hitcountcache', 'created_date')
# Deleting field 'HitCountCache.modified_date'
db.delete_column('search_hitcountcache', 'modified_date')
# Deleting field 'Answer.modified_date'
db.delete_column('search_answer', 'modified_date')
# Deleting field 'Answer.created_date'
db.delete_column('search_answer', 'created_date')
# Deleting field 'Bug.created_date'
db.delete_column('search_bug', 'created_date')
# Deleting field 'ProjectInvolvementQuestion.modified_date'
db.delete_column('search_projectinvolvementquestion', 'modified_date')
# Deleting field 'Project.created_date'
db.delete_column('search_project', 'created_date')
# Deleting field 'Project.modified_date'
db.delete_column('search_project', 'modified_date')
# Deleting field 'BugAnswer.modified_date'
db.delete_column('search_buganswer', 'modified_date')
# Deleting field 'Bug.as_appears_in_distribution'
db.delete_column('search_bug', 'as_appears_in_distribution')
# Changing field 'Bug.last_polled'
# (to signature: django.db.models.fields.DateTimeField())
db.alter_column('search_bug', 'last_polled', orm['search.bug:last_polled'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'search.answer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
'search.bug': {
'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.buganswer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'details': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']", 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bug_answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'search.hitcountcache': {
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'hit_count': ('django.db.models.fields.IntegerField', [], {}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'})
},
'search.project': {
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'search.projectinvolvementquestion': {
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bug_style': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'key_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['search']
| agpl-3.0 |
fbossy/SickRage | lib/guessit/plugins/transformers.py | 33 | 9580 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
from pkg_resources import EntryPoint
from guessit.options import reload as reload_options
from stevedore import ExtensionManager
from stevedore.extension import Extension
log = getLogger(__name__)
class Transformer(object): # pragma: no cover
def __init__(self, priority=0):
self.priority = priority
self.log = getLogger(self.name)
@property
def name(self):
return self.__class__.__name__
def supported_properties(self):
return {}
def second_pass_options(self, mtree, options=None):
return None
def should_process(self, mtree, options=None):
return True
def process(self, mtree, options=None):
pass
def post_process(self, mtree, options=None):
pass
def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
pass
def rate_quality(self, guess, *props):
return 0
class CustomTransformerExtensionManager(ExtensionManager):
def __init__(self, namespace='guessit.transformer', invoke_on_load=True,
invoke_args=(), invoke_kwds={}, propagate_map_exceptions=True, on_load_failure_callback=None,
verify_requirements=False):
super(CustomTransformerExtensionManager, self).__init__(namespace=namespace,
invoke_on_load=invoke_on_load,
invoke_args=invoke_args,
invoke_kwds=invoke_kwds,
propagate_map_exceptions=propagate_map_exceptions,
on_load_failure_callback=on_load_failure_callback,
verify_requirements=verify_requirements)
@staticmethod
def order_extensions(extensions):
"""Order the loaded transformers
It should follow those rules
- website before language (eg: tvu.org.ru vs russian)
- language before episodes_rexps
- properties before language (eg: he-aac vs hebrew)
- release_group before properties (eg: XviD-?? vs xvid)
"""
extensions.sort(key=lambda ext: -ext.obj.priority)
return extensions
@staticmethod
def _load_one_plugin(ep, invoke_on_load, invoke_args, invoke_kwds, verify_requirements=True):
if not ep.dist:
# `require` argument of ep.load() is deprecated in newer versions of setuptools
if hasattr(ep, 'resolve'):
plugin = ep.resolve()
elif hasattr(ep, '_load'):
plugin = ep._load()
else:
plugin = ep.load(require=False)
else:
plugin = ep.load()
if invoke_on_load:
obj = plugin(*invoke_args, **invoke_kwds)
else:
obj = None
return Extension(ep.name, ep, plugin, obj)
def _load_plugins(self, invoke_on_load, invoke_args, invoke_kwds, verify_requirements):
return self.order_extensions(super(CustomTransformerExtensionManager, self)._load_plugins(invoke_on_load, invoke_args, invoke_kwds, verify_requirements))
def objects(self):
return self.map(self._get_obj)
@staticmethod
def _get_obj(ext):
return ext.obj
def object(self, name):
try:
return self[name].obj
except KeyError:
return None
def register_module(self, name=None, module_name=None, attrs=(), entry_point=None):
if entry_point:
ep = EntryPoint.parse(entry_point)
else:
ep = EntryPoint(name, module_name, attrs)
loaded = self._load_one_plugin(ep, invoke_on_load=True, invoke_args=(), invoke_kwds={})
if loaded:
self.extensions.append(loaded)
self.extensions = self.order_extensions(self.extensions)
self._extensions_by_name = None
class DefaultTransformerExtensionManager(CustomTransformerExtensionManager):
@property
def _internal_entry_points(self):
return ['split_path_components = guessit.transfo.split_path_components:SplitPathComponents',
'guess_filetype = guessit.transfo.guess_filetype:GuessFiletype',
'split_explicit_groups = guessit.transfo.split_explicit_groups:SplitExplicitGroups',
'guess_date = guessit.transfo.guess_date:GuessDate',
'guess_website = guessit.transfo.guess_website:GuessWebsite',
'guess_release_group = guessit.transfo.guess_release_group:GuessReleaseGroup',
'guess_properties = guessit.transfo.guess_properties:GuessProperties',
'guess_language = guessit.transfo.guess_language:GuessLanguage',
'guess_video_rexps = guessit.transfo.guess_video_rexps:GuessVideoRexps',
'guess_episodes_rexps = guessit.transfo.guess_episodes_rexps:GuessEpisodesRexps',
'guess_weak_episodes_rexps = guessit.transfo.guess_weak_episodes_rexps:GuessWeakEpisodesRexps',
'guess_bonus_features = guessit.transfo.guess_bonus_features:GuessBonusFeatures',
'guess_year = guessit.transfo.guess_year:GuessYear',
'guess_country = guessit.transfo.guess_country:GuessCountry',
'guess_idnumber = guessit.transfo.guess_idnumber:GuessIdnumber',
'split_on_dash = guessit.transfo.split_on_dash:SplitOnDash',
'guess_episode_info_from_position = guessit.transfo.guess_episode_info_from_position:GuessEpisodeInfoFromPosition',
'guess_movie_title_from_position = guessit.transfo.guess_movie_title_from_position:GuessMovieTitleFromPosition',
'guess_episode_details = guessit.transfo.guess_episode_details:GuessEpisodeDetails',
'expected_series = guessit.transfo.expected_series:ExpectedSeries',
'expected_title = guessit.transfo.expected_title:ExpectedTitle',]
def _find_entry_points(self, namespace):
entry_points = {}
# Internal entry points
if namespace == self.namespace:
for internal_entry_point_str in self._internal_entry_points:
internal_entry_point = EntryPoint.parse(internal_entry_point_str)
entry_points[internal_entry_point.name] = internal_entry_point
# Package entry points
setuptools_entrypoints = super(DefaultTransformerExtensionManager, self)._find_entry_points(namespace)
for setuptools_entrypoint in setuptools_entrypoints:
entry_points[setuptools_entrypoint.name] = setuptools_entrypoint
return list(entry_points.values())
_extensions = None
def all_transformers():
return _extensions.objects()
def get_transformer(name):
return _extensions.object(name)
def add_transformer(name, module_name, class_name):
"""
Add a transformer
:param name: the name of the transformer. ie: 'guess_regexp_id'
:param name: the module name. ie: 'flexget.utils.parsers.transformers.guess_regexp_id'
:param class_name: the class name. ie: 'GuessRegexpId'
"""
_extensions.register_module(name, module_name, (class_name,))
def add_transformer(entry_point):
"""
Add a transformer
:param entry_point: entry point spec format. ie: 'guess_regexp_id = flexget.utils.parsers.transformers.guess_regexp_id:GuessRegexpId'
"""
_extensions.register_module(entry_point = entry_point)
def reload(custom=False):
"""
Reload extension manager with default or custom one.
:param custom: if True, custom manager will be used, else default one.
Default manager will load default extensions from guessit and setuptools packaging extensions
Custom manager will not load default extensions from guessit, using only setuptools packaging extensions.
:type custom: boolean
"""
global _extensions
if custom:
_extensions = CustomTransformerExtensionManager()
else:
_extensions = DefaultTransformerExtensionManager()
reload_options(all_transformers())
reload()
| gpl-3.0 |
jedi22/osquery | tools/tests/test_osqueryi.py | 5 | 9121 | #!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under both the Apache 2.0 license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pyexpect.replwrap will not work with unicode_literals
# from __future__ import unicode_literals
import os
import random
import sys
import unittest
import utils
# osquery-specific testing utils
import test_base
SHELL_TIMEOUT = 10
EXIT_CATASTROPHIC = 78
class OsqueryiTest(unittest.TestCase):
def setUp(self):
self.binary = test_base.getLatestOsqueryBinary('osqueryi')
self.osqueryi = test_base.OsqueryWrapper(command=self.binary)
self.dbpath = "%s%s" % (
test_base.CONFIG["options"]["database_path"],
str(random.randint(1000, 9999)))
@unittest.skipIf(os.name == "nt", "stderr tests not supported on Windows.")
def test_error(self):
'''Test that we throw an error on bad query'''
self.osqueryi.run_command(' ')
self.assertRaises(test_base.OsqueryException,
self.osqueryi.run_query, 'foo')
def test_config_check_success(self):
'''Test that a 0-config passes'''
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--config_path=%s/test.config" % test_base.SCRIPT_DIR,
"--extensions_autoload=",
"--verbose",
],
SHELL_TIMEOUT)
self.assertEqual(proc.stdout, "")
print(proc.stdout)
print(proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
def test_config_dump(self):
'''Test that config raw output is dumped when requested'''
config = os.path.join(test_base.SCRIPT_DIR, "test_noninline_packs.conf")
proc = test_base.TimeoutRunner([
self.binary,
"--config_dump",
"--config_path=%s" % config,
"--extensions_autoload=",
"--verbose",
],
SHELL_TIMEOUT)
content = ""
with open(config, 'r') as fh:
content = fh.read()
actual = proc.stdout
if os.name == "nt":
actual = actual.replace('\r', '')
self.assertEqual(actual, '{"%s": %s}\n' % (config, content))
print (proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
@test_base.flaky
def test_config_check_failure_invalid_path(self):
'''Test that a missing config fails'''
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--disable_extensions",
"--verbose",
"--config_path=/this/path/does/not/exist"
],
SHELL_TIMEOUT)
self.assertNotEqual(proc.stderr, "")
print(proc.stdout)
print(proc.stderr)
self.assertEqual(proc.proc.poll(), 1)
def test_config_check_failure_valid_path(self):
# Now with a valid path, but invalid content.
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--extensions_autoload=",
"--verbose",
"--database_path=%s" % (self.dbpath),
"--config_path=%s" % os.path.join(test_base.SCRIPT_DIR, "test.badconfig")
],
SHELL_TIMEOUT)
self.assertEqual(proc.proc.poll(), 1)
self.assertNotEqual(proc.stderr, "")
def test_config_check_failure_missing_plugin(self):
# Finally with a missing config plugin
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--extensions_autoload=",
"--verbose",
"--config_plugin=does_not_exist"
],
SHELL_TIMEOUT)
self.assertNotEqual(proc.stderr, "")
self.assertNotEqual(proc.proc.poll(), 0)
# Also do not accept a SIGSEG
self.assertEqual(proc.proc.poll(), EXIT_CATASTROPHIC)
def test_config_check_example(self):
'''Test that the example config passes'''
example_path = os.path.join("deployment", "osquery.example.conf")
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--config_path=%s" % os.path.join(test_base.SCRIPT_DIR, "..", example_path),
"--extensions_autoload=",
"--verbose",
],
SHELL_TIMEOUT)
self.assertEqual(proc.stdout, "")
print (proc.stdout)
print (proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
def test_meta_commands(self):
'''Test the supported meta shell/help/info commands'''
commands = [
'.help',
'.all',
'.all osquery_info',
'.all this_table_does_not_exist',
'.echo',
'.echo on',
'.echo off',
'.header',
'.header off',
'.header on',
'.mode',
'.mode csv',
'.mode column',
'.mode line',
'.mode list',
'.mode pretty',
'.mode this_mode_does_not_exists',
'.nullvalue',
'.nullvalue ""',
'.print',
'.print hello',
'.schema osquery_info',
'.schema this_table_does_not_exist',
'.schema',
'.separator',
'.separator ,',
'.show',
'.tables osquery',
'.tables osquery_info',
'.tables this_table_does_not_exist',
'.tables',
'.trace',
'.width',
'.width 80',
'.timer',
'.timer on',
'.timer off'
]
        for command in commands:
            self.osqueryi.run_command(command)
def test_json_output(self):
'''Test that the output of --json is valid json'''
proc = test_base.TimeoutRunner([
self.binary,
"select 0",
"--disable_extensions",
"--json",
],
SHELL_TIMEOUT
)
if os.name == "nt":
self.assertEqual(proc.stdout, "[\r\n {\"0\":\"0\"}\r\n]\r\n")
else:
self.assertEqual(proc.stdout, "[\n {\"0\":\"0\"}\n]\n")
print(proc.stdout)
print(proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
@test_base.flaky
def test_time(self):
'''Demonstrating basic usage of OsqueryWrapper with the time table'''
self.osqueryi.run_command(' ') # flush error output
result = self.osqueryi.run_query(
'SELECT hour, minutes, seconds FROM time;')
self.assertEqual(len(result), 1)
row = result[0]
self.assertTrue(0 <= int(row['hour']) <= 24)
self.assertTrue(0 <= int(row['minutes']) <= 60)
self.assertTrue(0 <= int(row['seconds']) <= 60)
# TODO: Running foreign table tests as non-priv user fails
@test_base.flaky
@unittest.skipIf(os.name == "nt", "foreign table tests not supported on Windows.")
def test_foreign_tables(self):
'''Requires the --enable_foreign flag to add at least one table.'''
self.osqueryi.run_command(' ')
query = 'SELECT count(1) c FROM osquery_registry;'
result = self.osqueryi.run_query(query)
before = int(result[0]['c'])
osqueryi2 = test_base.OsqueryWrapper(self.binary,
args={"enable_foreign": True})
osqueryi2.run_command(' ')
# This execution fails if the user is not Administrator on Windows
result = osqueryi2.run_query(query)
after = int(result[0]['c'])
self.assertGreater(after, before)
@test_base.flaky
def test_time_using_all(self):
self.osqueryi.run_command(' ')
result = self.osqueryi.run_command('.all time')
self.assertNotEqual(result.rstrip(), "Error querying table: time")
@test_base.flaky
def test_config_bad_json(self):
self.osqueryi = test_base.OsqueryWrapper(self.binary,
args={"config_path": "/"})
result = self.osqueryi.run_query('SELECT * FROM time;')
self.assertEqual(len(result), 1)
@test_base.flaky
def test_atc(self):
local_osquery_instance = test_base.OsqueryWrapper(self.binary,
args={"config_path": "test.config"})
result = local_osquery_instance.run_query('SELECT a_number FROM test_atc')
self.assertEqual(result, [{'a_number':'314159'}])
if __name__ == '__main__':
test_base.Tester().run()
| bsd-3-clause |
sonuyos/couchpotato | libs/html5lib/treebuilders/etree.py | 721 | 12609 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
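# Illustrative use (a sketch; html5lib itself normally reaches this factory
# through treebuilders.getTreeBuilder("etree")):
#   import xml.etree.ElementTree as ElementTree
#   TreeBuilder = getETreeModule(ElementTree).TreeBuilder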
| gpl-3.0 |
srene/ns-3-inrpp | src/dsdv/bindings/callbacks_list.py | 151 | 1222 | callback_classes = [
['void', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::Socket::SocketErrno', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| gpl-2.0 |
bq/bitbloq-offline | app/res/web2board/linux/res/Scons/sconsFiles/SCons/Tool/rpmutils.py | 6 | 17072 | """SCons.Tool.rpmutils.py
RPM specific helper routines for general usage in the test framework
and SCons core modules.
Since we check for the RPM package target name in several places,
we have to know which machine/system name RPM will use for the current
hardware setup. The following dictionaries and functions try to
mimic the exact naming rules of the RPM source code.
They were directly derived from the file "rpmrc.in" of the version
rpm-4.9.1.3. For updating to a more recent version of RPM, this Python
script can be used standalone. The usage() function below shows the
exact syntax.
"""
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/rpmutils.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import platform
import subprocess
# Start of rpmrc dictionaries (Marker, don't change or remove!)
os_canon = {
'AIX' : ['AIX','5'],
'AmigaOS' : ['AmigaOS','5'],
'BSD_OS' : ['bsdi','12'],
'CYGWIN32_95' : ['cygwin32','15'],
'CYGWIN32_NT' : ['cygwin32','14'],
'Darwin' : ['darwin','21'],
'FreeBSD' : ['FreeBSD','8'],
'HP-UX' : ['hpux10','6'],
'IRIX' : ['Irix','2'],
'IRIX64' : ['Irix64','10'],
'Linux' : ['Linux','1'],
'Linux/390' : ['OS/390','20'],
'Linux/ESA' : ['VM/ESA','20'],
'MacOSX' : ['macosx','21'],
'MiNT' : ['FreeMiNT','17'],
'NEXTSTEP' : ['NextStep','11'],
'OS/390' : ['OS/390','18'],
'OSF1' : ['osf1','7'],
'SCO_SV' : ['SCO_SV3.2v5.0.2','9'],
'SunOS4' : ['SunOS','4'],
'SunOS5' : ['solaris','3'],
'UNIX_SV' : ['MP_RAS','16'],
'VM/ESA' : ['VM/ESA','19'],
'machten' : ['machten','13'],
'osf3.2' : ['osf1','7'],
'osf4.0' : ['osf1','7'],
}
buildarch_compat = {
'alpha' : ['noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64'],
'ia64' : ['noarch'],
'm68k' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'ppc' : ['noarch','fat'],
'ppc32dy4' : ['noarch'],
'ppc64' : ['noarch','fat'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['noarch'],
'ppc8560' : ['noarch'],
'ppciseries' : ['noarch'],
'ppcpseries' : ['noarch'],
's390' : ['noarch'],
's390x' : ['noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9v'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['noarch'],
'sun4d' : ['noarch'],
'sun4m' : ['noarch'],
'sun4u' : ['noarch'],
'x86_64' : ['noarch'],
}
os_compat = {
'BSD_OS' : ['bsdi'],
'Darwin' : ['MacOSX'],
'FreeMiNT' : ['mint','MiNT','TOS'],
'IRIX64' : ['IRIX'],
'MiNT' : ['FreeMiNT','mint','TOS'],
'TOS' : ['FreeMiNT','MiNT','mint'],
'bsdi4.0' : ['bsdi'],
'hpux10.00' : ['hpux9.07'],
'hpux10.01' : ['hpux10.00'],
'hpux10.10' : ['hpux10.01'],
'hpux10.20' : ['hpux10.10'],
'hpux10.30' : ['hpux10.20'],
'hpux11.00' : ['hpux10.30'],
'hpux9.05' : ['hpux9.04'],
'hpux9.07' : ['hpux9.05'],
'mint' : ['FreeMiNT','MiNT','TOS'],
'ncr-sysv4.3' : ['ncr-sysv4.2'],
'osf4.0' : ['osf3.2','osf1'],
'solaris2.4' : ['solaris2.3'],
'solaris2.5' : ['solaris2.3','solaris2.4'],
'solaris2.6' : ['solaris2.3','solaris2.4','solaris2.5'],
'solaris2.7' : ['solaris2.3','solaris2.4','solaris2.5','solaris2.6'],
}
arch_compat = {
'alpha' : ['axp','noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64','athlon','noarch'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i370' : ['noarch'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64','athlon','noarch'],
'ia64' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'osfmach3_i386' : ['i486'],
'osfmach3_i486' : ['i486','osfmach3_i386'],
'osfmach3_i586' : ['i586','osfmach3_i486'],
'osfmach3_i686' : ['i686','osfmach3_i586'],
'osfmach3_ppc' : ['ppc'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc' : ['rs6000'],
'ppc32dy4' : ['ppc'],
'ppc64' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
'rs6000' : ['noarch','fat'],
's390' : ['noarch'],
's390x' : ['s390','noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['amd64','athlon','noarch'],
}
buildarchtranslate = {
'alphaev5' : ['alpha'],
'alphaev56' : ['alpha'],
'alphaev6' : ['alpha'],
'alphaev67' : ['alpha'],
'alphapca56' : ['alpha'],
'amd64' : ['x86_64'],
'armv3l' : ['armv3l'],
'armv4b' : ['armv4b'],
'armv4l' : ['armv4l'],
'armv4tl' : ['armv4tl'],
'armv5tejl' : ['armv5tejl'],
'armv5tel' : ['armv5tel'],
'armv6l' : ['armv6l'],
'armv7l' : ['armv7l'],
'atariclone' : ['m68kmint'],
'atarist' : ['m68kmint'],
'atariste' : ['m68kmint'],
'ataritt' : ['m68kmint'],
'athlon' : ['i386'],
'falcon' : ['m68kmint'],
'geode' : ['i386'],
'hades' : ['m68kmint'],
'i386' : ['i386'],
'i486' : ['i386'],
'i586' : ['i386'],
'i686' : ['i386'],
'ia32e' : ['x86_64'],
'ia64' : ['ia64'],
'milan' : ['m68kmint'],
'osfmach3_i386' : ['i386'],
'osfmach3_i486' : ['i386'],
'osfmach3_i586' : ['i386'],
'osfmach3_i686' : ['i386'],
'osfmach3_ppc' : ['ppc'],
'pentium3' : ['i386'],
'pentium4' : ['i386'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc32dy4' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
's390' : ['s390'],
's390x' : ['s390x'],
'sh3' : ['sh3'],
'sh4' : ['sh4'],
'sh4a' : ['sh4'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparc'],
'sparcv9v' : ['sparc'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['x86_64'],
}
optflags = {
'alpha' : ['-O2','-g','-mieee'],
'alphaev5' : ['-O2','-g','-mieee','-mtune=ev5'],
'alphaev56' : ['-O2','-g','-mieee','-mtune=ev56'],
'alphaev6' : ['-O2','-g','-mieee','-mtune=ev6'],
'alphaev67' : ['-O2','-g','-mieee','-mtune=ev67'],
'alphapca56' : ['-O2','-g','-mieee','-mtune=pca56'],
'amd64' : ['-O2','-g'],
'armv3l' : ['-O2','-g','-march=armv3'],
'armv4b' : ['-O2','-g','-march=armv4'],
'armv4l' : ['-O2','-g','-march=armv4'],
'armv4tl' : ['-O2','-g','-march=armv4t'],
'armv5tejl' : ['-O2','-g','-march=armv5te'],
'armv5tel' : ['-O2','-g','-march=armv5te'],
'armv6l' : ['-O2','-g','-march=armv6'],
'armv7l' : ['-O2','-g','-march=armv7'],
'atariclone' : ['-O2','-g','-fomit-frame-pointer'],
'atarist' : ['-O2','-g','-fomit-frame-pointer'],
'atariste' : ['-O2','-g','-fomit-frame-pointer'],
'ataritt' : ['-O2','-g','-fomit-frame-pointer'],
'athlon' : ['-O2','-g','-march=athlon'],
'falcon' : ['-O2','-g','-fomit-frame-pointer'],
'fat' : ['-O2','-g','-arch','i386','-arch','ppc'],
'geode' : ['-Os','-g','-m32','-march=geode'],
'hades' : ['-O2','-g','-fomit-frame-pointer'],
'hppa1.0' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.1' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.2' : ['-O2','-g','-mpa-risc-1-0'],
'hppa2.0' : ['-O2','-g','-mpa-risc-1-0'],
'i386' : ['-O2','-g','-march=i386','-mtune=i686'],
'i486' : ['-O2','-g','-march=i486'],
'i586' : ['-O2','-g','-march=i586'],
'i686' : ['-O2','-g','-march=i686'],
'ia32e' : ['-O2','-g'],
'ia64' : ['-O2','-g'],
'm68k' : ['-O2','-g','-fomit-frame-pointer'],
'milan' : ['-O2','-g','-fomit-frame-pointer'],
'mips' : ['-O2','-g'],
'mipsel' : ['-O2','-g'],
'parisc' : ['-O2','-g','-mpa-risc-1-0'],
'pentium3' : ['-O2','-g','-march=pentium3'],
'pentium4' : ['-O2','-g','-march=pentium4'],
'ppc' : ['-O2','-g','-fsigned-char'],
'ppc32dy4' : ['-O2','-g','-fsigned-char'],
'ppc64' : ['-O2','-g','-fsigned-char'],
'ppc8260' : ['-O2','-g','-fsigned-char'],
'ppc8560' : ['-O2','-g','-fsigned-char'],
'ppciseries' : ['-O2','-g','-fsigned-char'],
'ppcpseries' : ['-O2','-g','-fsigned-char'],
's390' : ['-O2','-g'],
's390x' : ['-O2','-g'],
'sh3' : ['-O2','-g'],
'sh4' : ['-O2','-g','-mieee'],
'sh4a' : ['-O2','-g','-mieee'],
'sparc' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparc64' : ['-O2','-g','-m64','-mtune=ultrasparc'],
'sparc64v' : ['-O2','-g','-m64','-mtune=niagara'],
'sparcv8' : ['-O2','-g','-m32','-mtune=ultrasparc','-mv8'],
'sparcv9' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparcv9v' : ['-O2','-g','-m32','-mtune=niagara'],
'x86_64' : ['-O2','-g'],
}
arch_canon = {
'IP' : ['sgi','7'],
'alpha' : ['alpha','2'],
'alphaev5' : ['alphaev5','2'],
'alphaev56' : ['alphaev56','2'],
'alphaev6' : ['alphaev6','2'],
'alphaev67' : ['alphaev67','2'],
'alphapca56' : ['alphapca56','2'],
'amd64' : ['amd64','1'],
'armv3l' : ['armv3l','12'],
'armv4b' : ['armv4b','12'],
'armv4l' : ['armv4l','12'],
'armv5tejl' : ['armv5tejl','12'],
'armv5tel' : ['armv5tel','12'],
'armv6l' : ['armv6l','12'],
'armv7l' : ['armv7l','12'],
'atariclone' : ['m68kmint','13'],
'atarist' : ['m68kmint','13'],
'atariste' : ['m68kmint','13'],
'ataritt' : ['m68kmint','13'],
'athlon' : ['athlon','1'],
'falcon' : ['m68kmint','13'],
'geode' : ['geode','1'],
'hades' : ['m68kmint','13'],
'i370' : ['i370','14'],
'i386' : ['i386','1'],
'i486' : ['i486','1'],
'i586' : ['i586','1'],
'i686' : ['i686','1'],
'ia32e' : ['ia32e','1'],
'ia64' : ['ia64','9'],
'm68k' : ['m68k','6'],
'm68kmint' : ['m68kmint','13'],
'milan' : ['m68kmint','13'],
'mips' : ['mips','4'],
'mipsel' : ['mipsel','11'],
'pentium3' : ['pentium3','1'],
'pentium4' : ['pentium4','1'],
'ppc' : ['ppc','5'],
'ppc32dy4' : ['ppc32dy4','5'],
'ppc64' : ['ppc64','16'],
'ppc64iseries' : ['ppc64iseries','16'],
'ppc64pseries' : ['ppc64pseries','16'],
'ppc8260' : ['ppc8260','5'],
'ppc8560' : ['ppc8560','5'],
'ppciseries' : ['ppciseries','5'],
'ppcpseries' : ['ppcpseries','5'],
'rs6000' : ['rs6000','8'],
's390' : ['s390','14'],
's390x' : ['s390x','15'],
'sh' : ['sh','17'],
'sh3' : ['sh3','17'],
'sh4' : ['sh4','17'],
'sh4a' : ['sh4a','17'],
'sparc' : ['sparc','3'],
'sparc64' : ['sparc64','2'],
'sparc64v' : ['sparc64v','2'],
'sparcv8' : ['sparcv8','3'],
'sparcv9' : ['sparcv9','3'],
'sparcv9v' : ['sparcv9v','3'],
'sun4' : ['sparc','3'],
'sun4c' : ['sparc','3'],
'sun4d' : ['sparc','3'],
'sun4m' : ['sparc','3'],
'sun4u' : ['sparc64','2'],
'x86_64' : ['x86_64','1'],
'xtensa' : ['xtensa','18'],
}
# End of rpmrc dictionaries (Marker, don't change or remove!)
def defaultMachine(use_rpm_default=True):
""" Return the canonicalized machine name. """
if use_rpm_default:
try:
# This should be the most reliable way to get the default arch
rmachine = subprocess.check_output(['rpm', '--eval=%_target_cpu'], shell=False).rstrip()
except Exception as e:
# Something went wrong, try again by looking up platform.machine()
return defaultMachine(False)
else:
rmachine = platform.machine()
# Try to lookup the string in the canon table
if rmachine in arch_canon:
rmachine = arch_canon[rmachine][0]
return rmachine
def defaultSystem():
""" Return the canonicalized system name. """
rsystem = platform.system()
# Try to lookup the string in the canon tables
if rsystem in os_canon:
rsystem = os_canon[rsystem][0]
return rsystem
def defaultNames():
""" Return the canonicalized machine and system name. """
return defaultMachine(), defaultSystem()
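# Illustrative use (a sketch, not part of the original module): tests usually
# combine the canonicalized names into an expected package file name.
#   machine, system = defaultNames()
#   expected = "foo-1.0-1.%s.rpm" % machine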
def updateRpmDicts(rpmrc, pyfile):
""" Read the given rpmrc file with RPM definitions and update the
info dictionaries in the file pyfile with it.
The arguments will usually be 'rpmrc.in' from a recent RPM source
tree, and 'rpmutils.py' referring to this script itself.
See also usage() below.
"""
try:
# Read old rpmutils.py file
oldpy = open(pyfile,"r").readlines()
# Read current rpmrc.in file
rpm = open(rpmrc,"r").readlines()
# Parse for data
data = {}
# Allowed section names that get parsed
sections = ['optflags',
'arch_canon',
'os_canon',
'buildarchtranslate',
'arch_compat',
'os_compat',
'buildarch_compat']
for l in rpm:
l = l.rstrip('\n').replace(':',' ')
# Skip comments
if l.lstrip().startswith('#'):
continue
tokens = l.strip().split()
if len(tokens):
key = tokens[0]
if key in sections:
# Have we met this section before?
                    if key not in data:
# No, so insert it
data[key] = {}
# Insert data
data[key][tokens[1]] = tokens[2:]
# Write new rpmutils.py file
out = open(pyfile,"w")
pm = 0
for l in oldpy:
if pm:
if l.startswith('# End of rpmrc dictionaries'):
pm = 0
out.write(l)
else:
out.write(l)
if l.startswith('# Start of rpmrc dictionaries'):
pm = 1
# Write data sections to single dictionaries
for key, entries in data.iteritems():
out.write("%s = {\n" % key)
for arch in sorted(entries.keys()):
out.write(" '%s' : ['%s'],\n" % (arch, "','".join(entries[arch])))
out.write("}\n\n")
out.close()
except:
pass
def usage():
print "rpmutils.py rpmrc.in rpmutils.py"
def main():
import sys
if len(sys.argv) < 3:
usage()
sys.exit(0)
updateRpmDicts(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
| gpl-3.0 |
Etxea/gestion_eide_web | grupos/migrations/0003_auto__add_field_alumno_activo.py | 1 | 2078 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Alumno.activo'
db.add_column(u'alumnos_alumno', 'activo',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Alumno.activo'
db.delete_column(u'alumnos_alumno', 'activo')
models = {
u'alumnos.alumno': {
'Meta': {'object_name': 'Alumno'},
'activo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'apellido1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'apellido2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cp': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5'}),
'cuenta_bancaria': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '25'}),
'dni': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '9', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'localidad': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '25'}),
'nombre': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '25'}),
'telefono1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '9'}),
'telefono2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '9', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['alumnos'] | mit |
Dhivyap/ansible | lib/ansible/module_utils/network/eos/providers/module.py | 20 | 2106 | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.eos.providers import providers
from ansible.module_utils._text import to_text
class NetworkModule(AnsibleModule):
fail_on_missing_provider = True
def __init__(self, connection=None, *args, **kwargs):
super(NetworkModule, self).__init__(*args, **kwargs)
if connection is None:
connection = Connection(self._socket_path)
self.connection = connection
@property
def provider(self):
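        # Lazily resolve the provider implementation on first access, keyed on
        # the connection's reported network_os and connection type.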
if not hasattr(self, '_provider'):
capabilities = self.from_json(self.connection.get_capabilities())
network_os = capabilities['device_info']['network_os']
network_api = capabilities['network_api']
if network_api == 'cliconf':
connection_type = 'network_cli'
cls = providers.get(network_os, self._name.split('.')[-1], connection_type)
if not cls:
msg = 'unable to find suitable provider for network os %s' % network_os
if self.fail_on_missing_provider:
self.fail_json(msg=msg)
else:
self.warn(msg)
obj = cls(self.params, self.connection, self.check_mode)
setattr(self, '_provider', obj)
return getattr(self, '_provider')
def get_facts(self, subset=None):
try:
self.provider.get_facts(subset)
except Exception as exc:
self.fail_json(msg=to_text(exc))
def edit_config(self, config_filter=None):
current_config = self.connection.get_config(flags=config_filter)
try:
commands = self.provider.edit_config(current_config)
changed = bool(commands)
return {'commands': commands, 'changed': changed}
except Exception as exc:
self.fail_json(msg=to_text(exc))
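# Illustrative use (a sketch; the argument_spec below is hypothetical and only
# shows the standard AnsibleModule calling pattern):
#   module = NetworkModule(argument_spec=dict(config_filter=dict()),
#                          supports_check_mode=True)
#   result = module.edit_config(config_filter=module.params['config_filter'])
#   module.exit_json(**result)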
| gpl-3.0 |
ArneBachmann/configr | configr/test.py | 1 | 3339 | import doctest
import json
import logging
import os
import unittest
import sys
sys.path.insert(0, "..")
import configr
class Tests(unittest.TestCase):
''' Test suite. '''
def tests_metadata(_):
_.assertTrue(hasattr(configr, "version"))
_.assertTrue(hasattr(configr.version, "__version__"))
_.assertTrue(hasattr(configr.version, "__version_info__"))
def test_details(_):
try:
for file in (f for f in os.listdir() if f.endswith(configr.EXTENSION + ".bak")):
try: os.unlink(file)
except: pass
except: pass
c = configr.Configr("myapp", data = {"d": 2}, defaults = {"e": 1})
_.assertEqual("myapp", c.__name)
_.assertEqual("myapp", c["__name"])
try: c["c"]; raise Exception("Should have crashed") # not existing data via dictionary access case
except: pass
try: c.c; raise Exception("Should have crashed") # not existing data via attribute access case
except: pass
_.assertEqual(2, c.d) # pre-defined data case
_.assertEqual(1, c["e"]) # default case
# Create some contents
c.a = "a"
c["b"] = "b"
_.assertEqual("a", c["a"])
_.assertEqual("b", c.b)
# Save to file
value = c.saveSettings(location = os.getcwd(), keys = ["a", "b"], clientCodeLocation = __file__) # CWD should be "tests" folder
_.assertIsNotNone(value.path)
_.assertIsNone(value.error)
_.assertEqual(value, c.__savedTo)
_.assertEqual(os.getcwd(), os.path.dirname(c.__savedTo.path))
_.assertEqual("a", c["a"])
_.assertEqual("b", c.b)
name = c.__savedTo.path
with open(name, "r") as fd: contents = json.loads(fd.read())
_.assertEqual({"a": "a", "b": "b"}, contents)
# Now load and see if all is correct
c = configr.Configr("myapp")
value = c.loadSettings(location = os.getcwd(), data = {"c": 33}, clientCodeLocation = __file__)
_.assertEqual(name, c.__loadedFrom.path)
_.assertIsNotNone(value.path)
_.assertIsNone(value.error)
_.assertEqual(value, c.__loadedFrom)
_.assertEqual(c.a, "a")
_.assertEqual(c["b"], "b")
_.assertEqual(c.c, 33)
os.unlink(value.path)
value = c.loadSettings(location = "bla", clientCodeLocation = __file__) # provoke error
_.assertIsNone(value.path)
_.assertIsNotNone(value.error)
# Now test removal
del c["b"]
del c.a
_.assertEqual(1, len(c.keys()))
_.assertIn("c", c.keys())
# Now stringify
_.assertEqual("Configr(c: 33)", str(c))
_.assertEqual("Configr(c: 33)", repr(c))
# Testing map functions: already done in doctest
# TODO test ignores option for saveSettings
def testNested(_):
c = configr.Configr(data = {"a": "a"}, defaults = configr.Configr(data = {"b": "b"}, defaults = configr.Configr(data = {"c": "c"})))
_.assertEqual("a", c.a)
_.assertEqual("b", c["b"])
_.assertEqual("c", c.c)
_.assertTrue("a" in c)
_.assertTrue("b" in c)
_.assertTrue("c" in c)
_.assertFalse("d" in c)
def load_tests(loader, tests, ignore):
''' The function name suffix "_tests" tells the unittest module about a test case. '''
tests.addTests(doctest.DocTestSuite(configr))
return tests
if __name__ == "__main__":
logging.basicConfig(level = logging.DEBUG, stream = sys.stderr, format = "%(asctime)-25s %(levelname)-8s %(name)-12s | %(message)s")
print(unittest.main())
| mit |
fnugrahendi/petuk.corp | installer/pysource/source/installer.py | 1 | 6796 | import os,sys
from PyQt4 import QtCore
from PyQt4 import QtGui
import functools
import itertools
import re
from subprocess import Popen
import pythoncom #-- shortcut
from win32com.shell import shell, shellcon
from installer_ui import Ui_MainWindow
class MainGUI(QtGui.QMainWindow,Ui_MainWindow):
def __init__(self,parent=None):
super(MainGUI,self).__init__(parent)
self.setupUi(self)
self.show()
#-- path
self.Path = str(__file__).replace("installer.py","").replace("\\","/")
print self.Path
self.BasePath = self.Path+"../"
try:open(self.BasePath+"archive/eula.txt","r").close()
except Exception,e:
print str(e)
self.BasePath = self.Path
print ("base path is now",self.BasePath)
#-- icon
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(self.BasePath+"archive/Garvin.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setWindowIcon(icon)
#-- deteksi 64 bit ataukah 32 bit
self.arc = 32
if ("PROGRAMFILES(X86)" in os.environ):#-- bila 64bit
self.arc = 64
self.PageL = ["INSTALL FOLDER","INSTALL BIN","QUIT"]
dataf = open(self.BasePath+"archive/eula.txt","r")
data = dataf.read()
dataf.close()
self.te_Lisensi.setText(data)
self.te_Lisensi.hide()
self.tb_Lisensi.clicked.connect(self.TampilLisensi)
self.InstallDir()
def TampilLisensi(self):
self.te_Lisensi.show()
def Goto(self,name):
self.stackedWidget.setCurrentIndex(self.PageL.index(name.upper()))
def InstallDir(self):
self.Goto("Install Folder")
self.GarvinDisconnect(self.tb_Browse.clicked)
self.GarvinDisconnect(self.tb_Install.clicked)
self.GarvinDisconnect(self.tb_Quit.clicked)
self.tb_Browse.clicked.connect(self.Browse)
self.tb_Install.clicked.connect(self.InstallBin)
self.tb_Quit.clicked.connect(self.Quit)
def Browse(self):
dialog = QtGui.QFileDialog(self)
dialog.setFileMode(QtGui.QFileDialog.Directory)
namafolder = str(dialog.getExistingDirectory(self, ("Pilih folder instalasi"),"",QtGui.QFileDialog.ShowDirsOnly| QtGui.QFileDialog.DontResolveSymlinks))
if not ("garvin" in namafolder.lower()):
if namafolder[-1]=="\\":
namafolder=namafolder[:-1]
namafolder=namafolder+"\Garvin"
self.le_InstallDir.setText(namafolder)
def InstallBin_Act(self):
self.aatime.stop()
archiveBin = self.BasePath+"archive/bin.grvz"
installpath = str(self.le_InstallDir.text())
if not os.path.exists(installpath): os.makedirs(installpath)
os.system(self.BasePath+"7z.exe -y x "+archiveBin+" -o"+installpath+" -pnyungsep")
#~ self.tb_InstallBin_Next.show()
self.InstallMysql()
os.makedirs(installpath+"\data")
def InstallBin(self):
if str(self.le_InstallDir.text())[-1]=='\\':
self.le_InstallDir.setText(str(self.le_InstallDir.text())[:-1]) #-- strip \ dibelakang
self.Goto("Install Bin")
self.lb_InstallBin_Judul.setText("Menginstall Garvin Accounting...")
self.tb_InstallBin_Next.hide()
self.aatime = QtCore.QTimer(self)
self.aatime.timeout.connect(self.InstallBin_Act)
self.aatime.start(100)
def InstallMysql_Act(self):
self.aatime.stop()
archiveBin = self.BasePath+"archive/mysql32.grvz"
if self.arc==64:
archiveBin = self.BasePath+"archive/mysql64.grvz"
installpath = str(self.le_InstallDir.text())
if not os.path.exists(installpath): os.makedirs(installpath)
os.system(self.BasePath+"7z.exe -y x "+archiveBin+" -o"+installpath+" -pnyungsep")
self.InstallConfig()
def InstallMysql(self):
self.Goto("Install Bin")
if self.arc==32:self.lb_InstallBin_Judul.setText("Menginstall MySQL database server (32 bit)...")
else:self.lb_InstallBin_Judul.setText("Menginstall MySQL database server (64 bit)...")
self.tb_InstallBin_Next.hide()
self.aatime = QtCore.QTimer(self)
self.aatime.timeout.connect(self.InstallMysql_Act)
self.aatime.start(100)
def InstallConfig_Act(self):
self.aatime.stop()
print "jalankan", str(self.le_InstallDir.text())+"\\mysql\\bin\\mysqld --port=44559"
self.childproses = Popen(str(self.le_InstallDir.text())+"\\mysql\\bin\\mysqld --port=44559")
self.aatime = QtCore.QTimer(self)
self.aatime.timeout.connect(self.InstallConfig_MysqlUser)
self.aatime.start(10000)
def InstallConfig_MysqlUser(self):
self.aatime.stop()
querytambahuser = """ CREATE USER 'gd_user_akunting'@'localhost' IDENTIFIED BY 'nyungsep';
GRANT ALL PRIVILEGES ON *.* TO 'gd_user_akunting'@'localhost' IDENTIFIED BY 'nyungsep' WITH GRANT OPTION MAX_QUERIES_PER_HOUR 0 MAX_CONNECTIONS_PER_HOUR 0 MAX_UPDATES_PER_HOUR 0 MAX_USER_CONNECTIONS 0;
"""
f = open("querytambahuser.md","w")
f.write(querytambahuser)
f.close()
print "jalankan",(str(self.le_InstallDir.text())+"\\mysql\\bin\\mysql --port=44559 -u root test < querytambahuser.md")
os.system(str(self.le_InstallDir.text())+"\\mysql\\bin\\mysql --port=44559 -u root test < querytambahuser.md")
self.Install_StartMenu()
def InstallConfig(self):
self.Goto("Install Bin")
self.lb_InstallBin_Judul.setText("Melakukan configurasi program...")
self.tb_InstallBin_Next.hide()
self.aatime = QtCore.QTimer(self)
self.aatime.timeout.connect(self.InstallConfig_Act)
self.aatime.start(100)
def Install_StartMenu(self):
#--- install start menu
self.Goto("Install Bin")
self.lb_InstallBin_Judul.setText("Memasang start menu...")
self.tb_InstallBin_Next.hide()
startmenudir = os.environ["PROGRAMDATA"]+"\\Microsoft\\Windows\\Start Menu\\Garvin Accounting"
installpath = str(self.le_InstallDir.text())
if not os.path.exists(startmenudir): os.makedirs(startmenudir)
startmenulink = startmenudir + "\\Garvin.lnk"
shortcut = pythoncom.CoCreateInstance (shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
shortcut.SetPath(installpath+"\\bin\\Garvin.exe")
shortcut.SetDescription ("Garvin Accounting")
shortcut.SetIconLocation(installpath+"\\bin\\Garvin.exe",0)
shortcut.SetWorkingDirectory(installpath+"\\bin\\")
persist_file = shortcut.QueryInterface (pythoncom.IID_IPersistFile)
persist_file.Save(startmenulink,0)
self.Selesai()
def Selesai(self):
		#--- TODO: add source file info
self.lb_InstallBin_Judul.setText("Instalasi sukses")
self.tb_InstallBin_Next.show()
self.tb_InstallBin_Next.setText("Finish")
self.tb_InstallBin_Next.clicked.connect(self.Quit)
def Quit(self):
		#-- kill the mysqld child process first
try:self.childproses.kill()
except:pass
sys.exit (0)
def GarvinDisconnect(self,stuff):
"nyimpel2ke disconnect signal, cara manggil koyo self.GarvinDisconnect(self.tbl_BukuBesar_DaftarTransaksiJurnal_Tambah_List.cellDoubleClicked)"
try:
stuff.disconnect()
return True
except:
return False
if __name__=="__main__":
app = QtGui.QApplication(sys.argv)
w = MainGUI()
sys.exit(app.exec_())
| gpl-2.0 |
cristianquaglio/odoo | addons/hr_attendance/report/attendance_errors.py | 377 | 3669 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.report import report_sxw
class attendance_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(attendance_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'lst': self._lst,
'total': self._lst_total,
'get_employees':self._get_employees,
})
def _get_employees(self, emp_ids):
emp_obj_list = self.pool.get('hr.employee').browse(self.cr, self.uid, emp_ids)
return emp_obj_list
def _lst(self, employee_id, dt_from, dt_to, max, *args):
self.cr.execute("select name as date, create_date, action, create_date-name as delay from hr_attendance where employee_id=%s and to_char(name,'YYYY-mm-dd')<=%s and to_char(name,'YYYY-mm-dd')>=%s and action IN (%s,%s) order by name", (employee_id, dt_to, dt_from, 'sign_in', 'sign_out'))
res = self.cr.dictfetchall()
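        # delay = create_date - name, i.e. how long after the attendance
        # timestamp the record was written; the sign is flipped for sign-outs
        # below, and delays of ``max`` minutes or more print as '/'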
for r in res:
if r['action'] == 'sign_out':
r['delay'] = -r['delay']
temp = r['delay'].seconds
r['delay'] = str(r['delay']).split('.')[0]
if abs(temp) < max*60:
r['delay2'] = r['delay']
else:
r['delay2'] = '/'
return res
def _lst_total(self, employee_id, dt_from, dt_to, max, *args):
self.cr.execute("select name as date, create_date, action, create_date-name as delay from hr_attendance where employee_id=%s and to_char(name,'YYYY-mm-dd')<=%s and to_char(name,'YYYY-mm-dd')>=%s and action IN (%s,%s) order by name", (employee_id, dt_to, dt_from, 'sign_in', 'sign_out'))
res = self.cr.dictfetchall()
if not res:
return ('/','/')
total2 = datetime.timedelta(seconds = 0, minutes = 0, hours = 0)
total = datetime.timedelta(seconds = 0, minutes = 0, hours = 0)
for r in res:
if r['action'] == 'sign_out':
r['delay'] = -r['delay']
total += r['delay']
if abs(r['delay'].seconds) < max*60:
total2 += r['delay']
result_dict = {
'total': total and str(total).split('.')[0],
'total2': total2 and str(total2).split('.')[0]
}
return [result_dict]
class report_hr_attendanceerrors(osv.AbstractModel):
_name = 'report.hr_attendance.report_attendanceerrors'
_inherit = 'report.abstract_report'
_template = 'hr_attendance.report_attendanceerrors'
_wrapped_report_class = attendance_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| apache-2.0 |
enigmampc/catalyst | catalyst/support/issue_227.py | 1 | 1521 | import pytz
from datetime import datetime
from catalyst.api import symbol
from catalyst.utils.run_algo import run_algorithm
coin = 'btc'
quote_currency = 'usd'
n_candles = 5
def initialize(context):
context.symbol = symbol('%s_%s' % (coin, quote_currency))
def handle_data_polo_partial_candles(context, data):
history = data.history(symbol('btc_usdt'), ['volume'],
bar_count=10,
frequency='4H')
print('\nnow: %s\n%s' % (data.current_dt, history))
if not hasattr(context, 'i'):
context.i = 0
context.i += 1
if context.i > 5:
raise Exception('stop')
live = False
if live:
run_algorithm(initialize=lambda ctx: True,
handle_data=handle_data_polo_partial_candles,
exchange_name='poloniex',
quote_currency='usdt',
algo_namespace='ns',
live=True,
data_frequency='minute',
capital_base=3000)
else:
run_algorithm(initialize=lambda ctx: True,
handle_data=handle_data_polo_partial_candles,
exchange_name='poloniex',
quote_currency='usdt',
algo_namespace='ns',
live=False,
data_frequency='minute',
capital_base=3000,
start=datetime(2018, 2, 2, 0, 0, 0, 0, pytz.utc),
end=datetime(2018, 2, 20, 0, 0, 0, 0, pytz.utc)
)
| apache-2.0 |
ArchiDroid/ArchiKernel | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
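# Note (added for clarity): autodict (from the Perf-Trace-Util helpers)
# auto-vivifies nested keys, so syscalls[comm][pid][id][ret] += 1 below
# succeeds once the leaf counter exists; the very first increment raises
# TypeError and is caught to seed the counter with 1.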
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
atmark-techno/atmark-dist | user/python/Lib/lib-tk/turtle.py | 4 | 10917 | # LogoMation-like turtle graphics
from math import * # Also for export
import Tkinter
class Error(Exception):
pass
class RawPen:
def __init__(self, canvas):
self._canvas = canvas
self._items = []
self._tracing = 1
self.degrees()
self.reset()
def degrees(self, fullcircle=360.0):
self._fullcircle = fullcircle
self._invradian = pi / (fullcircle * 0.5)
def radians(self):
self.degrees(2.0*pi)
def reset(self):
canvas = self._canvas
width = canvas.winfo_width()
height = canvas.winfo_height()
if width <= 1:
width = canvas['width']
if height <= 1:
height = canvas['height']
self._origin = float(width)/2.0, float(height)/2.0
self._position = self._origin
self._angle = 0.0
self._drawing = 1
self._width = 1
self._color = "black"
self._filling = 0
self._path = []
self._tofill = []
self.clear()
canvas._root().tkraise()
def clear(self):
self.fill(0)
canvas = self._canvas
items = self._items
self._items = []
for item in items:
canvas.delete(item)
def tracer(self, flag):
self._tracing = flag
def forward(self, distance):
x0, y0 = start = self._position
x1 = x0 + distance * cos(self._angle*self._invradian)
y1 = y0 - distance * sin(self._angle*self._invradian)
self._goto(x1, y1)
def backward(self, distance):
self.forward(-distance)
def left(self, angle):
self._angle = (self._angle + angle) % self._fullcircle
def right(self, angle):
self.left(-angle)
def up(self):
self._drawing = 0
def down(self):
self._drawing = 1
def width(self, width):
self._width = float(width)
def color(self, *args):
if not args:
raise Error, "no color arguments"
if len(args) == 1:
color = args[0]
if type(color) == type(""):
# Test the color first
try:
id = self._canvas.create_line(0, 0, 0, 0, fill=color)
except Tkinter.TclError:
raise Error, "bad color string: %s" % `color`
self._color = color
return
try:
r, g, b = color
except:
raise Error, "bad color sequence: %s" % `color`
else:
try:
r, g, b = args
except:
raise Error, "bad color arguments: %s" % `args`
assert 0 <= r <= 1
assert 0 <= g <= 1
assert 0 <= b <= 1
x = 255.0
y = 0.5
self._color = "#%02x%02x%02x" % (int(r*x+y), int(g*x+y), int(b*x+y))
def write(self, arg, move=0):
x, y = start = self._position
x = x-1 # correction -- calibrated for Windows
item = self._canvas.create_text(x, y,
text=str(arg), anchor="sw",
fill=self._color)
self._items.append(item)
if move:
x0, y0, x1, y1 = self._canvas.bbox(item)
self._goto(x1, y1)
def fill(self, flag):
if self._filling:
path = tuple(self._path)
smooth = self._filling < 0
if len(path) > 2:
item = self._canvas._create('polygon', path,
{'fill': self._color,
'smooth': smooth})
self._items.append(item)
self._canvas.lower(item)
if self._tofill:
for item in self._tofill:
self._canvas.itemconfigure(item, fill=self._color)
self._items.append(item)
self._path = []
self._tofill = []
self._filling = flag
if flag:
self._path.append(self._position)
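    # Note (added for clarity): fill(1) starts recording the pen's path and the
    # next fill(0) flushes it as a filled polygon drawn beneath the lines; a
    # negative flag requests a smoothed polygon (see `smooth` above).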
def circle(self, radius, extent=None):
if extent is None:
extent = self._fullcircle
x0, y0 = self._position
xc = x0 - radius * sin(self._angle * self._invradian)
yc = y0 - radius * cos(self._angle * self._invradian)
if radius >= 0.0:
start = self._angle - 90.0
else:
start = self._angle + 90.0
extent = -extent
if self._filling:
if abs(extent) >= self._fullcircle:
item = self._canvas.create_oval(xc-radius, yc-radius,
xc+radius, yc+radius,
width=self._width,
outline="")
self._tofill.append(item)
item = self._canvas.create_arc(xc-radius, yc-radius,
xc+radius, yc+radius,
style="chord",
start=start,
extent=extent,
width=self._width,
outline="")
self._tofill.append(item)
if self._drawing:
if abs(extent) >= self._fullcircle:
item = self._canvas.create_oval(xc-radius, yc-radius,
xc+radius, yc+radius,
width=self._width,
outline=self._color)
self._items.append(item)
item = self._canvas.create_arc(xc-radius, yc-radius,
xc+radius, yc+radius,
style="arc",
start=start,
extent=extent,
width=self._width,
outline=self._color)
self._items.append(item)
angle = start + extent
x1 = xc + abs(radius) * cos(angle * self._invradian)
y1 = yc - abs(radius) * sin(angle * self._invradian)
self._angle = (self._angle + extent) % self._fullcircle
self._position = x1, y1
if self._filling:
self._path.append(self._position)
def goto(self, *args):
if len(args) == 1:
try:
x, y = args[0]
except:
raise Error, "bad point argument: %s" % `args[0]`
else:
try:
x, y = args
except:
raise Error, "bad coordinates: %s" % `args[0]`
x0, y0 = self._origin
self._goto(x0+x, y0-y)
def _goto(self, x1, y1):
x0, y0 = start = self._position
self._position = map(float, (x1, y1))
if self._filling:
self._path.append(self._position)
if self._drawing:
if self._tracing:
dx = float(x1 - x0)
dy = float(y1 - y0)
distance = hypot(dx, dy)
nhops = int(distance)
item = self._canvas.create_line(x0, y0, x0, y0,
width=self._width,
arrow="last",
capstyle="round",
fill=self._color)
try:
for i in range(1, 1+nhops):
x, y = x0 + dx*i/nhops, y0 + dy*i/nhops
self._canvas.coords(item, x0, y0, x, y)
self._canvas.update()
self._canvas.after(10)
self._canvas.itemconfigure(item, arrow="none")
except Tkinter.TclError:
# Probably the window was closed!
return
else:
item = self._canvas.create_line(x0, y0, x1, y1,
width=self._width,
capstyle="round",
fill=self._color)
self._items.append(item)
_root = None
_canvas = None
_pen = None
class Pen(RawPen):
def __init__(self):
global _root, _canvas
if _root is None:
_root = Tkinter.Tk()
_root.wm_protocol("WM_DELETE_WINDOW", self._destroy)
if _canvas is None:
# XXX Should have scroll bars
_canvas = Tkinter.Canvas(_root, background="white")
_canvas.pack(expand=1, fill="both")
RawPen.__init__(self, _canvas)
def _destroy(self):
global _root, _canvas, _pen
root = self._canvas._root()
if root is _root:
_pen = None
_root = None
_canvas = None
root.destroy()
def _getpen():
global _pen
pen = _pen
if not pen:
_pen = pen = Pen()
return pen
def degrees(): _getpen().degrees()
def radians(): _getpen().radians()
def reset(): _getpen().reset()
def clear(): _getpen().clear()
def tracer(flag): _getpen().tracer(flag)
def forward(distance): _getpen().forward(distance)
def backward(distance): _getpen().backward(distance)
def left(angle): _getpen().left(angle)
def right(angle): _getpen().right(angle)
def up(): _getpen().up()
def down(): _getpen().down()
def width(width): _getpen().width(width)
def color(*args): apply(_getpen().color, args)
def write(arg, move=0): _getpen().write(arg, move)
def fill(flag): _getpen().fill(flag)
def circle(radius, extent=None): _getpen().circle(radius, extent)
def goto(*args): apply(_getpen().goto, args)
def demo():
reset()
tracer(1)
up()
backward(100)
down()
# draw 3 squares; the last filled
width(3)
for i in range(3):
if i == 2:
fill(1)
for j in range(4):
forward(20)
left(90)
if i == 2:
color("maroon")
fill(0)
up()
forward(30)
down()
width(1)
color("black")
# move out of the way
tracer(0)
up()
right(90)
forward(100)
right(90)
forward(100)
right(180)
down()
# some text
write("startstart", 1)
write("start", 1)
color("red")
# staircase
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
# filled staircase
fill(1)
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
fill(0)
# more text
write("end")
if __name__ == '__main__':
_root.mainloop()
if __name__ == '__main__':
demo()
| gpl-2.0 |
jbobron/node-workshop | challenge6/start/node_modules/browserify/node_modules/syntax-error/node_modules/esprima-six/tools/generate-unicode-regex.py | 341 | 5096 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# By Yusuke Suzuki <[email protected]>
# Modified by Mathias Bynens <http://mathiasbynens.be/>
# http://code.google.com/p/esprima/issues/detail?id=110
import sys
import string
import re
class RegExpGenerator(object):
def __init__(self, detector):
self.detector = detector
def generate_identifier_start(self):
r = [ ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_start(ch)]
return self._generate_range(r)
def generate_identifier_part(self):
r = [ ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_part(ch)]
return self._generate_range(r)
def generate_non_ascii_identifier_start(self):
r = [ ch for ch in xrange(0x0080, 0xFFFF + 1) if self.detector.is_identifier_start(ch)]
return self._generate_range(r)
def generate_non_ascii_identifier_part(self):
r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_part(ch)]
return self._generate_range(r)
def generate_non_ascii_separator_space(self):
r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_separator_space(ch)]
return self._generate_range(r)
def _generate_range(self, r):
if len(r) == 0:
return '[]'
buf = []
start = r[0]
end = r[0]
predict = start + 1
r = r[1:]
for code in r:
if predict == code:
end = code
predict = code + 1
continue
else:
if start == end:
buf.append("\\u%04X" % start)
elif end == start + 1:
buf.append("\\u%04X\\u%04X" % (start, end))
else:
buf.append("\\u%04X-\\u%04X" % (start, end))
start = code
end = code
predict = code + 1
if start == end:
buf.append("\\u%04X" % start)
else:
buf.append("\\u%04X-\\u%04X" % (start, end))
return '[' + ''.join(buf) + ']'
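    # Illustrative example (added): _generate_range collapses a sorted list of
    # codepoints into character-class ranges, e.g.
    #   RegExpGenerator(None)._generate_range([0x41, 0x42, 0x43, 0x5A])
    # returns '[\u0041-\u0043\u005A]' (the detector is not consulted here).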
class Detector(object):
def __init__(self, data):
self.data = data
def is_ascii(self, ch):
return ch < 0x80
def is_ascii_alpha(self, ch):
v = ch | 0x20
return v >= ord('a') and v <= ord('z')
def is_decimal_digit(self, ch):
return ch >= ord('0') and ch <= ord('9')
def is_octal_digit(self, ch):
return ch >= ord('0') and ch <= ord('7')
def is_hex_digit(self, ch):
v = ch | 0x20
        return self.is_decimal_digit(ch) or (v >= ord('a') and v <= ord('f'))
def is_digit(self, ch):
return self.is_decimal_digit(ch) or self.data[ch] == 'Nd'
def is_ascii_alphanumeric(self, ch):
return self.is_decimal_digit(ch) or self.is_ascii_alpha(ch)
def _is_non_ascii_identifier_start(self, ch):
c = self.data[ch]
return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl'
def _is_non_ascii_identifier_part(self, ch):
c = self.data[ch]
return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl' or c == 'Mn' or c == 'Mc' or c == 'Nd' or c == 'Pc' or ch == 0x200C or ch == 0x200D
def is_separator_space(self, ch):
return self.data[ch] == 'Zs'
def is_white_space(self, ch):
return ch == ord(' ') or ch == ord("\t") or ch == 0xB or ch == 0xC or ch == 0x00A0 or ch == 0xFEFF or self.is_separator_space(ch)
def is_line_terminator(self, ch):
return ch == 0x000D or ch == 0x000A or self.is_line_or_paragraph_terminator(ch)
def is_line_or_paragraph_terminator(self, ch):
return ch == 0x2028 or ch == 0x2029
def is_identifier_start(self, ch):
if self.is_ascii(ch):
return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alpha(ch)
return self._is_non_ascii_identifier_start(ch)
def is_identifier_part(self, ch):
if self.is_ascii(ch):
return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alphanumeric(ch)
return self._is_non_ascii_identifier_part(ch)
def analyze(source):
data = []
dictionary = {}
with open(source) as uni:
flag = False
first = 0
for line in uni:
d = string.split(line.strip(), ";")
val = int(d[0], 16)
if flag:
if re.compile("<.+, Last>").match(d[1]):
# print "%s : u%X" % (d[1], val)
flag = False
for t in range(first, val+1):
dictionary[t] = str(d[2])
else:
raise "Database Exception"
else:
if re.compile("<.+, First>").match(d[1]):
# print "%s : u%X" % (d[1], val)
flag = True
first = val
else:
dictionary[val] = str(d[2])
for i in range(0xFFFF + 1):
if dictionary.get(i) == None:
data.append("Un")
else:
data.append(dictionary[i])
return RegExpGenerator(Detector(data))
def main(source):
generator = analyze(source)
print generator.generate_non_ascii_identifier_start()
print generator.generate_non_ascii_identifier_part()
print generator.generate_non_ascii_separator_space()
if __name__ == '__main__':
main(sys.argv[1])
| mit |
bfurtaw/thrust | performance/report.py | 12 | 1519 | from build import plot_results, print_results
#valid formats are png, pdf, ps, eps and svg
#if format=None the plot will be displayed
format = 'png'
#output = print_results
output = plot_results
for function in ['fill', 'reduce', 'inner_product', 'gather', 'merge']:
output(function + '.xml', 'InputType', 'InputSize', 'Bandwidth', format=format)
for function in ['inclusive_scan', 'inclusive_segmented_scan', 'unique']:
output(function + '.xml', 'InputType', 'InputSize', 'Throughput', format=format)
for method in ['indirect_sort']:
output(method + '.xml', 'Sort', 'VectorLength', 'Time', plot='semilogx', title='Indirect Sorting', format=format)
for method in ['sort', 'merge_sort', 'radix_sort']:
output(method + '.xml', 'KeyType', 'InputSize', 'Sorting', title='thrust::' + method, format=format)
output(method + '_by_key.xml', 'KeyType', 'InputSize', 'Sorting', title='thrust::' + method + '_by_key', format=format)
output('stl_sort.xml', 'KeyType', 'InputSize', 'Sorting', title='std::sort', format=format)
for method in ['radix_sort']:
output(method + '_bits.xml', 'KeyType', 'KeyBits', 'Sorting', title='thrust::' + method, plot='plot', dpi=72, format=format)
for format in ['png', 'pdf']:
output('reduce_float.xml', 'InputType', 'InputSize', 'Bandwidth', dpi=120, plot='semilogx', title='thrust::reduce<float>()', format=format)
output('sort_large.xml', 'KeyType', 'InputSize', 'Sorting', dpi=120, plot='semilogx', title='thrust::sort<T>()', format=format)
| apache-2.0 |
IT-Department-Projects/OOAD-Project | Flask_App/oakcrest/lib/python2.7/site-packages/requests/auth.py | 68 | 9541 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
import warnings
from base64 import b64encode
from .compat import urlparse, str, basestring
from .cookies import extract_cookies_to_jar
from ._internal_utils import to_native_string
from .utils import parse_dict_header
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
# "I want us to put a big-ol' comment on top of it that
# says that this behaviour is dumb but we need to preserve
# it because people are relying on it."
# - Lukasa
#
# These are here solely to maintain backwards compatibility
# for things like ints. This will be removed in 3.0.0.
if not isinstance(username, basestring):
warnings.warn(
"Non-string usernames will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({0!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(username),
category=DeprecationWarning,
)
username = str(username)
if not isinstance(password, basestring):
warnings.warn(
"Non-string passwords will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({0!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(password),
category=DeprecationWarning,
)
password = str(password)
# -- End Removal --
if isinstance(username, str):
username = username.encode('latin1')
if isinstance(password, str):
password = password.encode('latin1')
authstr = 'Basic ' + to_native_string(
b64encode(b':'.join((username, password))).strip()
)
return authstr
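# Illustrative example (added note, not part of the requests API):
#   _basic_auth_str('user', 'pass') == 'Basic dXNlcjpwYXNz'
# i.e. 'Basic ' plus the base64 encoding of 'user:pass'.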
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
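# Illustrative usage sketch (added; assumes `import requests`):
#   requests.get('https://example.org/user', auth=HTTPBasicAuth('user', 'pass'))
# Passing a plain ('user', 'pass') tuple as `auth` is shorthand for the same.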
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
"""
:rtype: str
"""
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
hash_utf8 = None
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if not qop:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
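    # Illustrative result (added; values abbreviated): the header built above
    # looks like
    #   Digest username="u", realm="r", nonce="n", uri="/", response="<32 hex>",
    #   qop="auth", nc=00000001, cnonce="..."
    # with opaque/algorithm/digest appended only when the challenge had them.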
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
| mit |
obeattie/sqlalchemy | lib/sqlalchemy/sql/functions.py | 19 | 3067 | from sqlalchemy import types as sqltypes
from sqlalchemy.sql.expression import (
ClauseList, Function, _literal_as_binds, text, _type_from_args
)
from sqlalchemy.sql import operators
from sqlalchemy.sql.visitors import VisitableType
class _GenericMeta(VisitableType):
def __call__(self, *args, **kwargs):
args = [_literal_as_binds(c) for c in args]
return type.__call__(self, *args, **kwargs)
class GenericFunction(Function):
__metaclass__ = _GenericMeta
def __init__(self, type_=None, args=(), **kwargs):
self.packagenames = []
self.name = self.__class__.__name__
self._bind = kwargs.get('bind', None)
self.clause_expr = ClauseList(
operator=operators.comma_op,
group_contents=True, *args).self_group()
self.type = sqltypes.to_instance(
type_ or getattr(self, '__return_type__', None))
class AnsiFunction(GenericFunction):
def __init__(self, **kwargs):
GenericFunction.__init__(self, **kwargs)
class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
def __init__(self, *args, **kwargs):
kwargs.setdefault('type_', _type_from_args(args))
GenericFunction.__init__(self, args=args, **kwargs)
class coalesce(ReturnTypeFromArgs):
pass
class max(ReturnTypeFromArgs):
pass
class min(ReturnTypeFromArgs):
pass
class sum(ReturnTypeFromArgs):
pass
class now(GenericFunction):
__return_type__ = sqltypes.DateTime
class concat(GenericFunction):
__return_type__ = sqltypes.String
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, args=args, **kwargs)
class char_length(GenericFunction):
__return_type__ = sqltypes.Integer
def __init__(self, arg, **kwargs):
GenericFunction.__init__(self, args=[arg], **kwargs)
class random(GenericFunction):
def __init__(self, *args, **kwargs):
kwargs.setdefault('type_', None)
GenericFunction.__init__(self, args=args, **kwargs)
class count(GenericFunction):
"""The ANSI COUNT aggregate function. With no arguments, emits COUNT \*."""
__return_type__ = sqltypes.Integer
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = text('*')
GenericFunction.__init__(self, args=(expression,), **kwargs)
class current_date(AnsiFunction):
__return_type__ = sqltypes.Date
class current_time(AnsiFunction):
__return_type__ = sqltypes.Time
class current_timestamp(AnsiFunction):
__return_type__ = sqltypes.DateTime
class current_user(AnsiFunction):
__return_type__ = sqltypes.String
class localtime(AnsiFunction):
__return_type__ = sqltypes.DateTime
class localtimestamp(AnsiFunction):
__return_type__ = sqltypes.DateTime
class session_user(AnsiFunction):
__return_type__ = sqltypes.String
class sysdate(AnsiFunction):
__return_type__ = sqltypes.DateTime
class user(AnsiFunction):
__return_type__ = sqltypes.String
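# Illustrative sketch (added, mirroring char_length above): a new generic
# function with a fixed return type is just another subclass, e.g.
#
#   class md5(GenericFunction):
#       __return_type__ = sqltypes.String
#       def __init__(self, arg, **kwargs):
#           GenericFunction.__init__(self, args=[arg], **kwargs)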
| mit |
calebfoss/tensorflow | tensorflow/contrib/metrics/python/metrics/classification.py | 23 | 2583 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None):
"""Computes the percentage of times that predictions matches labels.
Args:
predictions: the predicted values, a `Tensor` whose dtype and shape
matches 'labels'.
labels: the ground truth values, a `Tensor` of any shape and
bool, integer, or string dtype.
weights: None or `Tensor` of float values to reweight the accuracy.
Returns:
Accuracy `Tensor`.
Raises:
ValueError: if dtypes don't match or
if dtype is not bool, integer, or string.
"""
if not (labels.dtype.is_integer or
labels.dtype in (dtypes.bool, dtypes.string)):
raise ValueError(
'Labels should have bool, integer, or string dtype, not %r' %
labels.dtype)
if not labels.dtype.is_compatible_with(predictions.dtype):
raise ValueError('Dtypes of predictions and labels should match. '
'Given: predictions (%r) and labels (%r)' %
(predictions.dtype, labels.dtype))
with ops.name_scope('accuracy', values=[predictions, labels]):
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
is_correct = math_ops.multiply(is_correct, weights)
num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
return math_ops.div(math_ops.reduce_sum(is_correct),
math_ops.reduce_sum(num_values))
return math_ops.reduce_mean(is_correct)
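# Illustrative usage sketch (added; assumes `import tensorflow as tf` and an
# active session `sess`):
#   acc = accuracy(tf.constant([1, 0, 1, 1]), tf.constant([1, 0, 0, 1]))
#   sess.run(acc)  # -> 0.75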
| apache-2.0 |
elhuhdron/knossos | python/user/custom_graphics_view.py | 2 | 2659 | #!/home/amos/anaconda/bin/python
from PythonQt.Qt import *
from PythonQt.QtGui import *
class CGScene(QGraphicsScene):
def __init__(self, parent = None):
        super(CGScene, self).__init__(parent)
class Texture(QGraphicsItem):
def __init__(self, pos, tex):
        super(Texture, self).__init__()
self.tex = tex
self.setPos(pos)
def paint(self, painter, item, widget):
painter.drawPixmap(self.x(), self.y(), self.tex)
def mousePressEvent(self, event):
        QGraphicsItem.mousePressEvent(self, event)
        self.update()
class CGView(QGraphicsView):
def __init__(self, scene):
super(CGView, self).__init__(scene)
def wheelEvent(self, event):
if event.delta() > 0:
self.zoom(1.2)
else:
self.zoom(1 / 1.2)
def zoom(self, factor):
scaling = self.transform().scale(factor, factor).mapRect(QRectF(0, 0, 1, 1)).width()
if scaling < 0.07 or scaling > 100:
return
self.scale(factor, factor)
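    # Note (added for clarity): `scaling` above is the current zoom level,
    # obtained by mapping a unit rectangle through the view transform; the
    # early return clamps the effective zoom to roughly [0.07, 100].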
scene = CGScene()
view = CGView(scene)
view.setInteractive(True)
view.setBackgroundBrush(QBrush(QColor(0, 0, 0), QPixmap("python/user/images/ni.jpg")))
view.setSceneRect(-768, -512, 2000, 2000)
view.setOptimizationFlags(QGraphicsView.DontClipPainter | QGraphicsView.DontAdjustForAntialiasing);
view.setDragMode(QGraphicsView.ScrollHandDrag)
view.setCacheMode(QGraphicsView.CacheBackground);
view.setViewportUpdateMode(QGraphicsView.FullViewportUpdate);
view.setTransformationAnchor(QGraphicsView.AnchorUnderMouse);
view.setRenderHints(QPainter.SmoothPixmapTransform)
widget = QWidget()
widget.setWindowTitle("CGExample")
widget.setGeometry(100, 100, 800, 800)
splitter = QSplitter(widget)
toolbox = QToolBox()
splitter.addWidget(toolbox)
splitter.addWidget(view)
tex = Texture(QPointF(0, 0), QPixmap("python/user/images/e1088_xy.png"))
tex.setFlags(QGraphicsItem.ItemIsMovable | QGraphicsItem.ItemSendsScenePositionChanges)
tex.setAcceptedMouseButtons(Qt.LeftButton | Qt.RightButton);
tex.setAcceptHoverEvents(True);
scene.addItem(tex)
label = QLabel()
label.setPixmap(QPixmap(":/images/splash.png"))
proxy = scene.addWidget(label)
proxy.setScale(0.5)
group_box = QGroupBox()
layout = QHBoxLayout(group_box)
group_proxy = scene.addWidget(group_box)
group_proxy.setPos(0, 0)
rect = QRectF(-64, -64, 64, 128)
pen = QColor(1, 1, 1)
brush = QBrush(QColor(0, 0, 0))
widget.show()
"""
class Watcher(QtCore.QRunnable):
def __init__(self):
super(Watcher, self).__init__()
print "init"
def run(self):
pass
#view.rotate(5)
watcher = Watcher()
timer = QTimer()
timer.setInterval(100)
timer.timeout.connect(watcher.run)
timer.start(100)
pool = QThreadPool()
"""
| gpl-2.0 |
franky88/emperioanimesta | env/Lib/site-packages/pip/utils/outdated.py | 513 | 5455 | from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import sys
from pip._vendor import lockfile
from pip._vendor.packaging import version as packaging_version
from pip.compat import total_seconds, WINDOWS
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, running_under_virtualenv
from pip.utils import ensure_dir, get_installed_version
from pip.utils.filesystem import check_path_owner
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
class VirtualenvSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, ValueError):
self.state = {}
def save(self, pypi_version, current_time):
# Attempt to write out our version check file
with open(self.statefile_path, "w") as statefile:
json.dump(
{
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
},
statefile,
sort_keys=True,
separators=(",", ":")
)
class GlobalSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, KeyError):
self.state = {}
def save(self, pypi_version, current_time):
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
if os.path.exists(self.statefile_path):
with open(self.statefile_path) as statefile:
state = json.load(statefile)
else:
state = {}
state[sys.prefix] = {
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
with open(self.statefile_path, "w") as statefile:
json.dump(state, statefile, sort_keys=True,
separators=(",", ":"))
def load_selfcheck_statefile():
if running_under_virtualenv():
return VirtualenvSelfCheckState()
else:
return GlobalSelfCheckState()
def pip_version_check(session):
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if installed_version is None:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = load_selfcheck_statefile()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
resp = session.get(
PyPI.pip_json_url,
headers={"Accept": "application/json"},
)
resp.raise_for_status()
pypi_version = [
v for v in sorted(
list(resp.json()["releases"]),
key=packaging_version.parse,
)
if not packaging_version.parse(v).is_prerelease
][-1]
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
if (pip_version < remote_version and
pip_version.base_version != remote_version.base_version):
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
| gpl-3.0 |
b3c/VTK-5.8 | Wrapping/Python/vtk/util/misc.py | 9 | 1984 | """Miscellaneous functions and classes that dont fit into specific
categories."""
import sys, os, vtk
#----------------------------------------------------------------------
# the following functions are for the vtk regression testing and examples
def vtkGetDataRoot():
"""vtkGetDataRoot() -- return vtk example data directory
"""
dataIndex=-1;
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-D' and i < len(sys.argv)-1:
dataIndex = i+1
if dataIndex != -1:
dataRoot = sys.argv[dataIndex]
else:
try:
dataRoot = os.environ['VTK_DATA_ROOT']
except KeyError:
dataRoot = '../../../../VTKData'
return dataRoot
def vtkRegressionTestImage( renWin ):
"""vtkRegressionTestImage(renWin) -- produce regression image for window
This function writes out a regression .png file for a vtkWindow.
    It then compares the window against the stored baseline image, creating
    the baseline first if it does not exist, and returns 1 when the
    thresholded difference is within tolerance, 0 on failure, and 2 when no
    -V argument was supplied.
"""
imageIndex=-1;
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-V' and i < len(sys.argv)-1:
imageIndex = i+1
if imageIndex != -1:
fname = os.path.join(vtkGetDataRoot(), sys.argv[imageIndex])
rt_w2if = vtk.vtkWindowToImageFilter()
rt_w2if.SetInput(renWin)
if os.path.isfile(fname):
pass
else:
rt_pngw = vtk.vtkPNGWriter()
rt_pngw.SetFileName(fname)
rt_pngw.SetInput(rt_w2if.GetOutput())
rt_pngw.Write()
rt_pngw = None
rt_png = vtk.vtkPNGReader()
rt_png.SetFileName(fname)
rt_id = vtk.vtkImageDifference()
rt_id.SetInput(rt_w2if.GetOutput())
rt_id.SetImage(rt_png.GetOutput())
rt_id.Update()
if rt_id.GetThresholdedError() <= 10:
return 1
else:
sys.stderr.write('Failed image test: %f\n'
% rt_id.GetThresholdedError())
return 0
return 2
| bsd-3-clause |
ActiveState/code | recipes/Python/577760_Change_a_Functions_Closure/recipe-577760.py | 1 | 1282 | """inject_closure module"""
INJECTEDKEY = "injected_{}"
OUTERLINE = " outer_{0} = injected_{0}"
INNERLINE = " inner_{0} = outer_{0}"
SOURCE= ("def not_important():",
" def also_not_important():",
" return also_not_important")
def inject_closure(f, *args):
"""Return a copy of f, with a new closure.
The new closure will be derived from args, in the same
order. This requires that the caller have knowledge
of the existing closure.
"""
# build the source to exec
injected = {}
source = list(SOURCE)
for i in range(len(args)):
source.insert(1, OUTERLINE.format(i))
source.insert(-1, INNERLINE.format(i))
injected[INJECTEDKEY.format(i)] = args[i]
# exec the source and pull the new closure
exec("\n".join(source), injected, injected)
closure = injected["not_important"]().__closure__
# build the new function object
func = type(f)(f.__code__, f.__globals__, f.__name__,
f.__defaults__, closure)
func.__annotations__ = f.__annotations__
func.__doc__ = f.__doc__
func.__kwdefaults__ = f.__kwdefaults__
func.__module__ = f.__module__
return func
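# --- Illustrative usage (added sketch, not part of the original recipe) ---
# Assumes Python 3, matching the __code__/__kwdefaults__ attributes used above.
def _make_adder(n):
    def _add(x):
        return x + n
    return _add

if __name__ == "__main__":
    add5 = _make_adder(5)
    add10 = inject_closure(add5, 10)  # rebuild the closure cell with n = 10
    assert add5(1) == 6
    assert add10(1) == 11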
| mit |
HackSoftware/hackconf.bg | home/website/migrations/0002_auto_20160903_1043.py | 1 | 12244 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-03 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0013_make_rendition_upload_callable'),
('wagtailcore', '0029_unicode_slugfield_dj19'),
('wagtailredirects', '0005_capitalizeverbose'),
('wagtailforms', '0003_capitalizeverbose'),
('website', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BranchPartners',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('webpage', models.URLField(blank=True, max_length=255, null=True)),
('description', wagtail.wagtailcore.fields.RichTextField()),
],
),
migrations.CreateModel(
name='GeneralPartners',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='MediaPartners',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('video_url', models.URLField(blank=True, max_length=255, null=True)),
('description', wagtail.wagtailcore.fields.RichTextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='PastEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='website.Event')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.RemoveField(
model_name='howweworkpage',
name='header_image',
),
migrations.RemoveField(
model_name='howweworkpage',
name='page_ptr',
),
migrations.RemoveField(
model_name='howweworkpage',
name='what_we_do_image',
),
migrations.RemoveField(
model_name='ourteampage',
name='header_image',
),
migrations.RemoveField(
model_name='ourteampage',
name='page_ptr',
),
migrations.RemoveField(
model_name='project',
name='background_image',
),
migrations.RemoveField(
model_name='project',
name='logo',
),
migrations.RemoveField(
model_name='projectsplacement',
name='page',
),
migrations.RemoveField(
model_name='projectsplacement',
name='project',
),
migrations.RemoveField(
model_name='teammate',
name='initial_photo',
),
migrations.RemoveField(
model_name='teammate',
name='secondary_photo',
),
migrations.RemoveField(
model_name='teammatepageplacement',
name='page',
),
migrations.RemoveField(
model_name='teammatepageplacement',
name='teammate',
),
migrations.RemoveField(
model_name='teammateplacement',
name='page',
),
migrations.RemoveField(
model_name='teammateplacement',
name='teammate',
),
migrations.RemoveField(
model_name='technologiespageplacement',
name='page',
),
migrations.RemoveField(
model_name='technologiespageplacement',
name='technology',
),
migrations.RemoveField(
model_name='technologiesplacement',
name='page',
),
migrations.RemoveField(
model_name='technologiesplacement',
name='technology',
),
migrations.RemoveField(
model_name='technologiesweusepage',
name='header_image',
),
migrations.RemoveField(
model_name='technologiesweusepage',
name='page_ptr',
),
migrations.RemoveField(
model_name='technology',
name='logo',
),
migrations.RemoveField(
model_name='homepage',
name='how_we_work_center',
),
migrations.RemoveField(
model_name='homepage',
name='how_we_work_left',
),
migrations.RemoveField(
model_name='homepage',
name='how_we_work_right',
),
migrations.RemoveField(
model_name='homepage',
name='how_we_work_title',
),
migrations.RemoveField(
model_name='homepage',
name='intro_h1',
),
migrations.RemoveField(
model_name='homepage',
name='intro_h2',
),
migrations.RemoveField(
model_name='homepage',
name='intro_image',
),
migrations.RemoveField(
model_name='homepage',
name='our_team_center',
),
migrations.RemoveField(
model_name='homepage',
name='our_team_title',
),
migrations.RemoveField(
model_name='homepage',
name='portfolio_center',
),
migrations.RemoveField(
model_name='homepage',
name='portfolio_image',
),
migrations.RemoveField(
model_name='homepage',
name='portfolio_title',
),
migrations.RemoveField(
model_name='homepage',
name='technologies_we_use_center',
),
migrations.RemoveField(
model_name='homepage',
name='technologies_we_use_image',
),
migrations.RemoveField(
model_name='homepage',
name='technologies_we_use_title',
),
migrations.AddField(
model_name='homepage',
name='about_text',
field=wagtail.wagtailcore.fields.RichTextField(blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='call_for_speakers_form_url',
field=models.URLField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_image_logo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AddField(
model_name='homepage',
name='header_text',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='location_description',
field=wagtail.wagtailcore.fields.RichTextField(blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='speakers_title',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='tickets_description',
field=wagtail.wagtailcore.fields.RichTextField(blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='tickets_title',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='tickets_widget_code',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='video_url',
field=models.URLField(blank=True, max_length=255, null=True),
),
migrations.DeleteModel(
name='HowWeWorkPage',
),
migrations.DeleteModel(
name='OurTeamPage',
),
migrations.DeleteModel(
name='Project',
),
migrations.DeleteModel(
name='ProjectsPlacement',
),
migrations.DeleteModel(
name='Teammate',
),
migrations.DeleteModel(
name='TeammatePagePlacement',
),
migrations.DeleteModel(
name='TeammatePlacement',
),
migrations.DeleteModel(
name='TechnologiesPagePlacement',
),
migrations.DeleteModel(
name='TechnologiesPlacement',
),
migrations.DeleteModel(
name='TechnologiesWeUsePage',
),
migrations.DeleteModel(
name='Technology',
),
migrations.AddField(
model_name='pastevents',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='past_events', to='website.HomePage'),
),
migrations.AddField(
model_name='mediapartners',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='media_partners', to='website.HomePage'),
),
migrations.AddField(
model_name='mediapartners',
name='partner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='website.Partner'),
),
migrations.AddField(
model_name='generalpartners',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='general_partners', to='website.HomePage'),
),
migrations.AddField(
model_name='generalpartners',
name='partner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='website.Partner'),
),
migrations.AddField(
model_name='branchpartners',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='branch_partners', to='website.HomePage'),
),
migrations.AddField(
model_name='branchpartners',
name='partner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='website.Partner'),
),
]
| mit |
duhzecca/cinder | cinder/tests/unit/fake_service.py | 13 | 1787 | # Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from oslo_versionedobjects import fields
from cinder import objects
def fake_db_service(**updates):
NOW = timeutils.utcnow().replace(microsecond=0)
db_service = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'host': 'fake-host',
'binary': 'fake-service',
'topic': 'fake-service-topic',
'report_count': 1,
'disabled': False,
'disabled_reason': None,
'modified_at': NOW,
}
for name, field in objects.Service.fields.items():
if name in db_service:
continue
if field.nullable:
db_service[name] = None
elif field.default != fields.UnspecifiedDefault:
db_service[name] = field.default
else:
raise Exception('fake_db_service needs help with %s.' % name)
if updates:
db_service.update(updates)
return db_service
def fake_service_obj(context, **updates):
return objects.Service._from_db_object(context, objects.Service(),
fake_db_service(**updates))
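# Illustrative usage sketch (added; `ctxt` comes from the test fixture):
#   svc = fake_service_obj(ctxt, host='fake-host2')
#   svc.host  # -> 'fake-host2'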
| apache-2.0 |
beiko-lab/gengis | bin/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/flashwin.py | 6 | 8028 | #----------------------------------------------------------------------
# Name: wx.lib.flashwin
# Purpose: A class that allows the use of the Shockwave Flash
# ActiveX control
#
# Author: Robin Dunn
#
# Created: 22-March-2004
# RCS-ID: $Id: flashwin.py 54040 2008-06-08 23:03:22Z RD $
# Copyright: (c) 2008 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
import wx
import wx.lib.activex
import comtypes.client as cc
import sys
if not hasattr(sys, 'frozen'):
cc.GetModule( ('{D27CDB6B-AE6D-11CF-96B8-444553540000}', 1, 0) )
from comtypes.gen import ShockwaveFlashObjects
clsID = '{D27CDB6E-AE6D-11CF-96B8-444553540000}'
progID = 'ShockwaveFlash.ShockwaveFlash.1'
class FlashWindow(wx.lib.activex.ActiveXCtrl):
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0, name='FlashWindow'):
wx.lib.activex.ActiveXCtrl.__init__(self, parent, progID,
id, pos, size, style, name)
def SetZoomRect(self, left, top, right, bottom):
return self.ctrl.SetZoomRect(left, top, right, bottom)
def Zoom(self, factor):
return self.ctrl.Zoom(factor)
def Pan(self, x, y, mode):
return self.ctrl.Pan(x, y, mode)
def Play(self):
return self.ctrl.Play()
def Stop(self):
return self.ctrl.Stop()
def Back(self):
return self.ctrl.Back()
def Forward(self):
return self.ctrl.Forward()
def Rewind(self):
return self.ctrl.Rewind()
def StopPlay(self):
return self.ctrl.StopPlay()
def GotoFrame(self, FrameNum):
return self.ctrl.GotoFrame(FrameNum)
def CurrentFrame(self):
return self.ctrl.CurrentFrame()
def IsPlaying(self):
return self.ctrl.IsPlaying()
def PercentLoaded(self):
return self.ctrl.PercentLoaded()
def FrameLoaded(self, FrameNum):
return self.ctrl.FrameLoaded(FrameNum)
def FlashVersion(self):
return self.ctrl.FlashVersion()
def LoadMovie(self, layer, url):
return self.ctrl.LoadMovie(layer, url)
def TGotoFrame(self, target, FrameNum):
return self.ctrl.TGotoFrame(target, FrameNum)
def TGotoLabel(self, target, label):
return self.ctrl.TGotoLabel(target, label)
def TCurrentFrame(self, target):
return self.ctrl.TCurrentFrame(target)
def TCurrentLabel(self, target):
return self.ctrl.TCurrentLabel(target)
def TPlay(self, target):
return self.ctrl.TPlay(target)
def TStopPlay(self, target):
return self.ctrl.TStopPlay(target)
def SetVariable(self, name, value):
return self.ctrl.SetVariable(name, value)
def GetVariable(self, name):
return self.ctrl.GetVariable(name)
def TSetProperty(self, target, property, value):
return self.ctrl.TSetProperty(target, property, value)
def TGetProperty(self, target, property):
return self.ctrl.TGetProperty(target, property)
def TCallFrame(self, target, FrameNum):
return self.ctrl.TCallFrame(target, FrameNum)
def TCallLabel(self, target, label):
return self.ctrl.TCallLabel(target, label)
def TSetPropertyNum(self, target, property, value):
return self.ctrl.TSetPropertyNum(target, property, value)
def TGetPropertyNum(self, target, property):
return self.ctrl.TGetPropertyNum(target, property)
def TGetPropertyAsNumber(self, target, property):
return self.ctrl.TGetPropertyAsNumber(target, property)
# Getters, Setters and properties
def _get_ReadyState(self):
return self.ctrl.ReadyState
readystate = property(_get_ReadyState, None)
def _get_TotalFrames(self):
return self.ctrl.TotalFrames
totalframes = property(_get_TotalFrames, None)
def _get_Playing(self):
return self.ctrl.Playing
def _set_Playing(self, Playing):
self.ctrl.Playing = Playing
playing = property(_get_Playing, _set_Playing)
def _get_Quality(self):
return self.ctrl.Quality
def _set_Quality(self, Quality):
self.ctrl.Quality = Quality
quality = property(_get_Quality, _set_Quality)
def _get_ScaleMode(self):
return self.ctrl.ScaleMode
def _set_ScaleMode(self, ScaleMode):
self.ctrl.ScaleMode = ScaleMode
scalemode = property(_get_ScaleMode, _set_ScaleMode)
def _get_AlignMode(self):
return self.ctrl.AlignMode
def _set_AlignMode(self, AlignMode):
self.ctrl.AlignMode = AlignMode
alignmode = property(_get_AlignMode, _set_AlignMode)
def _get_BackgroundColor(self):
return self.ctrl.BackgroundColor
def _set_BackgroundColor(self, BackgroundColor):
self.ctrl.BackgroundColor = BackgroundColor
backgroundcolor = property(_get_BackgroundColor, _set_BackgroundColor)
def _get_Loop(self):
return self.ctrl.Loop
def _set_Loop(self, Loop):
self.ctrl.Loop = Loop
loop = property(_get_Loop, _set_Loop)
def _get_Movie(self):
return self.ctrl.Movie
def _set_Movie(self, Movie):
self.ctrl.Movie = Movie
movie = property(_get_Movie, _set_Movie)
def _get_FrameNum(self):
return self.ctrl.FrameNum
def _set_FrameNum(self, FrameNum):
self.ctrl.FrameNum = FrameNum
framenum = property(_get_FrameNum, _set_FrameNum)
def _get_WMode(self):
return self.ctrl.WMode
def _set_WMode(self, WMode):
self.ctrl.WMode = WMode
wmode = property(_get_WMode, _set_WMode)
def _get_SAlign(self):
return self.ctrl.SAlign
def _set_SAlign(self, SAlign):
self.ctrl.SAlign = SAlign
salign = property(_get_SAlign, _set_SAlign)
def _get_Menu(self):
return self.ctrl.Menu
def _set_Menu(self, Menu):
self.ctrl.Menu = Menu
menu = property(_get_Menu, _set_Menu)
def _get_Base(self):
return self.ctrl.Base
def _set_Base(self, Base):
self.ctrl.Base = Base
base = property(_get_Base, _set_Base)
def _get_Scale(self):
return self.ctrl.Scale
def _set_Scale(self, Scale):
self.ctrl.Scale = Scale
scale = property(_get_Scale, _set_Scale)
def _get_DeviceFont(self):
return self.ctrl.DeviceFont
def _set_DeviceFont(self, DeviceFont):
self.ctrl.DeviceFont = DeviceFont
devicefont = property(_get_DeviceFont, _set_DeviceFont)
def _get_EmbedMovie(self):
return self.ctrl.EmbedMovie
def _set_EmbedMovie(self, EmbedMovie):
self.ctrl.EmbedMovie = EmbedMovie
embedmovie = property(_get_EmbedMovie, _set_EmbedMovie)
def _get_BGColor(self):
return self.ctrl.BGColor
def _set_BGColor(self, BGColor):
self.ctrl.BGColor = BGColor
bgcolor = property(_get_BGColor, _set_BGColor)
def _get_Quality2(self):
return self.ctrl.Quality2
def _set_Quality2(self, Quality2):
self.ctrl.Quality2 = Quality2
quality2 = property(_get_Quality2, _set_Quality2)
def _get_SWRemote(self):
return self.ctrl.SWRemote
def _set_SWRemote(self, SWRemote):
self.ctrl.SWRemote = SWRemote
swremote = property(_get_SWRemote, _set_SWRemote)
def _get_FlashVars(self):
return self.ctrl.FlashVars
def _set_FlashVars(self, FlashVars):
self.ctrl.FlashVars = FlashVars
flashvars = property(_get_FlashVars, _set_FlashVars)
def _get_AllowScriptAccess(self):
return self.ctrl.AllowScriptAccess
def _set_AllowScriptAccess(self, AllowScriptAccess):
self.ctrl.AllowScriptAccess = AllowScriptAccess
allowscriptaccess = property(_get_AllowScriptAccess, _set_AllowScriptAccess)
def _get_MovieData(self):
return self.ctrl.MovieData
def _set_MovieData(self, MovieData):
self.ctrl.MovieData = MovieData
moviedata = property(_get_MovieData, _set_MovieData)
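# Illustration (not part of this wrapper): every property above follows the
# same delegation pattern -- a _get/_set pair forwarding to the underlying
# ActiveX control, wrapped with property(). A minimal sketch of the pattern,
# assuming a hypothetical `ctrl` object exposing a Movie attribute:
class _DelegationSketch(object):
    def __init__(self, ctrl):
        self.ctrl = ctrl
    def _get_Movie(self):
        return self.ctrl.Movie
    def _set_Movie(self, Movie):
        self.ctrl.Movie = Movie
    movie = property(_get_Movie, _set_Movie)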
| gpl-3.0 |
aliyun/oss-ftp | python27/win32/Lib/site-packages/pip/_vendor/html5lib/constants.py | 963 | 87346 | from __future__ import absolute_import, division, unicode_literals
import string
import gettext
_ = gettext.gettext
EOF = None
E = {
"null-character":
_("Null character in input stream, replaced with U+FFFD."),
"invalid-codepoint":
_("Invalid codepoint in stream."),
"incorrectly-placed-solidus":
_("Solidus (/) incorrectly placed in tag."),
"incorrect-cr-newline-entity":
_("Incorrect CR newline entity, replaced with LF."),
"illegal-windows-1252-entity":
_("Entity used with illegal number (windows-1252 reference)."),
"cant-convert-numeric-entity":
_("Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x)."),
"illegal-codepoint-for-numeric-entity":
_("Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x."),
"numeric-entity-without-semicolon":
_("Numeric entity didn't end with ';'."),
"expected-numeric-entity-but-got-eof":
_("Numeric entity expected. Got end of file instead."),
"expected-numeric-entity":
_("Numeric entity expected but none found."),
"named-entity-without-semicolon":
_("Named entity didn't end with ';'."),
"expected-named-entity":
_("Named entity expected. Got none."),
"attributes-in-end-tag":
_("End tag contains unexpected attributes."),
'self-closing-flag-on-end-tag':
_("End tag contains unexpected self-closing flag."),
"expected-tag-name-but-got-right-bracket":
_("Expected tag name. Got '>' instead."),
"expected-tag-name-but-got-question-mark":
_("Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)"),
"expected-tag-name":
_("Expected tag name. Got something else instead"),
"expected-closing-tag-but-got-right-bracket":
_("Expected closing tag. Got '>' instead. Ignoring '</>'."),
"expected-closing-tag-but-got-eof":
_("Expected closing tag. Unexpected end of file."),
"expected-closing-tag-but-got-char":
_("Expected closing tag. Unexpected character '%(data)s' found."),
"eof-in-tag-name":
_("Unexpected end of file in the tag name."),
"expected-attribute-name-but-got-eof":
_("Unexpected end of file. Expected attribute name instead."),
"eof-in-attribute-name":
_("Unexpected end of file in attribute name."),
"invalid-character-in-attribute-name":
_("Invalid character in attribute name"),
"duplicate-attribute":
_("Dropped duplicate attribute on tag."),
"expected-end-of-tag-name-but-got-eof":
_("Unexpected end of file. Expected = or end of tag."),
"expected-attribute-value-but-got-eof":
_("Unexpected end of file. Expected attribute value."),
"expected-attribute-value-but-got-right-bracket":
_("Expected attribute value. Got '>' instead."),
'equals-in-unquoted-attribute-value':
_("Unexpected = in unquoted attribute"),
'unexpected-character-in-unquoted-attribute-value':
_("Unexpected character in unquoted attribute"),
"invalid-character-after-attribute-name":
_("Unexpected character after attribute name."),
"unexpected-character-after-attribute-value":
_("Unexpected character after attribute value."),
"eof-in-attribute-value-double-quote":
_("Unexpected end of file in attribute value (\")."),
"eof-in-attribute-value-single-quote":
_("Unexpected end of file in attribute value (')."),
"eof-in-attribute-value-no-quotes":
_("Unexpected end of file in attribute value."),
"unexpected-EOF-after-solidus-in-tag":
_("Unexpected end of file in tag. Expected >"),
"unexpected-character-after-solidus-in-tag":
_("Unexpected character after / in tag. Expected >"),
"expected-dashes-or-doctype":
_("Expected '--' or 'DOCTYPE'. Not found."),
"unexpected-bang-after-double-dash-in-comment":
_("Unexpected ! after -- in comment"),
"unexpected-space-after-double-dash-in-comment":
_("Unexpected space after -- in comment"),
"incorrect-comment":
_("Incorrect comment."),
"eof-in-comment":
_("Unexpected end of file in comment."),
"eof-in-comment-end-dash":
_("Unexpected end of file in comment (-)"),
"unexpected-dash-after-double-dash-in-comment":
_("Unexpected '-' after '--' found in comment."),
"eof-in-comment-double-dash":
_("Unexpected end of file in comment (--)."),
"eof-in-comment-end-space-state":
_("Unexpected end of file in comment."),
"eof-in-comment-end-bang-state":
_("Unexpected end of file in comment."),
"unexpected-char-in-comment":
_("Unexpected character in comment found."),
"need-space-after-doctype":
_("No space after literal string 'DOCTYPE'."),
"expected-doctype-name-but-got-right-bracket":
_("Unexpected > character. Expected DOCTYPE name."),
"expected-doctype-name-but-got-eof":
_("Unexpected end of file. Expected DOCTYPE name."),
"eof-in-doctype-name":
_("Unexpected end of file in DOCTYPE name."),
"eof-in-doctype":
_("Unexpected end of file in DOCTYPE."),
"expected-space-or-right-bracket-in-doctype":
_("Expected space or '>'. Got '%(data)s'"),
"unexpected-end-of-doctype":
_("Unexpected end of DOCTYPE."),
"unexpected-char-in-doctype":
_("Unexpected character in DOCTYPE."),
"eof-in-innerhtml":
_("XXX innerHTML EOF"),
"unexpected-doctype":
_("Unexpected DOCTYPE. Ignored."),
"non-html-root":
_("html needs to be the first start tag."),
"expected-doctype-but-got-eof":
_("Unexpected End of file. Expected DOCTYPE."),
"unknown-doctype":
_("Erroneous DOCTYPE."),
"expected-doctype-but-got-chars":
_("Unexpected non-space characters. Expected DOCTYPE."),
"expected-doctype-but-got-start-tag":
_("Unexpected start tag (%(name)s). Expected DOCTYPE."),
"expected-doctype-but-got-end-tag":
_("Unexpected end tag (%(name)s). Expected DOCTYPE."),
"end-tag-after-implied-root":
_("Unexpected end tag (%(name)s) after the (implied) root element."),
"expected-named-closing-tag-but-got-eof":
_("Unexpected end of file. Expected end tag (%(name)s)."),
"two-heads-are-not-better-than-one":
_("Unexpected start tag head in existing head. Ignored."),
"unexpected-end-tag":
_("Unexpected end tag (%(name)s). Ignored."),
"unexpected-start-tag-out-of-my-head":
_("Unexpected start tag (%(name)s) that can be in head. Moved."),
"unexpected-start-tag":
_("Unexpected start tag (%(name)s)."),
"missing-end-tag":
_("Missing end tag (%(name)s)."),
"missing-end-tags":
_("Missing end tags (%(name)s)."),
"unexpected-start-tag-implies-end-tag":
_("Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s)."),
"unexpected-start-tag-treated-as":
_("Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
"deprecated-tag":
_("Unexpected start tag %(name)s. Don't use it!"),
"unexpected-start-tag-ignored":
_("Unexpected start tag %(name)s. Ignored."),
"expected-one-end-tag-but-got-another":
_("Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s)."),
"end-tag-too-early":
_("End tag (%(name)s) seen too early. Expected other end tag."),
"end-tag-too-early-named":
_("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
"end-tag-too-early-ignored":
_("End tag (%(name)s) seen too early. Ignored."),
"adoption-agency-1.1":
_("End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm."),
"adoption-agency-1.2":
_("End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm."),
"adoption-agency-1.3":
_("End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm."),
"adoption-agency-4.4":
_("End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm."),
"unexpected-end-tag-treated-as":
_("Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
"no-end-tag":
_("This element (%(name)s) has no end tag."),
"unexpected-implied-end-tag-in-table":
_("Unexpected implied end tag (%(name)s) in the table phase."),
"unexpected-implied-end-tag-in-table-body":
_("Unexpected implied end tag (%(name)s) in the table body phase."),
"unexpected-char-implies-table-voodoo":
_("Unexpected non-space characters in "
"table context caused voodoo mode."),
"unexpected-hidden-input-in-table":
_("Unexpected input with type hidden in table context."),
"unexpected-form-in-table":
_("Unexpected form in table context."),
"unexpected-start-tag-implies-table-voodoo":
_("Unexpected start tag (%(name)s) in "
"table context caused voodoo mode."),
"unexpected-end-tag-implies-table-voodoo":
_("Unexpected end tag (%(name)s) in "
"table context caused voodoo mode."),
"unexpected-cell-in-table-body":
_("Unexpected table cell start tag (%(name)s) "
"in the table body phase."),
"unexpected-cell-end-tag":
_("Got table cell end tag (%(name)s) "
"while required end tags are missing."),
"unexpected-end-tag-in-table-body":
_("Unexpected end tag (%(name)s) in the table body phase. Ignored."),
"unexpected-implied-end-tag-in-table-row":
_("Unexpected implied end tag (%(name)s) in the table row phase."),
"unexpected-end-tag-in-table-row":
_("Unexpected end tag (%(name)s) in the table row phase. Ignored."),
"unexpected-select-in-select":
_("Unexpected select start tag in the select phase "
"treated as select end tag."),
"unexpected-input-in-select":
_("Unexpected input start tag in the select phase."),
"unexpected-start-tag-in-select":
_("Unexpected start tag token (%(name)s in the select phase. "
"Ignored."),
"unexpected-end-tag-in-select":
_("Unexpected end tag (%(name)s) in the select phase. Ignored."),
"unexpected-table-element-start-tag-in-select-in-table":
_("Unexpected table element start tag (%(name)s) in the select in table phase."),
"unexpected-table-element-end-tag-in-select-in-table":
_("Unexpected table element end tag (%(name)s) in the select in table phase."),
"unexpected-char-after-body":
_("Unexpected non-space characters in the after body phase."),
"unexpected-start-tag-after-body":
_("Unexpected start tag token (%(name)s)"
" in the after body phase."),
"unexpected-end-tag-after-body":
_("Unexpected end tag token (%(name)s)"
" in the after body phase."),
"unexpected-char-in-frameset":
_("Unexpected characters in the frameset phase. Characters ignored."),
"unexpected-start-tag-in-frameset":
_("Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored."),
"unexpected-frameset-in-frameset-innerhtml":
_("Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML)."),
"unexpected-end-tag-in-frameset":
_("Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored."),
"unexpected-char-after-frameset":
_("Unexpected non-space characters in the "
"after frameset phase. Ignored."),
"unexpected-start-tag-after-frameset":
_("Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-frameset":
_("Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-body-innerhtml":
_("Unexpected end tag after body(innerHtml)"),
"expected-eof-but-got-char":
_("Unexpected non-space characters. Expected end of file."),
"expected-eof-but-got-start-tag":
_("Unexpected start tag (%(name)s)"
". Expected end of file."),
"expected-eof-but-got-end-tag":
_("Unexpected end tag (%(name)s)"
". Expected end of file."),
"eof-in-table":
_("Unexpected end of file. Expected table content."),
"eof-in-select":
_("Unexpected end of file. Expected select content."),
"eof-in-frameset":
_("Unexpected end of file. Expected frameset content."),
"eof-in-script-in-script":
_("Unexpected end of file. Expected script content."),
"eof-in-foreign-lands":
_("Unexpected end of file. Expected foreign content"),
"non-void-element-with-trailing-solidus":
_("Trailing solidus not allowed on element %(name)s"),
"unexpected-html-element-in-foreign-content":
_("Element %(name)s not allowed in a non-html context"),
"unexpected-end-tag-before-html":
_("Unexpected end tag (%(name)s) before html."),
"XXX-undefined-error":
_("Undefined error (this sucks and should be fixed)"),
}
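# Illustration (not part of html5lib): each message above is a plain %-format
# string, so callers interpolate token details with a dict of named values.
# A minimal sketch using a hypothetical helper:
def _format_error(code, **datavars):
    # Look up a message in E and fill in its %(name)s-style placeholders.
    return E[code] % datavars

# e.g. _format_error("expected-doctype-but-got-start-tag", name="div")
# -> "Unexpected start tag (div). Expected DOCTYPE."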
namespaces = {
"html": "http://www.w3.org/1999/xhtml",
"mathml": "http://www.w3.org/1998/Math/MathML",
"svg": "http://www.w3.org/2000/svg",
"xlink": "http://www.w3.org/1999/xlink",
"xml": "http://www.w3.org/XML/1998/namespace",
"xmlns": "http://www.w3.org/2000/xmlns/"
}
scopingElements = frozenset((
(namespaces["html"], "applet"),
(namespaces["html"], "caption"),
(namespaces["html"], "html"),
(namespaces["html"], "marquee"),
(namespaces["html"], "object"),
(namespaces["html"], "table"),
(namespaces["html"], "td"),
(namespaces["html"], "th"),
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext"),
(namespaces["mathml"], "annotation-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title"),
))
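# Illustration (not the parser's actual code): scopingElements feeds the HTML5
# "has an element in scope" check -- walk the stack of open elements from the
# top; the target is in scope unless one of these boundary elements is met
# first. A minimal sketch over (namespace, name) pairs:
def _has_in_scope(open_elements, target_name):
    for namespace, name in reversed(open_elements):
        if namespace == namespaces["html"] and name == target_name:
            return True
        if (namespace, name) in scopingElements:
            return False
    return False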
formattingElements = frozenset((
(namespaces["html"], "a"),
(namespaces["html"], "b"),
(namespaces["html"], "big"),
(namespaces["html"], "code"),
(namespaces["html"], "em"),
(namespaces["html"], "font"),
(namespaces["html"], "i"),
(namespaces["html"], "nobr"),
(namespaces["html"], "s"),
(namespaces["html"], "small"),
(namespaces["html"], "strike"),
(namespaces["html"], "strong"),
(namespaces["html"], "tt"),
(namespaces["html"], "u")
))
specialElements = frozenset((
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
))
htmlIntegrationPointElements = frozenset((
(namespaces["mathml"], "annotaion-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title")
))
mathmlTextIntegrationPointElements = frozenset((
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext")
))
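# Illustration: both integration-point sets mark places inside foreign
# (SVG/MathML) content where the tree builder switches back to ordinary HTML
# parsing rules. Per the spec, MathML annotation-xml is an HTML integration
# point only for certain values of its "encoding" attribute; these sets carry
# just the (namespace, name) part of that rule.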
adjustForeignAttributes = {
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
"xlink:href": ("xlink", "href", namespaces["xlink"]),
"xlink:role": ("xlink", "role", namespaces["xlink"]),
"xlink:show": ("xlink", "show", namespaces["xlink"]),
"xlink:title": ("xlink", "title", namespaces["xlink"]),
"xlink:type": ("xlink", "type", namespaces["xlink"]),
"xml:base": ("xml", "base", namespaces["xml"]),
"xml:lang": ("xml", "lang", namespaces["xml"]),
"xml:space": ("xml", "space", namespaces["xml"]),
"xmlns": (None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
adjustForeignAttributes.items()])
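# Illustration: the two tables are exact inverses, e.g.
#     adjustForeignAttributes["xlink:href"]
#         -> ("xlink", "href", namespaces["xlink"])
#     unadjustForeignAttributes[(namespaces["xlink"], "href")]
#         -> "xlink:href"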
spaceCharacters = frozenset((
"\t",
"\n",
"\u000C",
" ",
"\r"
))
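# Illustration: code that needs these as one string (e.g. for str.strip())
# can use "".join(spaceCharacters).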
tableInsertModeElements = frozenset((
"table",
"tbody",
"tfoot",
"thead",
"tr"
))
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
for c in string.ascii_uppercase])
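# Illustration: the mapping is shaped for unicode .translate(), giving a
# locale-independent, ASCII-only lowercase (unlike .lower(), it never touches
# non-ASCII codepoints), e.g.:
#     "DIV".translate(asciiUpper2Lower)  ->  "div"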
# Heading elements need to be ordered
headingElements = (
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
)
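# Illustration: the ordering lets code derive a heading's rank from its
# position, e.g. headingElements.index("h3") + 1 == 3.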
voidElements = frozenset((
"base",
"command",
"event-source",
"link",
"meta",
"hr",
"br",
"img",
"embed",
"param",
"area",
"col",
"input",
"source",
"track"
))
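# Illustration (a sketch, not html5lib's serializer): void elements are
# emitted with no end tag:
def _render_empty(tag):
    if tag in voidElements:
        return "<%s>" % tag
    return "<%s></%s>" % (tag, tag)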
cdataElements = frozenset(('title', 'textarea'))
rcdataElements = frozenset((
'style',
'script',
'xmp',
'iframe',
'noembed',
'noframes',
'noscript'
))
booleanAttributes = {
"": frozenset(("irrelevant",)),
"style": frozenset(("scoped",)),
"img": frozenset(("ismap",)),
"audio": frozenset(("autoplay", "controls")),
"video": frozenset(("autoplay", "controls")),
"script": frozenset(("defer", "async")),
"details": frozenset(("open",)),
"datagrid": frozenset(("multiple", "disabled")),
"command": frozenset(("hidden", "disabled", "checked", "default")),
"hr": frozenset(("noshade")),
"menu": frozenset(("autosubmit",)),
"fieldset": frozenset(("disabled", "readonly")),
"option": frozenset(("disabled", "readonly", "selected")),
"optgroup": frozenset(("disabled", "readonly")),
"button": frozenset(("disabled", "autofocus")),
"input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
"select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
"output": frozenset(("disabled", "readonly")),
}
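# Illustration (a sketch, not html5lib's serializer logic): an attribute is
# boolean for a tag if it is listed under that tag or under the global ""
# key, so a minimizing serializer may emit it without a value:
def _is_boolean_attribute(tag, attr):
    return (attr in booleanAttributes.get(tag, frozenset()) or
            attr in booleanAttributes.get("", frozenset()))

# e.g. _is_boolean_attribute("img", "ismap") -> True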
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
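# Illustration of why the tuple must stay ordered: it is indexed by
# (codepoint - 0x80) to remap the C1 range as windows-1252. A minimal sketch
# (not html5lib's tokenizer code):
def _remap_windows1252(codepoint):
    if 0x80 <= codepoint <= 0x9F:
        return entitiesWindows1252[codepoint - 0x80]
    return codepoint

# e.g. _remap_windows1252(0x93) -> 8220 (LEFT DOUBLE QUOTATION MARK)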
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
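# Note on the table below: a name present both with and without the trailing
# ";" (e.g. "amp" and "amp;") is a legacy reference that may also be
# recognised without the semicolon; names listed only with ";" (e.g.
# "Abreve;") require it.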
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
replacementCharacters = {
0x0: "\uFFFD",
0x0d: "\u000D",
0x80: "\u20AC",
0x81: "\u0081",
0x82: "\u201A",
0x83: "\u0192",
0x84: "\u201E",
0x85: "\u2026",
0x86: "\u2020",
0x87: "\u2021",
0x88: "\u02C6",
0x89: "\u2030",
0x8A: "\u0160",
0x8B: "\u2039",
0x8C: "\u0152",
0x8D: "\u008D",
0x8E: "\u017D",
0x8F: "\u008F",
0x90: "\u0090",
0x91: "\u2018",
0x92: "\u2019",
0x93: "\u201C",
0x94: "\u201D",
0x95: "\u2022",
0x96: "\u2013",
0x97: "\u2014",
0x98: "\u02DC",
0x99: "\u2122",
0x9A: "\u0161",
0x9B: "\u203A",
0x9C: "\u0153",
0x9D: "\u009D",
0x9E: "\u017E",
0x9F: "\u0178",
}
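# These replacements implement the HTML5 rule that numeric character
# references for NUL and the C1 control range (0x80-0x9F) are treated as
# the windows-1252 characters the author most likely meant. Illustrative
# lookup, assuming the dict above is in scope:
#   replacementCharacters.get(0x93, "\uFFFD")  # -> left double quotation mark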
encodings = {
'437': 'cp437',
'850': 'cp850',
'852': 'cp852',
'855': 'cp855',
'857': 'cp857',
'860': 'cp860',
'861': 'cp861',
'862': 'cp862',
'863': 'cp863',
'865': 'cp865',
'866': 'cp866',
'869': 'cp869',
'ansix341968': 'ascii',
'ansix341986': 'ascii',
'arabic': 'iso8859-6',
'ascii': 'ascii',
'asmo708': 'iso8859-6',
'big5': 'big5',
'big5hkscs': 'big5hkscs',
'chinese': 'gbk',
'cp037': 'cp037',
'cp1026': 'cp1026',
'cp154': 'ptcp154',
'cp367': 'ascii',
'cp424': 'cp424',
'cp437': 'cp437',
'cp500': 'cp500',
'cp775': 'cp775',
'cp819': 'windows-1252',
'cp850': 'cp850',
'cp852': 'cp852',
'cp855': 'cp855',
'cp857': 'cp857',
'cp860': 'cp860',
'cp861': 'cp861',
'cp862': 'cp862',
'cp863': 'cp863',
'cp864': 'cp864',
'cp865': 'cp865',
'cp866': 'cp866',
'cp869': 'cp869',
'cp936': 'gbk',
'cpgr': 'cp869',
'cpis': 'cp861',
'csascii': 'ascii',
'csbig5': 'big5',
'cseuckr': 'cp949',
'cseucpkdfmtjapanese': 'euc_jp',
'csgb2312': 'gbk',
'cshproman8': 'hp-roman8',
'csibm037': 'cp037',
'csibm1026': 'cp1026',
'csibm424': 'cp424',
'csibm500': 'cp500',
'csibm855': 'cp855',
'csibm857': 'cp857',
'csibm860': 'cp860',
'csibm861': 'cp861',
'csibm863': 'cp863',
'csibm864': 'cp864',
'csibm865': 'cp865',
'csibm866': 'cp866',
'csibm869': 'cp869',
'csiso2022jp': 'iso2022_jp',
'csiso2022jp2': 'iso2022_jp_2',
'csiso2022kr': 'iso2022_kr',
'csiso58gb231280': 'gbk',
'csisolatin1': 'windows-1252',
'csisolatin2': 'iso8859-2',
'csisolatin3': 'iso8859-3',
'csisolatin4': 'iso8859-4',
'csisolatin5': 'windows-1254',
'csisolatin6': 'iso8859-10',
'csisolatinarabic': 'iso8859-6',
'csisolatincyrillic': 'iso8859-5',
'csisolatingreek': 'iso8859-7',
'csisolatinhebrew': 'iso8859-8',
'cskoi8r': 'koi8-r',
'csksc56011987': 'cp949',
'cspc775baltic': 'cp775',
'cspc850multilingual': 'cp850',
'cspc862latinhebrew': 'cp862',
'cspc8codepage437': 'cp437',
'cspcp852': 'cp852',
'csptcp154': 'ptcp154',
'csshiftjis': 'shift_jis',
'csunicode11utf7': 'utf-7',
'cyrillic': 'iso8859-5',
'cyrillicasian': 'ptcp154',
'ebcdiccpbe': 'cp500',
'ebcdiccpca': 'cp037',
'ebcdiccpch': 'cp500',
'ebcdiccphe': 'cp424',
'ebcdiccpnl': 'cp037',
'ebcdiccpus': 'cp037',
'ebcdiccpwt': 'cp037',
'ecma114': 'iso8859-6',
'ecma118': 'iso8859-7',
'elot928': 'iso8859-7',
'eucjp': 'euc_jp',
'euckr': 'cp949',
'extendedunixcodepackedformatforjapanese': 'euc_jp',
'gb18030': 'gb18030',
'gb2312': 'gbk',
'gb231280': 'gbk',
'gbk': 'gbk',
'greek': 'iso8859-7',
'greek8': 'iso8859-7',
'hebrew': 'iso8859-8',
'hproman8': 'hp-roman8',
'hzgb2312': 'hz',
'ibm037': 'cp037',
'ibm1026': 'cp1026',
'ibm367': 'ascii',
'ibm424': 'cp424',
'ibm437': 'cp437',
'ibm500': 'cp500',
'ibm775': 'cp775',
'ibm819': 'windows-1252',
'ibm850': 'cp850',
'ibm852': 'cp852',
'ibm855': 'cp855',
'ibm857': 'cp857',
'ibm860': 'cp860',
'ibm861': 'cp861',
'ibm862': 'cp862',
'ibm863': 'cp863',
'ibm864': 'cp864',
'ibm865': 'cp865',
'ibm866': 'cp866',
'ibm869': 'cp869',
'iso2022jp': 'iso2022_jp',
'iso2022jp2': 'iso2022_jp_2',
'iso2022kr': 'iso2022_kr',
'iso646irv1991': 'ascii',
'iso646us': 'ascii',
'iso88591': 'windows-1252',
'iso885910': 'iso8859-10',
'iso8859101992': 'iso8859-10',
'iso885911987': 'windows-1252',
'iso885913': 'iso8859-13',
'iso885914': 'iso8859-14',
'iso8859141998': 'iso8859-14',
'iso885915': 'iso8859-15',
'iso885916': 'iso8859-16',
'iso8859162001': 'iso8859-16',
'iso88592': 'iso8859-2',
'iso885921987': 'iso8859-2',
'iso88593': 'iso8859-3',
'iso885931988': 'iso8859-3',
'iso88594': 'iso8859-4',
'iso885941988': 'iso8859-4',
'iso88595': 'iso8859-5',
'iso885951988': 'iso8859-5',
'iso88596': 'iso8859-6',
'iso885961987': 'iso8859-6',
'iso88597': 'iso8859-7',
'iso885971987': 'iso8859-7',
'iso88598': 'iso8859-8',
'iso885981988': 'iso8859-8',
'iso88599': 'windows-1254',
'iso885991989': 'windows-1254',
'isoceltic': 'iso8859-14',
'isoir100': 'windows-1252',
'isoir101': 'iso8859-2',
'isoir109': 'iso8859-3',
'isoir110': 'iso8859-4',
'isoir126': 'iso8859-7',
'isoir127': 'iso8859-6',
'isoir138': 'iso8859-8',
'isoir144': 'iso8859-5',
'isoir148': 'windows-1254',
'isoir149': 'cp949',
'isoir157': 'iso8859-10',
'isoir199': 'iso8859-14',
'isoir226': 'iso8859-16',
'isoir58': 'gbk',
'isoir6': 'ascii',
'koi8r': 'koi8-r',
'koi8u': 'koi8-u',
'korean': 'cp949',
'ksc5601': 'cp949',
'ksc56011987': 'cp949',
'ksc56011989': 'cp949',
'l1': 'windows-1252',
'l10': 'iso8859-16',
'l2': 'iso8859-2',
'l3': 'iso8859-3',
'l4': 'iso8859-4',
'l5': 'windows-1254',
'l6': 'iso8859-10',
'l8': 'iso8859-14',
'latin1': 'windows-1252',
'latin10': 'iso8859-16',
'latin2': 'iso8859-2',
'latin3': 'iso8859-3',
'latin4': 'iso8859-4',
'latin5': 'windows-1254',
'latin6': 'iso8859-10',
'latin8': 'iso8859-14',
'latin9': 'iso8859-15',
'ms936': 'gbk',
'mskanji': 'shift_jis',
'pt154': 'ptcp154',
'ptcp154': 'ptcp154',
'r8': 'hp-roman8',
'roman8': 'hp-roman8',
'shiftjis': 'shift_jis',
'tis620': 'cp874',
'unicode11utf7': 'utf-7',
'us': 'ascii',
'usascii': 'ascii',
'utf16': 'utf-16',
'utf16be': 'utf-16-be',
'utf16le': 'utf-16-le',
'utf8': 'utf-8',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows936': 'gbk',
'x-x-big5': 'big5'}
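# Keys are encoding labels normalized (presumably by the caller) to
# lowercase alphanumerics; values are Python codec names. Note the
# deliberate HTML5 quirk that latin-1 labels resolve to windows-1252:
#   encodings.get("iso88591")  # -> 'windows-1252'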
tokenTypes = {
"Doctype": 0,
"Characters": 1,
"SpaceCharacters": 2,
"StartTag": 3,
"EndTag": 4,
"EmptyTag": 5,
"Comment": 6,
"ParseError": 7
}
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]))
prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
pass
class ReparseException(Exception):
pass
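# ReparseException is presumably raised when a late encoding declaration
# (e.g. a <meta charset>) invalidates already-decoded input, telling the
# caller to re-decode and parse again from the start.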
| mit |
sencha/chromium-spacewalk | third_party/libxml/src/check-xsddata-test-suite.py | 343 | 10682 | #!/usr/bin/python
import sys
import time
import os
import string
import StringIO
sys.path.insert(0, "python")
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
debug = 0
verbose = 0
quiet = 1
#
# the testsuite description
#
CONF=os.path.join(os.path.dirname(__file__), "test/xsdtest/xsdtestsuite.xml")
LOG="check-xsddata-test-suite.log"
log = open(LOG, "w")
nb_schemas_tests = 0
nb_schemas_success = 0
nb_schemas_failed = 0
nb_instances_tests = 0
nb_instances_success = 0
nb_instances_failed = 0
libxml2.lineNumbersDefault(1)
#
# Error and warning callbacks
#
def callback(ctx, str):
global log
log.write("%s%s" % (ctx, str))
libxml2.registerErrorHandler(callback, "")
#
# Resolver callback
#
resources = {}
def resolver(URL, ID, ctxt):
global resources
if resources.has_key(URL):
return(StringIO.StringIO(resources[URL]))
log.write("Resolver failure: asked %s\n" % (URL))
log.write("resources: %s\n" % (resources))
return None
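# Illustrative flow, assuming the libxml2.setEntityLoader(resolver) call
# made near the bottom of this script: once resources["doc.rng"] has been
# filled in by handle_resource(), any parse that asks for "doc.rng" reads
# StringIO.StringIO(resources["doc.rng"]) instead of touching the filesystem.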
#
# handle a valid instance
#
def handle_valid(node, schema):
global log
global nb_instances_success
global nb_instances_failed
instance = node.prop("dtd")
if instance == None:
instance = ""
child = node.children
while child != None:
if child.type != 'text':
instance = instance + child.serialize()
child = child.next
mem = libxml2.debugMemory(1)
try:
doc = libxml2.parseDoc(instance)
except:
doc = None
if doc == None:
log.write("\nFailed to parse correct instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
return
if debug:
print "instance line %d" % (node.lineNo())
try:
ctxt = schema.relaxNGNewValidCtxt()
ret = doc.relaxNGValidateDoc(ctxt)
del ctxt
except:
ret = -1
doc.freeDoc()
if mem != libxml2.debugMemory(1):
print "validating instance %d line %d leaks" % (
nb_instances_tests, node.lineNo())
if ret != 0:
log.write("\nFailed to validate correct instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
else:
nb_instances_success = nb_instances_success + 1
#
# handle an invalid instance
#
def handle_invalid(node, schema):
global log
global nb_instances_success
global nb_instances_failed
instance = node.prop("dtd")
if instance == None:
instance = ""
child = node.children
while child != None:
if child.type != 'text':
instance = instance + child.serialize()
child = child.next
# mem = libxml2.debugMemory(1);
try:
doc = libxml2.parseDoc(instance)
except:
doc = None
if doc == None:
log.write("\nStrange: failed to parse incorrect instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
return
if debug:
print "instance line %d" % (node.lineNo())
try:
ctxt = schema.relaxNGNewValidCtxt()
ret = doc.relaxNGValidateDoc(ctxt)
del ctxt
except:
ret = -1
doc.freeDoc()
# if mem != libxml2.debugMemory(1):
# print "validating instance %d line %d leaks" % (
# nb_instances_tests, node.lineNo())
if ret == 0:
log.write("\nFailed to detect validation problem in instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
else:
nb_instances_success = nb_instances_success + 1
#
# handle correct and incorrect schema tests
#
def handle_correct(node):
global log
global nb_schemas_success
global nb_schemas_failed
schema = ""
child = node.children
while child != None:
if child.type != 'text':
schema = schema + child.serialize()
child = child.next
try:
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
except:
rngs = None
if rngs == None:
log.write("\nFailed to compile correct schema:\n-----\n")
log.write(schema)
log.write("\n-----\n")
nb_schemas_failed = nb_schemas_failed + 1
else:
nb_schemas_success = nb_schemas_success + 1
return rngs
def handle_incorrect(node):
global log
global nb_schemas_success
global nb_schemas_failed
schema = ""
child = node.children
while child != None:
if child.type != 'text':
schema = schema + child.serialize()
child = child.next
try:
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
except:
rngs = None
if rngs != None:
log.write("\nFailed to detect schema error in:\n-----\n")
log.write(schema)
log.write("\n-----\n")
nb_schemas_failed = nb_schemas_failed + 1
else:
# log.write("\nSuccess detecting schema error in:\n-----\n")
# log.write(schema)
# log.write("\n-----\n")
nb_schemas_success = nb_schemas_success + 1
return None
#
# resource handling: keep a dictionary of URL->string mappings
#
def handle_resource(node, dir):
global resources
try:
name = node.prop('name')
except:
name = None
if name == None or name == '':
log.write("resource has no name")
return;
if dir != None:
# name = libxml2.buildURI(name, dir)
name = dir + '/' + name
res = ""
child = node.children
while child != None:
if child.type != 'text':
res = res + child.serialize()
child = child.next
resources[name] = res
#
# dir handling: pseudo directory resources
#
def handle_dir(node, dir):
try:
name = node.prop('name')
except:
name = None
if name == None or name == '':
log.write("resource has no name")
return;
if dir != None:
# name = libxml2.buildURI(name, dir)
name = dir + '/' + name
dirs = node.xpathEval('dir')
for dir in dirs:
handle_dir(dir, name)
res = node.xpathEval('resource')
for r in res:
handle_resource(r, name)
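# Illustrative mapping built by the two handlers above (shapes assumed):
# <dir name="a"><resource name="b.rng">X</resource></dir> ends up as
# resources["a/b.rng"] == "X", i.e. nested dirs flatten into '/'-joined keys.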
#
# handle a testCase element
#
def handle_testCase(node):
global nb_schemas_tests
global nb_instances_tests
global resources
sections = node.xpathEval('string(section)')
log.write("\n ======== test %d line %d section %s ==========\n" % (
nb_schemas_tests, node.lineNo(), sections))
resources = {}
if debug:
print "test %d line %d" % (nb_schemas_tests, node.lineNo())
dirs = node.xpathEval('dir')
for dir in dirs:
handle_dir(dir, None)
res = node.xpathEval('resource')
for r in res:
handle_resource(r, None)
# default so the checks below don't hit a NameError when a testCase
# provides neither <correct> nor <incorrect>
schema = None
tsts = node.xpathEval('incorrect')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <incorrect> example" %(node.lineNo())
schema = handle_incorrect(tsts[0])
else:
tsts = node.xpathEval('correct')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <correct> example"% (node.lineNo())
schema = handle_correct(tsts[0])
else:
print "warning <testCase> line %d has no <correct> nor <incorrect> child" % (node.lineNo())
nb_schemas_tests = nb_schemas_tests + 1
valids = node.xpathEval('valid')
invalids = node.xpathEval('invalid')
nb_instances_tests = nb_instances_tests + len(valids) + len(invalids)
if schema != None:
for valid in valids:
handle_valid(valid, schema)
for invalid in invalids:
handle_invalid(invalid, schema)
#
# handle a testSuite element
#
def handle_testSuite(node, level = 0):
global nb_schemas_tests, nb_schemas_success, nb_schemas_failed
global nb_instances_tests, nb_instances_success, nb_instances_failed
if verbose and level >= 0:
old_schemas_tests = nb_schemas_tests
old_schemas_success = nb_schemas_success
old_schemas_failed = nb_schemas_failed
old_instances_tests = nb_instances_tests
old_instances_success = nb_instances_success
old_instances_failed = nb_instances_failed
docs = node.xpathEval('documentation')
authors = node.xpathEval('author')
if docs != []:
msg = ""
for doc in docs:
msg = msg + doc.content + " "
if authors != []:
msg = msg + "written by "
for author in authors:
msg = msg + author.content + " "
if quiet == 0:
print msg
sections = node.xpathEval('section')
if verbose and sections != [] and level <= 0:
msg = ""
for section in sections:
msg = msg + section.content + " "
if quiet == 0:
print "Tests for section %s" % (msg)
for test in node.xpathEval('testCase'):
handle_testCase(test)
for test in node.xpathEval('testSuite'):
handle_testSuite(test, level + 1)
if verbose and level >= 0 :
if sections != []:
msg = ""
for section in sections:
msg = msg + section.content + " "
print "Result of tests for section %s" % (msg)
elif docs != []:
msg = ""
for doc in docs:
msg = msg + doc.content + " "
print "Result of tests for %s" % (msg)
if nb_schemas_tests != old_schemas_tests:
print "found %d test schemas: %d success %d failures" % (
nb_schemas_tests - old_schemas_tests,
nb_schemas_success - old_schemas_success,
nb_schemas_failed - old_schemas_failed)
if nb_instances_tests != old_instances_tests:
print "found %d test instances: %d success %d failures" % (
nb_instances_tests - old_instances_tests,
nb_instances_success - old_instances_success,
nb_instances_failed - old_instances_failed)
#
# Parse the conf file
#
libxml2.substituteEntitiesDefault(1)
testsuite = libxml2.parseFile(CONF)
#
# Error and warning callbacks
#
def callback(ctx, str):
global log
log.write("%s%s" % (ctx, str))
libxml2.registerErrorHandler(callback, "")
libxml2.setEntityLoader(resolver)
root = testsuite.getRootElement()
if root.name != 'testSuite':
print "%s doesn't start with a testSuite element, aborting" % (CONF)
sys.exit(1)
if quiet == 0:
print "Running Relax NG testsuite"
handle_testSuite(root)
if quiet == 0 or nb_schemas_failed != 0:
print "\nTOTAL:\nfound %d test schemas: %d success %d failures" % (
nb_schemas_tests, nb_schemas_success, nb_schemas_failed)
if quiet == 0 or nb_instances_failed != 0:
print "found %d test instances: %d success %d failures" % (
nb_instances_tests, nb_instances_success, nb_instances_failed)
testsuite.freeDoc()
# Memory debug specific
libxml2.relaxNGCleanupTypes()
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
if quiet == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| bsd-3-clause |
Learningtribes/edx-platform | openedx/core/djangolib/nose.py | 32 | 1212 | """
Utilities related to nose.
"""
from django.core.management import call_command
from django.db import DEFAULT_DB_ALIAS, connections, transaction
import django_nose
class NoseTestSuiteRunner(django_nose.NoseTestSuiteRunner):
"""Custom NoseTestSuiteRunner."""
def setup_databases(self):
""" Setup databases and then flush to remove data added by migrations. """
return_value = super(NoseTestSuiteRunner, self).setup_databases()
# Delete all data added by data migrations. Unit tests should set up their own data using factories.
call_command('flush', verbosity=0, interactive=False, load_initial_data=False)
# Through Django 1.8, auto increment sequences are not reset when calling flush on a SQLite db.
# So we do it ourselves.
# http://sqlite.org/autoinc.html
connection = connections[DEFAULT_DB_ALIAS]
if connection.vendor == 'sqlite' and not connection.features.supports_sequence_reset:
with transaction.atomic(using=DEFAULT_DB_ALIAS):
cursor = connection.cursor()
cursor.execute(
"delete from sqlite_sequence;"
)
return return_value
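# Illustrative wiring (settings name inferred from this module's path):
#   TEST_RUNNER = 'openedx.core.djangolib.nose.NoseTestSuiteRunner'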
| agpl-3.0 |
ran5515/DeepDecision | tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py | 21 | 34945 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
# TODO(ebrevdo): Remove once _linear is fully deprecated.
# pylint: disable=protected-access
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
# pylint: enable=protected-access
linear = rnn_cell_impl._linear
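# _linear(args, output_size, bias) is assumed (from contrib-era TensorFlow
# sources) to compute concat(args) * W + b, creating the kernel/bias
# variables in the current variable scope; testLinear below relies on that
# variable creation and reuse behavior.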
class RNNCellTest(test.TestCase):
def testLinear(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0)):
x = array_ops.zeros([1, 2])
l = linear([x], 2, False)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([l], {x.name: np.array([[1., 2.]])})
self.assertAllClose(res[0], [[3.0, 3.0]])
# Checks prevent you from accidentally creating a shared function.
with self.assertRaises(ValueError):
l1 = linear([x], 2, False)
# But you can create a new one in a new scope and share the variables.
with variable_scope.variable_scope("l1") as new_scope:
l1 = linear([x], 2, False)
with variable_scope.variable_scope(new_scope, reuse=True):
linear([l1], 2, False)
self.assertEqual(len(variables_lib.trainable_variables()), 2)
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellNotTrainable(self):
with self.test_session() as sess:
def not_trainable_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5),
custom_getter=not_trainable_getter):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertFalse(cell.trainable_variables)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.non_trainable_variables])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testGRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test GRUCell with input_size != num_units.
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g],
{x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
def testBasicLSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 8])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=False)
g, out_m = cell(x, m)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(
expected_variable_names, [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m],
{x.name: np.array([[1., 1.]]),
m.name: 0.1 * np.ones([1, 8])})
self.assertEqual(len(res), 2)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem = np.array([[
0.68967271, 0.68967271, 0.44848421, 0.44848421, 0.39897051,
0.39897051, 0.24024698, 0.24024698
]])
self.assertAllClose(res[1], expected_mem)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
m = array_ops.zeros([1, 4])
g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m],
{x.name: np.array([[1., 1., 1.]]),
m.name: 0.1 * np.ones([1, 4])})
self.assertEqual(len(res), 2)
def testBasicLSTMCellDimension0Error(self):
"""Tests that dimension 0 in both(x and m) shape must be equal."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size - 1, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run([g, out_m],
{x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size - 1, state_size])})
def testBasicLSTMCellStateSizeError(self):
"""Tests that state_size must be num_units * 2."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 3 # state_size must be num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run([g, out_m],
{x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size, state_size])})
def testBasicLSTMCellStateTupleType(self):
with self.test_session():
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = (array_ops.zeros([1, 2]),) * 2
m1 = (array_ops.zeros([1, 2]),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)],
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(
isinstance(cell.state_size[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(cell.state_size[1], rnn_cell_impl.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
# Pass in LSTMStateTuples
variable_scope.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, dtypes.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(isinstance(zero_state[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(zero_state[1], rnn_cell_impl.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 4])
m1 = array_ops.zeros([1, 4])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 4]),
m1.name: 0.1 * np.ones([1, 4])
})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array(
[[0.68967271, 0.68967271, 0.44848421, 0.44848421]])
expected_mem1 = np.array(
[[0.39897051, 0.39897051, 0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
def testLSTMCell(self):
with self.test_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
output, state = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([output, state], {
x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testLSTMCellVariables(self):
with self.test_session():
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
cell(x, m) # Execute to create variables
variables = variables_lib.global_variables()
self.assertEquals(variables[0].op.name, "root/lstm_cell/kernel")
self.assertEquals(variables[1].op.name, "root/lstm_cell/bias")
self.assertEquals(variables[2].op.name,
"root/lstm_cell/projection/kernel")
def testOutputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.OutputProjectionWrapper(rnn_cell_impl.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.InputProjectionWrapper(
rnn_cell_impl.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, new_m],
{x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testResidualWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
g_res, m_new_res = rnn_cell_impl.ResidualWrapper(base_cell)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testDeviceWrapper(self):
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), "/cpu:14159")
outputs, _ = cell(x, m)
self.assertTrue("cpu:14159" in outputs.device.lower())
def testDeviceWrapperDynamicExecutionNodesAreAllProperlyLocated(self):
if not test.is_gpu_available():
# Can't perform this test w/o a GPU
return
with self.test_session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), "/gpu:0")
with ops.device("/cpu:0"):
outputs, _ = rnn.dynamic_rnn(
cell=cell, inputs=x, dtype=dtypes.float32)
run_metadata = config_pb2.RunMetadata()
opts = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run([variables_lib.global_variables_initializer()])
_ = sess.run(outputs, options=opts, run_metadata=run_metadata)
step_stats = run_metadata.step_stats
ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
gpu_stats = step_stats.dev_stats[ix].node_stats
cpu_stats = step_stats.dev_stats[1 - ix].node_stats
self.assertFalse([s for s in cpu_stats if "gru_cell" in s.node_name])
self.assertTrue([s for s in gpu_stats if "gru_cell" in s.node_name])
def testEmbeddingWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1], dtype=dtypes.int32)
m = array_ops.zeros([1, 2])
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.GRUCell(2), embedding_classes=3, embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, new_m],
{x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.test_session() as sess:
with variable_scope.variable_scope("root"):
inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)
input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.BasicLSTMCell(1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = rnn.dynamic_rnn(
cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=dtypes.float32)
sess.run([variables_lib.global_variables_initializer()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
def testMultiRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_good)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class DropoutWrapperTest(test.TestCase):
def _testDropoutWrapper(self, batch_size=None, time_steps=None,
parallel_iterations=None, **kwargs):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32)
] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
m = rnn_cell_impl.LSTMStateTuple(*[
constant_op.constant(
[[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)
] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=rnn_cell_impl.DropoutWrapper(
rnn_cell_impl.LSTMCell(3), dtype=x.dtype, **kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x,
initial_state=m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
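# Note: variational_recurrent=True is understood to sample the dropout masks
# once and reuse them at every time step (per Gal & Ghahramani); the
# mask-constancy assertions in the recurrent tests below depend on that.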
def testDropoutWrapperKeepAllConstantInput(self):
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepAll(self):
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperWithSeed(self):
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, seed=10,
parallel_iterations=1)
# Clear away the graph and the test session (which keeps variables around)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, seed=10,
parallel_iterations=1)
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
def testDropoutWrapperKeepNoOutput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_none,
state_keep_prob=keep_all)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepNoState(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_all,
state_keep_prob=keep_none)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
self.assertAllClose(np.zeros(res[1].c.shape), res[1].c)
def testDropoutWrapperKeepNoInput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none, output_keep_prob=keep_all,
state_keep_prob=keep_all)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
def testDropoutWrapperRecurrentOutput(self):
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_some,
state_keep_prob=keep_all, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
def testDropoutWrapperRecurrentStateInputAndOutput(self):
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(self):
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7, seed=-234987)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7, seed=-234987)
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
class SlimRNNCellTest(test.TestCase):
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
my_cell = functools.partial(basic_rnn_cell, num_units=2)
# pylint: disable=protected-access
g, _ = rnn_cell_impl._SlimRNNCell(my_cell)(x, m)
# pylint: enable=protected-access
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellMatch(self):
batch_size = 32
input_size = 100
num_units = 10
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inputs = random_ops.random_uniform((batch_size, input_size))
_, initial_state = basic_rnn_cell(inputs, None, num_units)
rnn_cell = rnn_cell_impl.BasicRNNCell(num_units)
outputs, state = rnn_cell(inputs, initial_state)
variable_scope.get_variable_scope().reuse_variables()
my_cell = functools.partial(basic_rnn_cell, num_units=num_units)
# pylint: disable=protected-access
slim_cell = rnn_cell_impl._SlimRNNCell(my_cell)
# pylint: enable=protected-access
slim_outputs, slim_state = slim_cell(inputs, initial_state)
self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())
self.assertEqual(slim_state.get_shape(), state.get_shape())
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([slim_outputs, slim_state, outputs, state])
self.assertAllClose(res[0], res[2])
self.assertAllClose(res[1], res[3])
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
batch_size = inputs.get_shape()[0]
dtype = inputs.dtype
else:
batch_size = 0
dtype = dtypes.float32
init_output = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_state = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_output.set_shape([batch_size, num_units])
init_state.set_shape([batch_size, num_units])
return init_output, init_state
else:
with variable_scope.variable_scope(scope, "basic_rnn_cell",
[inputs, state]):
output = math_ops.tanh(linear([inputs, state], num_units, True))
return output, output
if __name__ == "__main__":
test.main()
| apache-2.0 |
geseib/kubernetes | cluster/juju/return-node-ips.py | 310 | 1024 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
# This script helps parse out the private IP addresses from the
# `juju run` command's JSON object, see cluster/juju/util.sh
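# An illustrative (assumed) shape of that JSON object -- the exact field set
# depends on the juju version in use; only 'Stdout' is relied on below:
#   [{"MachineId": "1", "Stdout": "10.245.0.4\n", "ReturnCode": 0},
#    {"MachineId": "2", "Stdout": "10.245.0.5\n", "ReturnCode": 0}]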
if len(sys.argv) > 1:
# It takes the JSON output as the first argument.
nodes = json.loads(sys.argv[1])
    # There can be multiple nodes; print the Stdout from each one.
for num in nodes:
print num['Stdout'].rstrip()
else:
exit(1)
| apache-2.0 |
crisisking/udbraaains | brains/namelist/migrations/0006_auto.py | 1 | 2967 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Player', fields ['is_dead']
db.create_index('namelist_player', ['is_dead'])
# Adding index on 'Player', fields ['profile_id']
db.create_index('namelist_player', ['profile_id'])
def backwards(self, orm):
# Removing index on 'Player', fields ['profile_id']
db.delete_index('namelist_player', ['profile_id'])
# Removing index on 'Player', fields ['is_dead']
db.delete_index('namelist_player', ['is_dead'])
models = {
'mapping.location': {
'Meta': {'unique_together': "(('x', 'y'),)", 'object_name': 'Location'},
'building_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'has_tree': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'suburb': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'namelist.category': {
'Meta': {'object_name': 'Category'},
'color_code': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'namelist.player': {
'Meta': {'object_name': 'Player'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['namelist.Category']", 'null': 'True', 'blank': 'True'}),
'group_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_dead': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mapping.Location']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'profile_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'db_index': 'True'}),
'scrape_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['namelist']
| bsd-3-clause |
SimVascular/VTK | ThirdParty/Twisted/twisted/test/test_manhole.py | 41 | 2092 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.manhole import service
from twisted.spread.util import LocalAsRemote
class Dummy:
pass
class DummyTransport:
def getHost(self):
return 'INET', '127.0.0.1', 0
class DummyManholeClient(LocalAsRemote):
zero = 0
broker = Dummy()
broker.transport = DummyTransport()
def __init__(self):
self.messages = []
def console(self, messages):
self.messages.extend(messages)
def receiveExplorer(self, xplorer):
pass
def setZero(self):
self.zero = len(self.messages)
def getMessages(self):
return self.messages[self.zero:]
# local interface
sync_console = console
sync_receiveExplorer = receiveExplorer
sync_setZero = setZero
sync_getMessages = getMessages
class ManholeTest(unittest.TestCase):
"""Various tests for the manhole service.
    Both the importIdentity and importMain tests are known to fail
when the __name__ in the manhole namespace is set to certain
values.
"""
def setUp(self):
self.service = service.Service()
self.p = service.Perspective(self.service)
self.client = DummyManholeClient()
self.p.attached(self.client, None)
def test_importIdentity(self):
"""Making sure imported module is the same as one previously loaded.
"""
self.p.perspective_do("from twisted.manhole import service")
self.client.setZero()
self.p.perspective_do("int(service is sys.modules['twisted.manhole.service'])")
msg = self.client.getMessages()[0]
self.assertEqual(msg, ('result',"1\n"))
def test_importMain(self):
"""Trying to import __main__"""
self.client.setZero()
self.p.perspective_do("import __main__")
if self.client.getMessages():
msg = self.client.getMessages()[0]
if msg[0] in ("exception","stderr"):
self.fail(msg[1])
#if __name__=='__main__':
# unittest.main()
| bsd-3-clause |
jab1982/opennsa | opennsa/config.py | 1 | 8668 | """
Configuration reader and defaults.
Author: Henrik Thostrup Jensen <[email protected]>
Copyright: NORDUnet (2011)
"""
import os
import ConfigParser
from opennsa import constants as cnt
# defaults
DEFAULT_CONFIG_FILE = '/etc/opennsa.conf'
DEFAULT_LOG_FILE = '/var/log/opennsa.log'
DEFAULT_TLS = 'true'
DEFAULT_TOPOLOGY_FILE = '/usr/local/share/nsi/topology.owl'
DEFAULT_TCP_PORT = 9080
DEFAULT_TLS_PORT = 9443
DEFAULT_VERIFY = True
DEFAULT_CERTIFICATE_DIR = '/etc/ssl/certs' # This will work on most modern linux distros
# config blocks and options
BLOCK_SERVICE = 'service'
BLOCK_DUD = 'dud'
BLOCK_JUNIPER_EX = 'juniperex'
BLOCK_JUNOS = 'junos'
BLOCK_FORCE10 = 'force10'
BLOCK_ARGIA = 'argia'
BLOCK_BROCADE = 'brocade'
BLOCK_DELL = 'dell'
BLOCK_NCSVPN = 'ncsvpn'
# service block
NETWORK_NAME = 'network' # mandatory
LOG_FILE = 'logfile'
HOST = 'host'
PORT = 'port'
TLS = 'tls'
NRM_MAP_FILE = 'nrmmap'
PEERS = 'peers'
POLICY = 'policy'
PLUGIN = 'plugin'
# database
DATABASE = 'database' # mandatory
DATABASE_USER = 'dbuser' # mandatory
DATABASE_PASSWORD = 'dbpassword' # can be none (os auth)
# tls
KEY = 'key' # mandatory, if tls is set
CERTIFICATE = 'certificate' # mandatory, if tls is set
CERTIFICATE_DIR = 'certdir' # mandatory (but dir can be empty)
VERIFY_CERT = 'verify'
ALLOWED_HOSTS = 'allowedhosts' # comma separated list
# generic ssh stuff, don't use directly
_SSH_HOST = 'host'
_SSH_PORT = 'port'
_SSH_HOST_FINGERPRINT = 'fingerprint'
_SSH_USER = 'user'
_SSH_PASSWORD = 'password'
_SSH_PUBLIC_KEY = 'publickey'
_SSH_PRIVATE_KEY = 'privatekey'
# juniper block - same for ex/qxf backend and mx backend
JUNIPER_HOST = _SSH_HOST
JUNIPER_PORT = _SSH_PORT
JUNIPER_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
JUNIPER_USER = _SSH_USER
JUNIPER_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
JUNIPER_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
# force10 block
FORCE10_HOST = _SSH_HOST
FORCE10_PORT = _SSH_PORT
FORCE10_USER = _SSH_USER
FORCE10_PASSWORD = _SSH_PASSWORD
FORCE10_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
FORCE10_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
FORCE10_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
# argia block
ARGIA_COMMAND_DIR = 'commanddir'
ARGIA_COMMAND_BIN = 'commandbin'
# Brocade block
BROCADE_HOST = _SSH_HOST
BROCADE_PORT = _SSH_PORT
BROCADE_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
BROCADE_USER = _SSH_USER
BROCADE_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
BROCADE_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
BROCADE_ENABLE_PASSWORD = 'enablepassword'
# Dell PowerConnect
DELL_HOST = _SSH_HOST
DELL_PORT = _SSH_PORT
DELL_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
DELL_USER = _SSH_USER
DELL_PASSWORD = _SSH_PASSWORD
# NCS VPN Backend
NCS_SERVICES_URL = 'url'
NCS_USER = 'user'
NCS_PASSWORD = 'password'
class ConfigurationError(Exception):
"""
Raised in case of invalid/inconsistent configuration.
"""
class Peer(object):
def __init__(self, url, cost):
self.url = url
self.cost = cost
def readConfig(filename):
cfg = ConfigParser.SafeConfigParser()
cfg.add_section(BLOCK_SERVICE)
cfg.read( [ filename ] )
return cfg
def readVerifyConfig(cfg):
"""
Read a config and verify that things are correct. Will also fill in
default values where applicable.
This is supposed to be used during application creation (before service
    start) to ensure that simple configuration errors do not pop up after
daemonization.
Returns a "verified" config, which is a dictionary.
"""
vc = {}
try:
vc[NETWORK_NAME] = cfg.get(BLOCK_SERVICE, NETWORK_NAME)
except ConfigParser.NoOptionError:
raise ConfigurationError('No network name specified in configuration file (mandatory)')
try:
vc[LOG_FILE] = cfg.get(BLOCK_SERVICE, LOG_FILE)
except ConfigParser.NoOptionError:
vc[LOG_FILE] = DEFAULT_LOG_FILE
try:
nrm_map_file = cfg.get(BLOCK_SERVICE, NRM_MAP_FILE)
if not os.path.exists(nrm_map_file):
raise ConfigurationError('Specified NRM mapping file does not exist (%s)' % nrm_map_file)
vc[NRM_MAP_FILE] = nrm_map_file
except ConfigParser.NoOptionError:
vc[NRM_MAP_FILE] = None
try:
peers_raw = cfg.get(BLOCK_SERVICE, PEERS)
vc[PEERS] = [ Peer(purl, 1) for purl in peers_raw.split('\n') ]
except ConfigParser.NoOptionError:
vc[PEERS] = None
try:
vc[HOST] = cfg.get(BLOCK_SERVICE, HOST)
except ConfigParser.NoOptionError:
vc[HOST] = None
try:
vc[TLS] = cfg.getboolean(BLOCK_SERVICE, TLS)
except ConfigParser.NoOptionError:
vc[TLS] = DEFAULT_TLS
try:
vc[PORT] = cfg.getint(BLOCK_SERVICE, PORT)
except ConfigParser.NoOptionError:
vc[PORT] = DEFAULT_TLS_PORT if vc[TLS] else DEFAULT_TCP_PORT
try:
policies = cfg.get(BLOCK_SERVICE, POLICY).split(',')
for policy in policies:
if not policy in (cnt.REQUIRE_USER, cnt.REQUIRE_TRACE):
raise ConfigurationError('Invalid policy: %s' % policy)
vc[POLICY] = policies
except ConfigParser.NoOptionError:
vc[POLICY] = []
try:
vc[PLUGIN] = cfg.get(BLOCK_SERVICE, PLUGIN)
except ConfigParser.NoOptionError:
vc[PLUGIN] = None
# database
try:
vc[DATABASE] = cfg.get(BLOCK_SERVICE, DATABASE)
except ConfigParser.NoOptionError:
raise ConfigurationError('No database specified in configuration file (mandatory)')
try:
vc[DATABASE_USER] = cfg.get(BLOCK_SERVICE, DATABASE_USER)
except ConfigParser.NoOptionError:
raise ConfigurationError('No database user specified in configuration file (mandatory)')
try:
vc[DATABASE_PASSWORD] = cfg.get(BLOCK_SERVICE, DATABASE_PASSWORD)
except ConfigParser.NoOptionError:
vc[DATABASE_PASSWORD] = None
# we always extract certdir and verify as we need that for performing https requests
try:
certdir = cfg.get(BLOCK_SERVICE, CERTIFICATE_DIR)
if not os.path.exists(certdir):
raise ConfigurationError('Specified certdir does not exist (%s)' % certdir)
vc[CERTIFICATE_DIR] = certdir
except ConfigParser.NoOptionError, e:
vc[CERTIFICATE_DIR] = DEFAULT_CERTIFICATE_DIR
try:
vc[VERIFY_CERT] = cfg.getboolean(BLOCK_SERVICE, VERIFY_CERT)
except ConfigParser.NoOptionError:
vc[VERIFY_CERT] = DEFAULT_VERIFY
# tls
if vc[TLS]:
try:
hostkey = cfg.get(BLOCK_SERVICE, KEY)
hostcert = cfg.get(BLOCK_SERVICE, CERTIFICATE)
if not os.path.exists(hostkey):
raise ConfigurationError('Specified hostkey does not exist (%s)' % hostkey)
if not os.path.exists(hostcert):
raise ConfigurationError('Specified hostcert does not exist (%s)' % hostcert)
vc[KEY] = hostkey
vc[CERTIFICATE] = hostcert
try:
allowed_hosts_cfg = cfg.get(BLOCK_SERVICE, ALLOWED_HOSTS)
vc[ALLOWED_HOSTS] = allowed_hosts_cfg.split(',')
except:
pass
except ConfigParser.NoOptionError, e:
# Not enough options for configuring tls context
raise ConfigurationError('Missing TLS option: %s' % str(e))
# backends
backends = {}
for section in cfg.sections():
if section == 'service':
continue
if ':' in section:
            backend_type, name = section.split(':', 1)
else:
backend_type = section
name = ''
if name in backends:
raise ConfigurationError('Can only have one backend named "%s"' % name)
if backend_type in (BLOCK_DUD, BLOCK_JUNIPER_EX, BLOCK_JUNOS, BLOCK_FORCE10, BLOCK_BROCADE, BLOCK_DELL, BLOCK_NCSVPN):
backend_conf = dict( cfg.items(section) )
backend_conf['_backend_type'] = backend_type
backends[name] = backend_conf
if not backends:
raise ConfigurationError('No or invalid backend specified')
vc['backend'] = backends
return vc
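# Minimal usage sketch (the path and error handling are illustrative only):
#
#   cfg = readConfig('/etc/opennsa.conf')
#   try:
#       vc = readVerifyConfig(cfg)
#   except ConfigurationError, e:
#       print 'Invalid configuration: %s' % str(e)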
| bsd-3-clause |
jrwdunham/old-webapp | onlinelinguisticdatabase/lib/app_globals.py | 1 | 16584 | # -*- coding: UTF-8 -*-
# Copyright (C) 2010 Joel Dunham
#
# This file is part of OnlineLinguisticDatabase.
#
# OnlineLinguisticDatabase is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OnlineLinguisticDatabase is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OnlineLinguisticDatabase. If not, see
# <http://www.gnu.org/licenses/>.
"""The application's Globals object"""
import string
from pylons import session
from orthography import Orthography
class Globals(object):
"""Globals acts as a container for objects available throughout the
life of the application
"""
def __init__(self):
"""One instance of Globals is created during application
initialization and is available during requests via the
'app_globals' variable
"""
# Options for searchTypes: these are used by the queryBuilder module.
self.searchTypes = [
'as a phrase',
'all of these',
'any of these',
'as a reg exp',
'exactly'
]
# Options for dropdown menu between search expressions 1 and 2
self.andOrNot = [
('and_', 'and'),
('or_', 'or'),
('not_', 'and not')
]
# Search Locations - columns that can be searched in search expressions
self.searchLocations = {
'form': [
('transcription', 'orthographic transcription'),
('phoneticTranscription', 'broad phonetic transcription'),
('narrowPhoneticTranscription', 'narrow phonetic transcription'),
('gloss', 'gloss'),
('morphemeBreak', 'morpheme break'),
('morphemeGloss', 'morpheme gloss'),
('comments', 'general comments'),
('speakerComments', 'speaker comments'),
('context', 'context'),
('syntacticCategoryString', 'syntactic category string'),
('id', 'ID')
],
'file': [
('name', 'name'),
('description', 'description'),
('id', 'ID')
],
'collection': [
('title', 'title'),
('type', 'type'),
('description', 'description'),
('contents', 'contents'),
('id', 'ID')
]
}
# Search Integer Filter Locations - columns that can be searched in
# integer filters
self.searchIntegerFilterLocations = {
'form': [
('id', 'ID')
],
'file': [
('id', 'ID'),
('size', 'size')
],
'collection': [
('id', 'ID')
]
}
# Grammaticalities: possible values in grammaticality and
# glossGrammaticality fields
self.grammaticalities = [u'', u'*', u'?', u'#']
# Export Options: correspond to defs in /templates/base/exporter.html
self.exportOptions = [
('t', ' Plain text: transcription only'),
('t_g', ' Plain text: transcription & gloss'),
('t_mb_mg_g', """ Plain text: transcription, morpheme break,
morpheme gloss & gloss"""),
('all', ' Plain text: all fields')
]
# Number of Forms to display per page
self.form_items_per_page = 10
# Number of Files to display per page
self.file_items_per_page = 10
# Number of Collections to display per page
self.collection_items_per_page = 100
# Number of previous (Form) searches that are remembered in the session
self.maxNoPreviousSearches = 10
# The roles that users of the OLD may have
self.roles = ['administrator', 'contributor', 'viewer']
# The ways in which the content of a Collection (Forms and textual
# commentary) can be displayed
self.collectionViewTypes = ['long', 'short', 'columns']
# The MIME types of the Files that can be uploaded to the OLD
# Values are user-friendly names of the file types.
# Empty values indicate that key.split('/')[0] should be used.
# See http://en.wikipedia.org/wiki/Internet_media_type
self.allowedFileTypes = {
u'text/plain': u'plain text',
u'application/x-latex': u'LaTeX document',
u'application/msword': u'MS Word document',
u'application/vnd.ms-powerpoint': u'MS PowerPoint document',
            u'application/vnd.openxmlformats-officedocument.wordprocessingml.document': u'Office Open XML (.docx)',
            u'application/vnd.oasis.opendocument.text': u'Open Document Format (.odt)',
u'application/pdf': u'PDF',
u'image/gif': u'',
u'image/jpeg': u'',
u'image/png': u'',
u'audio/mpeg': u'',
u'audio/ogg': u'',
u'audio/x-wav': u'',
u'video/mpeg': u'',
u'video/mp4': u'',
u'video/ogg': u'',
u'video/quicktime': u'',
u'video/x-ms-wmv': u''
}
# Valid morpheme delimiters, i.e., characters that can occur between morphemes
self.morphDelimiters = ['-', '=']
# Valid punctuation.
self.punctuation = list(u""".,;:!?'"\u2018\u2019\u201C\u201D[]{}()-""")
# Collection types are the basic categories of Collections
self.collectionTypes = [u'story', u'elicitation', u'paper',
u'discourse', u'other']
self.collectionTypesPlurals = {
u'elicitation': u'elicitations',
u'story': u'stories',
u'paper': u'papers',
u'discourse': u'discourses',
u'other': u'other'
}
self.topPrimaryMenuItems = [
{
'id': 'database',
'name': 'Database',
'url': '/home',
'accesskey': 'h',
'title': 'Database mode'
},
{
'id': 'dictionary',
'name': 'Dictionary',
'url': '/dictionary/browse',
'accesskey': 'd',
'title': 'Dictionary mode'
},
{
'id': 'help',
'name': 'Help',
'url': '/help',
'title': 'Help with using the OLD'
},
{
'id': 'settings',
'name': 'Settings',
'url': '/settings',
'title': 'View and edit system-wide settings'
}
]
self.topSecondaryMenuItemChoices = {
'database': [
{
'id': 'people',
'name': 'People',
'url': '/people',
'accesskey': 'p',
'title': 'Info about Speakers and Researchers'
},
{
'id': 'tag',
'name': 'Tags',
'url': '/tag',
'title': 'Keywords, Categories and Elicitation Methods',
'accesskey':'t'
},
{
'id': 'source',
'name': 'Sources',
'url': '/source',
'title': 'Info about Sources'
},
{
'id': 'memory',
'name': 'Memory',
'url': '/memory',
'accesskey': 'm',
'title': 'Forms that you are currently interested in'
}
],
'dictionary': [
{
'id': 'dictionarybrowse',
'name': 'Browse',
'url': '/dictionary/browse',
'title': 'Browse the dictionary'
},
{
'id': 'dictionarysearch',
'name': 'Search',
'url':' /dictionary/search',
'title':'Search the dictionary'
}
],
'help': [
{
'id': 'helpolduserguide',
'name': 'OLD User Guide',
'url': '/help/olduserguide',
'title': 'View the OLD user guide'
},
{
'id': 'helpapplicationhelp',
'name': 'Help Page',
'url': '/help/applicationhelp',
'title': "View this OLD application's help page"
}
]
}
self.sideMenuItems = {
'form': [
{
'id': 'formadd',
'name': 'Add',
'url': '/form/add',
'accesskey': 'a',
'title': 'Add a Form'
},
{
'id': 'formsearch',
'name': 'Search',
'url': '/form/search',
'accesskey': 's',
'title': 'Search for Forms'
}
],
'file': [
{
'id': 'fileadd',
'name': 'Add',
'url': '/file/add',
'accesskey': 'q',
'title': 'Create a new File'
},
{
'id': 'filesearch',
'name': 'Search',
'url': '/file/search',
'accesskey': 'w',
'title': 'Search for Files'
}
],
'collection': [
{
'id': 'collectionadd',
'name': 'Add',
'url': '/collection/add',
'accesskey': 'z',
'title': 'Add a new Collection'
},
{
'id': 'collectionsearch',
'name': 'Search',
'url': '/collection/search',
'accesskey': 'x',
'title': 'Search for Collections'
}
]
}
# MUTABLE APP GLOBALS
# these attributes are set with defaults at initialization but
# may be changed over the lifespan of the application
# APPLICATION SETTINGS
# name of the object language, metalanguage, etc.
defaultOrthography = ','.join(list(string.ascii_lowercase))
self.objectLanguageName = u'Anonymous'
self.objectLanguageId = u''
self.metaLanguageName = u'Unknown'
self.headerImageName = u''
self.colorsCSS = 'green.css'
self.morphemeBreakIsObjectLanguageString = u'no'
self.metaLanguageOrthography = Orthography(defaultOrthography)
self.OLOrthographies = {
u'Orthography 1': (
u'Unnamed',
Orthography(
defaultOrthography, lowercase=1, initialGlottalStops=1
)
)
}
self.storageOrthography = self.OLOrthographies[
u'Orthography 1']
self.defaultInputOrthography = self.OLOrthographies[
u'Orthography 1']
self.defaultOutputOrthography = self.OLOrthographies[
u'Orthography 1']
self.inputToStorageTranslator = None
self.storageToInputTranslator = None
self.storageToOutputTranslator = None
# formCount is the number of Forms in the OLD application.
# This variable is updated on the deletion and addition of Forms.
# THIS IS PROBABLY NOT A GOOD IDEA BECAUSE OF MULTI-THREADING.
# JUST DO A SQLALCHEMY COUNT(ID) QUERY!
self.formCount = None
# Secondary Object Lists
# These variables are set by the function
# updateSecondaryObjectsInAppGlobals() in lib/functions.py
self.speakers = []
self.users = []
self.nonAdministrators = []
self.unrestrictedUsers = []
self.sources = []
self.syncats = []
self.keywords = []
self.elicitationMethods = []
def getActiveTopPrimaryMenuItem(self, url):
"""Given the url of the current page, return the appropriate active top
primary menu item.
"""
result = ''
controller = url.split('/')[1]
controllerToPrimaryMenuItem = {
'form': 'database',
'file': 'database',
'collection': 'database',
'people': 'database',
'tag': 'database',
'source': 'database',
'memory': 'database',
'speaker': 'database',
'researcher': 'database',
'key': 'database',
'category': 'database',
'method': 'database',
'home': 'database',
'settings': 'settings',
'dictionary': 'dictionary',
'help': 'help'
}
try:
result = controllerToPrimaryMenuItem[controller]
except KeyError:
pass
return result
def getMenuItemsTurnedOnByController(self, url):
"""Certain controllers need to make certain menu items active; encode
that here.
"""
result = []
controller = url.split('/')[1]
controllerXTurnsOn = {
'speaker': ['people'],
'researcher': ['people'],
'key': ['tag'],
'category': ['tag'],
'method': ['tag']
}
try:
result = controllerXTurnsOn[controller]
except KeyError:
pass
return result
def getActiveMenuItems(self, url):
""" Function returns the ID of each menu item that should be active
given a particular URL.
Partially logical, partially ad hoc specification.
"""
activeMenuItems = []
controller = url.split('/')[1]
controllerAction = ''.join(url.split('/')[1:3])
activeMenuItems.append(self.getActiveTopPrimaryMenuItem(url))
activeMenuItems += self.getMenuItemsTurnedOnByController(url)
activeMenuItems.append(controllerAction)
activeMenuItems.append(controller)
return activeMenuItems
def authorizedMenuItem(self, menuItem):
"""Return True if menu item should be viewable by current user;
else False.
"""
if 'authorizationLevel' not in menuItem or (
'user_role' in session and session['user_role'] in menuItem[
'authorizationLevel']
) :
return True
else:
return False
def getTopSecondaryMenuItems(self, url):
"""The menu items in the top secondary tier are determined by the active
menu item in the top primary tier. Return an empty list if the top
secondary tier should be omitted.
"""
activeTopPrimaryMenuItem = self.getActiveTopPrimaryMenuItem(url)
topSecondaryMenuItems = []
try:
temp = self.topSecondaryMenuItemChoices[activeTopPrimaryMenuItem]
topSecondaryMenuItems = [x for x in temp
if self.authorizedMenuItem(x)]
except KeyError:
pass
return topSecondaryMenuItems
def getTopPrimaryMenuItems(self):
"""Return top priamry menu items for which the current user is
authorized.
"""
return [x for x in self.topPrimaryMenuItems
if self.authorizedMenuItem(x)]
| gpl-3.0 |
Hellowlol/PyTunes | libs/mutagen/monkeysaudio.py | 16 | 2785 | # A Monkey's Audio (APE) reader/tagger
#
# Copyright 2006 Lukas Lalinsky <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Monkey's Audio streams with APEv2 tags.
Monkey's Audio is a very efficient lossless audio compressor developed
by Matt Ashland.
For more information, see http://www.monkeysaudio.com/.
"""
__all__ = ["MonkeysAudio", "Open", "delete"]
import struct
from mutagen.apev2 import APEv2File, error, delete
from mutagen._util import cdata
class MonkeysAudioHeaderError(error):
pass
class MonkeysAudioInfo(object):
"""Monkey's Audio stream information.
Attributes:
* channels -- number of audio channels
* length -- file length in seconds, as a float
* sample_rate -- audio sampling rate in Hz
* bits_per_sample -- bits per sample
* version -- Monkey's Audio stream version, as a float (eg: 3.99)
"""
def __init__(self, fileobj):
header = fileobj.read(76)
if len(header) != 76 or not header.startswith("MAC "):
raise MonkeysAudioHeaderError("not a Monkey's Audio file")
self.version = cdata.ushort_le(header[4:6])
if self.version >= 3980:
(blocks_per_frame, final_frame_blocks, total_frames,
self.bits_per_sample, self.channels,
self.sample_rate) = struct.unpack("<IIIHHI", header[56:76])
else:
compression_level = cdata.ushort_le(header[6:8])
self.channels, self.sample_rate = struct.unpack(
"<HI", header[10:16])
total_frames, final_frame_blocks = struct.unpack(
"<II", header[24:32])
if self.version >= 3950:
blocks_per_frame = 73728 * 4
elif self.version >= 3900 or (self.version >= 3800 and
compression_level == 4):
blocks_per_frame = 73728
else:
blocks_per_frame = 9216
self.version /= 1000.0
self.length = 0.0
if self.sample_rate != 0 and total_frames > 0:
total_blocks = ((total_frames - 1) * blocks_per_frame +
final_frame_blocks)
self.length = float(total_blocks) / self.sample_rate
def pprint(self):
return "Monkey's Audio %.2f, %.2f seconds, %d Hz" % (
self.version, self.length, self.sample_rate)
class MonkeysAudio(APEv2File):
_Info = MonkeysAudioInfo
_mimes = ["audio/ape", "audio/x-ape"]
@staticmethod
def score(filename, fileobj, header):
return header.startswith("MAC ") + filename.lower().endswith(".ape")
Open = MonkeysAudio
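# Minimal usage sketch (the file name is illustrative; the standard mutagen
# File-style API applies):
#
#   from mutagen.monkeysaudio import MonkeysAudio
#   f = MonkeysAudio("example.ape")
#   print f.info.pprint() # e.g. "Monkey's Audio 3.99, 182.10 seconds, 44100 Hz"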
| gpl-3.0 |
bdang2012/taiga-back-casting | taiga/projects/mixins/on_destroy.py | 1 | 1879 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction as tx
from taiga.base.api.utils import get_object_or_404
#############################################
# ViewSets
#############################################
class MoveOnDestroyMixin:
@tx.atomic
def destroy(self, request, *args, **kwargs):
move_to = self.request.QUERY_PARAMS.get('moveTo', None)
if move_to is None:
return super().destroy(request, *args, **kwargs)
obj = self.get_object_or_none()
move_item = get_object_or_404(self.model, id=move_to)
self.check_permissions(request, 'destroy', obj)
qs = self.move_on_destroy_related_class.objects.filter(**{self.move_on_destroy_related_field: obj})
qs.update(**{self.move_on_destroy_related_field: move_item})
if getattr(obj.project, self.move_on_destroy_project_default_field) == obj:
setattr(obj.project, self.move_on_destroy_project_default_field, move_item)
obj.project.save()
return super().destroy(request, *args, **kwargs)
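# A hypothetical subclass wiring the mixin up (all names below are
# illustrative, not defined in this module):
#
# class PointsViewSet(MoveOnDestroyMixin, ModelCrudViewSet):
#     model = Points
#     move_on_destroy_related_class = UserStory
#     move_on_destroy_related_field = "points"
#     move_on_destroy_project_default_field = "default_points"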
| agpl-3.0 |
klundberg/swift-corelibs-foundation | lib/target.py | 2 | 12322 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
from .config import Configuration
import platform
class ArchType:
UnknownArch = 0
armv7 = 1
armeb = 2
aarch64 = 3
aarch64_be = 4
bpfel = 5
bpfeb = 6
hexagon = 7
mips = 8
mipsel = 9
mips64 = 10
mips64el = 11
msp430 = 12
ppc = 13
ppc64 = 14
ppc64le = 15
r600 = 16
amdgcn = 17
sparc = 18
sparcv9 = 19
sparcel = 20
systemz = 21
tce = 22
thumb = 23
thumbeb = 24
x86 = 25
x86_64 = 26
xcore = 27
nvptx = 28
nvptx64 = 29
le32 = 30
le64 = 31
amdil = 32
amdil64 = 33
hsail = 34
hsail64 = 35
spir = 36
spir64 = 37
kalimba = 38
shave = 39
armv6 = 40
s390x = 41
    # Do not assume that this is a 1:1 mapping. This should follow
# canonical naming conventions for arm, etc. architectures.
# See apple/swift PR #608
@staticmethod
def to_string(value):
if value == ArchType.armv7:
return "armv7"
if value == ArchType.armv6:
return "armv6"
if value == ArchType.armeb:
return "armeb"
if value == ArchType.aarch64:
return "aarch64"
if value == ArchType.aarch64_be:
return "aarch64_be"
if value == ArchType.bpfel:
return "bpfel"
if value == ArchType.bpfeb:
return "bpfeb"
if value == ArchType.hexagon:
return "hexagon"
if value == ArchType.mips:
return "mips"
if value == ArchType.mipsel:
return "mipsel"
if value == ArchType.mips64:
return "mips64"
if value == ArchType.mips64el:
return "mips64el"
if value == ArchType.msp430:
return "msp430"
if value == ArchType.ppc:
return "ppc"
if value == ArchType.ppc64:
return "ppc64"
if value == ArchType.ppc64le:
return "ppc64le"
if value == ArchType.r600:
return "r600"
if value == ArchType.amdgcn:
return "amdgcn"
if value == ArchType.sparc:
return "sparc"
if value == ArchType.sparcv9:
return "sparcv9"
if value == ArchType.sparcel:
return "sparcel"
if value == ArchType.systemz:
return "systemz"
if value == ArchType.tce:
return "tce"
if value == ArchType.thumb:
return "armv7"
if value == ArchType.thumbeb:
return "thumbeb"
if value == ArchType.x86:
return "i386"
if value == ArchType.x86_64:
return "x86_64"
if value == ArchType.xcore:
return "xcore"
if value == ArchType.nvptx:
return "nvptx"
if value == ArchType.nvptx64:
return "nvptx64"
if value == ArchType.le32:
return "le32"
if value == ArchType.le64:
return "le64"
if value == ArchType.amdil:
return "amdil"
if value == ArchType.amdil64:
return "amdil64"
if value == ArchType.hsail:
return "hsail"
if value == ArchType.hsail64:
return "hsail64"
if value == ArchType.spir:
return "spir"
if value == ArchType.spir64:
return "spir64"
if value == ArchType.kalimba:
return "kalimba"
if value == ArchType.shave:
return "shave"
if value == ArchType.s390x:
return "s390x"
return "unknown"
# Not 1:1, See to_string
@staticmethod
def from_string(string):
if string == "armeb":
return ArchType.armeb
if string == "arm":
return ArchType.armv7
if string == "armv7":
return ArchType.armv7
if string == "armv7l":
return ArchType.armv7
if string == "armv6":
return ArchType.armv6
if string == "armv6l":
return ArchType.armv6
if string == "aarch64":
return ArchType.aarch64
if string == "aarch64_be":
return ArchType.aarch64_be
if string == "bpfel":
return ArchType.bpfel
if string == "bpfeb":
return ArchType.bpfeb
if string == "hexagon":
return ArchType.hexagon
if string == "mips":
return ArchType.mips
if string == "mipsel":
return ArchType.mipsel
if string == "mips64":
return ArchType.mips64
if string == "mips64el":
return ArchType.mips64el
if string == "msp430":
return ArchType.msp430
if string == "ppc":
return ArchType.ppc
if string == "ppc64":
return ArchType.ppc64
if string == "ppc64le":
return ArchType.ppc64le
if string == "r600":
return ArchType.r600
if string == "amdgcn":
return ArchType.amdgcn
if string == "sparc":
return ArchType.sparc
if string == "sparcv9":
return ArchType.sparcv9
if string == "sparcel":
return ArchType.sparcel
if string == "systemz":
return ArchType.systemz
if string == "tce":
return ArchType.tce
if string == "thumb":
return ArchType.thumb
if string == "thumbeb":
return ArchType.thumbeb
if string == "x86":
return ArchType.x86
if string == "x86_64":
return ArchType.x86_64
if string == "xcore":
return ArchType.xcore
if string == "nvptx":
return ArchType.nvptx
if string == "nvptx64":
return ArchType.nvptx64
if string == "le32":
return ArchType.le32
if string == "le64":
return ArchType.le64
if string == "amdil":
return ArchType.amdil
if string == "amdil64":
return ArchType.amdil64
if string == "hsail":
return ArchType.hsail
if string == "hsail64":
return ArchType.hsail64
if string == "spir":
return ArchType.spir
if string == "spir64":
return ArchType.spir64
if string == "kalimba":
return ArchType.kalimba
if string == "shave":
return ArchType.shave
if string == "s390x":
return ArchType.s390x
return ArchType.UnknownArch
class ArchSubType:
NoSubArch = 0
ARMSubArch_v8_1a = 1
ARMSubArch_v8 = 2
ARMSubArch_v7 = 3
ARMSubArch_v7em = 4
ARMSubArch_v7m = 5
ARMSubArch_v7s = 6
ARMSubArch_v6 = 7
ARMSubArch_v6m = 8
ARMSubArch_v6k = 9
ARMSubArch_v6t2 = 10
ARMSubArch_v5 = 11
ARMSubArch_v5te = 12
ARMSubArch_v4t = 13
KalimbaSubArch_v3 = 14
KalimbaSubArch_v4 = 15
KalimbaSubArch_v5 = 16
class OSType:
UnknownOS = 0
CloudABI = 1
Darwin = 2
DragonFly = 3
FreeBSD = 4
IOS = 5
KFreeBSD = 6
Linux = 7
Lv2 = 8
MacOSX = 9
NetBSD = 10
OpenBSD = 11
Solaris = 12
Win32 = 13
Haiku = 14
Minix = 15
RTEMS = 16
NaCl = 17
CNK = 18
Bitrig = 19
AIX = 20
CUDA = 21
NVCL = 22
AMDHSA = 23
PS4 = 24
class ObjectFormat:
UnknownObjectFormat = 0
COFF = 1
ELF = 2
MachO = 3
class EnvironmentType:
UnknownEnvironment = 0
GNU = 1
GNUEABI = 2
GNUEABIHF = 3
GNUX32 = 4
CODE16 = 5
EABI = 6
EABIHF = 7
Android = 8
MSVC = 9
Itanium = 10
Cygnus = 11
class Vendor:
UnknownVendor = 0
Apple = 1
PC = 2
SCEI = 3
BGP = 4
BGQ = 5
Freescale = 6
IBM = 7
ImaginationTechnologies = 8
MipsTechnologies = 9
NVIDIA = 10
CSR = 11
class Target:
triple = None
sdk = None
arch = None
executable_suffix = ""
dynamic_library_prefix = "lib"
dynamic_library_suffix = ".dylib"
static_library_prefix = "lib"
static_library_suffix = ".a"
def __init__(self, triple):
if "linux" in triple:
self.sdk = OSType.Linux
self.dynamic_library_suffix = ".so"
elif "freebsd" in triple:
self.sdk = OSType.FreeBSD
self.dynamic_library_suffix = ".so"
elif "windows" in triple or "win32" in triple:
self.sdk = OSType.Win32
self.dynamic_library_suffix = ".dll"
self.executable_suffix = ".exe"
elif "darwin" in triple:
self.sdk = OSType.MacOSX
else:
print("Unknown platform")
self.triple = triple
comps = triple.split('-')
self.arch = ArchType.from_string(comps[0])
@staticmethod
def default():
arch = ArchType.from_string(platform.machine())
triple = ArchType.to_string(arch)
if platform.system() == "Linux":
if (arch == ArchType.armv6) or (arch == ArchType.armv7):
triple += "-linux-gnueabihf"
else:
triple += "-linux-gnu"
elif platform.system() == "Darwin":
triple += "-apple-darwin"
elif platform.system() == "FreeBSD":
# Make this work on 10 as well.
triple += "-freebsd11.0"
else:
# TODO: This should be a bit more exhaustive
print("unknown host os")
return None
return triple
@property
def swift_triple(self):
triple = ArchType.to_string(self.arch)
if self.sdk == OSType.MacOSX:
return None
elif self.sdk == OSType.Linux:
# FIXME: It would be nice to detect the host ABI here
if (self.arch == ArchType.armv6) or (self.arch == ArchType.armv7):
triple += "-unknown-linux-gnueabihf"
else:
triple += "-unknown-linux"
elif self.sdk == OSType.FreeBSD:
triple += "-unknown-freebsd"
else:
print("unknown sdk for swift")
return None
return triple
@property
def swift_sdk_name(self):
if self.sdk == OSType.MacOSX:
return "macosx"
elif self.sdk == OSType.Linux:
return "linux"
elif self.sdk == OSType.FreeBSD:
return "freebsd"
else:
print("unknown sdk for swift")
return None
@property
def swift_arch(self):
return ArchType.to_string(self.arch)
class TargetConditional:
_sdk = None
_arch = None
_default = None
def __init__(self, sdk = None, arch = None, default = None):
self._sdk = sdk
self._arch = arch
self._default = default
    def evaluate(self, target):
if self._sdk is not None and target.sdk in self._sdk:
return self._sdk[target.sdk]
if self._arch is not None and target.arch in self._arch:
return self._arch[target.arch]
return self._default
@staticmethod
def value(value):
if type(value) is TargetConditional:
            return value.evaluate(Configuration.current.target)
return value
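# Minimal usage sketch (triples and outputs below are illustrative):
#
# if __name__ == '__main__':
#     host = Target(Target.default()) # e.g. "x86_64-linux-gnu" on Linux/x86_64
#     print(host.swift_triple)        # -> "x86_64-unknown-linux"
#     print(host.swift_sdk_name)      # -> "linux"
#     print(Target("armv7-linux-gnueabihf").dynamic_library_suffix) # -> ".so"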
| apache-2.0 |
marcelometal/Django-facebook | facebook_example/facebook_example/urls.py | 2 | 1535 | try:
from django.conf.urls import include, patterns, url
except ImportError:
from django.conf.urls.defaults import include, patterns, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# facebook and registration urls
(r'^facebook/', include('django_facebook.urls')),
(r'^accounts/', include('django_facebook.auth_urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
)
if settings.MODE == 'userena':
urlpatterns += patterns('',
(r'^accounts/', include('userena.urls')),
)
elif settings.MODE == 'django_registration':
urlpatterns += patterns('',
(r'^accounts/', include(
'registration.backends.default.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
)
| bsd-3-clause |
hirokihamasaki/irma | probe/modules/antivirus/avg/avg.py | 1 | 3787 | #
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
import os
import stat
from ..base import Antivirus
log = logging.getLogger(__name__)
class AVGAntiVirusFree(Antivirus):
_name = "AVG AntiVirus Free (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super(AVGAntiVirusFree, self).__init__(*args, **kwargs)
# scan tool variables
self._scan_args = (
"--heur " # use heuristics for scanning
"--paranoid " # Enable paranoid mode. Scan for less dangerous
                          # malware and more time-consuming algorithms.
"--arc " # scan through archives
"--macrow " # report documents with macros.
"--pwdw " # report password protected files
"--pup " # scan for Potentially Unwanted Programs
)
self._scan_patterns = [
re.compile(r'(?P<file>.*)'
r'\s+(Found|Virus found|Potentially harmful program|'
r'Virus identified|Trojan horse)\s+'
r'(?P<name>.*)(\\n)*.*$', re.IGNORECASE)
]
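        # An illustrative line the pattern above is meant to match (shape
        # inferred from the regex, not taken from real avgscan output):
        #   /tmp/eicar.com  Virus identified  EICAR_Test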
def is_error_fn(x):
return x in [1, 2, 3, 6, 7, 8, 9, 10]
# NOTE: do 'man avgscan' for return codes
self._scan_retcodes[self.ScanResult.CLEAN] = lambda x: x in [0]
self._scan_retcodes[self.ScanResult.INFECTED] = lambda x: x in [4, 5]
self._scan_retcodes[self.ScanResult.ERROR] = lambda x: is_error_fn(x)
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
result = None
if self.scan_path:
cmd = self.build_cmd(self.scan_path, '-v')
retcode, stdout, stderr = self.run_cmd(cmd)
if not retcode:
matches = re.search(r'(?P<version>\d+(\.\d+)+)',
stdout,
re.IGNORECASE)
if matches:
result = matches.group('version').strip()
return result
def get_database(self):
"""return list of files in the database"""
# extract folder where are installed definition files
avg_path = '/opt/avg/'
# NOTE: the structure/location of the update folders are documented in
# the /var/lib/avast/Setup/avast.setup script.
search_paths = map(lambda x:
'{avg_path}/av/update/{folder}/'
''.format(avg_path=avg_path, folder=x),
['backup', 'download', 'prepare'])
database_patterns = [
'*',
]
results = []
for pattern in database_patterns:
result = self.locate(pattern, search_paths, syspath=False)
results.extend(result)
return results if results else None
def get_scan_path(self):
"""return the full path of the scan tool"""
paths = self.locate("avgscan")
return paths[0] if paths else None
| apache-2.0 |
nullishzero/Portage | pym/_emerge/create_depgraph_params.py | 2 | 3934 | # Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import logging
from portage.util import writemsg_level
def create_depgraph_params(myopts, myaction):
#configure emerge engine parameters
#
# self: include _this_ package regardless of if it is merged.
# selective: exclude the package if it is merged
# recurse: go into the dependencies
# deep: go into the dependencies of already merged packages
# empty: pretend nothing is merged
# complete: completely account for all known dependencies
# remove: build graph for use in removing packages
# rebuilt_binaries: replace installed packages with rebuilt binaries
# rebuild_if_new_slot: rebuild or reinstall packages when
# slot/sub-slot := operator dependencies can be satisfied by a newer
# slot/sub-slot, so that older packages slots will become eligible for
# removal by the --depclean action as soon as possible
# ignore_built_slot_operator_deps: ignore the slot/sub-slot := operator parts
	# of dependencies that have been recorded when packages were built
myparams = {"recurse" : True}
bdeps = myopts.get("--with-bdeps")
if bdeps is not None:
myparams["bdeps"] = bdeps
ignore_built_slot_operator_deps = myopts.get("--ignore-built-slot-operator-deps")
if ignore_built_slot_operator_deps is not None:
myparams["ignore_built_slot_operator_deps"] = ignore_built_slot_operator_deps
dynamic_deps = myopts.get("--dynamic-deps")
if dynamic_deps is not None:
myparams["dynamic_deps"] = dynamic_deps
if myaction == "remove":
myparams["remove"] = True
myparams["complete"] = True
myparams["selective"] = True
return myparams
rebuild_if_new_slot = myopts.get('--rebuild-if-new-slot')
if rebuild_if_new_slot is not None:
myparams['rebuild_if_new_slot'] = rebuild_if_new_slot
if "--update" in myopts or \
"--newrepo" in myopts or \
"--newuse" in myopts or \
"--reinstall" in myopts or \
"--noreplace" in myopts or \
myopts.get("--selective", "n") != "n":
myparams["selective"] = True
deep = myopts.get("--deep")
if deep is not None and deep != 0:
myparams["deep"] = deep
complete_if_new_use = \
myopts.get("--complete-graph-if-new-use")
if complete_if_new_use is not None:
myparams["complete_if_new_use"] = complete_if_new_use
complete_if_new_ver = \
myopts.get("--complete-graph-if-new-ver")
if complete_if_new_ver is not None:
myparams["complete_if_new_ver"] = complete_if_new_ver
if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
"--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
myparams["complete"] = True
if "--emptytree" in myopts:
myparams["empty"] = True
myparams["deep"] = True
myparams.pop("selective", None)
if "--nodeps" in myopts:
myparams.pop("recurse", None)
myparams.pop("deep", None)
myparams.pop("complete", None)
rebuilt_binaries = myopts.get('--rebuilt-binaries')
if rebuilt_binaries is True or \
rebuilt_binaries != 'n' and \
'--usepkgonly' in myopts and \
myopts.get('--deep') is True and \
'--update' in myopts:
myparams['rebuilt_binaries'] = True
binpkg_respect_use = myopts.get('--binpkg-respect-use')
if binpkg_respect_use is not None:
myparams['binpkg_respect_use'] = binpkg_respect_use
elif '--usepkgonly' not in myopts:
# If --binpkg-respect-use is not explicitly specified, we enable
# the behavior automatically (like requested in bug #297549), as
# long as it doesn't strongly conflict with other options that
# have been specified.
myparams['binpkg_respect_use'] = 'auto'
if myopts.get("--selective") == "n":
# --selective=n can be used to remove selective
# behavior that may have been implied by some
# other option like --update.
myparams.pop("selective", None)
if '--debug' in myopts:
writemsg_level('\n\nmyparams %s\n\n' % myparams,
noiselevel=-1, level=logging.DEBUG)
return myparams
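# Minimal usage sketch (the options dict mirrors parsed emerge flags; the
# values are illustrative):
#
#   myopts = {"--update": True, "--deep": 1, "--with-bdeps": "y"}
#   params = create_depgraph_params(myopts, "merge")
#   # -> {"recurse": True, "bdeps": "y", "selective": True, "deep": 1,
#   #     "binpkg_respect_use": "auto"}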
| gpl-2.0 |
ghold/OneKeySql | onekey/oracle/OkSqlHandler.py | 1 | 1458 | import cx_Oracle
import logging
import os
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
class OkSqlHandler(object):
@classmethod
def setupConn(cls):
# dsn = cx_Oracle.makedsn("10.0.44.99", "1521", "ompdb")
dsn = cx_Oracle.makedsn("10.0.76.128", "1521", "omp2st")
conn = cx_Oracle.connect('OMPBASE', 'OMPBASE', dsn)
return conn
@classmethod
def insertAction(cls, sql):
conn = cls.setupConn()
cursor = conn.cursor()
#logging
        logging.basicConfig(filename='onekey.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.info(sql)
try:
cursor.execute(sql)
except Exception as ex:
logging.error(ex)
finally:
cursor.close()
conn.commit()
conn.close()
#if __name__ == "__main__":
# OkSqlHandler.insertAction("insert into omp.tt_bar_record (BAR_RECORD_ID, OP_CODE, ZONE_CODE, WAYBILL_NO, CONTNR_CODE, OP_ATTACH_INFO, STAY_WHY_CODE, BAR_SCAN_TM, BAR_OPR_CODE, COURIER_CODE, PHONE_ZONE, PHONE, SUBBILL_PIECE_QTY, BAR_UPLOAD_TYPE_CODE, WEIGHT_QTY, OTHER_INFO, AUTOLOADING, OBJ_TYPE_CODE, CREATE_TM) values (1989012000004, '30', '755R', '960837100044', '333124100065', '755R021R0430', '', to_date('05-12-2013 04:10:39', 'mm-dd-yyyy hh24:mi:ss'), '243099', '', '', '', 0, 0, 0.00, '', '1', 30, to_date('05-12-2013 04:35:39', 'mm-dd-yyyy hh24:mi:ss'))")
| apache-2.0 |
einaru/cconverter | cconverter.py | 1 | 1284 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Date: Thu Jun 30 17:17:35 CEST 2011
:Version: 1
:Authors: Einar Uvsløkk <[email protected]>
:Copyright: (c) 2011 Einar Uvsløkk
:License: GNU General Public License (GPL) version 3 or later
vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
import gettext
import locale
import os
import cconverter
from cconverter import app
try:
    from cconverter.defs import (DATA_DIR, PKG_DATA_DIR, LOCALE_DIR)
DEFS_PRESENT = True
except ImportError:
DATA_DIR = PKG_DATA_DIR = LOCALE_DIR = ''
DEFS_PRESENT = False
if not DEFS_PRESENT:
_prefix = '/usr'
DATA_DIR = os.path.join(_prefix, 'share')
LOCALE_DIR = os.path.join(_prefix, 'share', 'locale')
_me = os.path.abspath(os.path.dirname(__file__))
PKG_DATA_DIR = os.path.join(_me, 'data')
cconverter.DATA_DIR = DATA_DIR
cconverter.PKG_DATA_DIR = PKG_DATA_DIR
cconverter.LOCALE_DIR = LOCALE_DIR
cconverter.APP_NAME = 'cconverter'
locale.setlocale(locale.LC_ALL, None)
gettext.bindtextdomain(cconverter.APP_NAME, LOCALE_DIR)
gettext.textdomain(cconverter.APP_NAME)
gettext.install(cconverter.APP_NAME)
dirs = {'DATA_DIR': DATA_DIR,
'PKG_DATA_DIR': PKG_DATA_DIR,
'LOCALE_DIR': LOCALE_DIR,}
kwargs = {'data': dirs,}
app.run(**kwargs)
| gpl-3.0 |
Thraxis/SickRage | lib/hachoir_parser/image/bmp.py | 95 | 6682 | """
Microsoft Bitmap picture parser.
- file extension: ".bmp"
Author: Victor Stinner
Creation: 16 december 2005
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
UInt8, UInt16, UInt32, Bits,
String, RawBytes, Enum,
PaddingBytes, NullBytes, createPaddingField)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.image.common import RGB, PaletteRGBA
from hachoir_core.tools import alignValue
class Pixel4bit(Bits):
static_size = 4
def __init__(self, parent, name):
Bits.__init__(self, parent, name, 4)
class ImageLine(FieldSet):
def __init__(self, parent, name, width, pixel_class):
FieldSet.__init__(self, parent, name)
self._pixel = pixel_class
self._width = width
self._size = alignValue(self._width * self._pixel.static_size, 32)
def createFields(self):
for x in xrange(self._width):
yield self._pixel(self, "pixel[]")
size = self.size - self.current_size
if size:
yield createPaddingField(self, size)
class ImagePixels(FieldSet):
def __init__(self, parent, name, width, height, pixel_class, size=None):
FieldSet.__init__(self, parent, name, size=size)
self._width = width
self._height = height
self._pixel = pixel_class
def createFields(self):
for y in xrange(self._height-1, -1, -1):
yield ImageLine(self, "line[%u]" % y, self._width, self._pixel)
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding", size)
class CIEXYZ(FieldSet):
def createFields(self):
yield UInt32(self, "x")
yield UInt32(self, "y")
yield UInt32(self, "z")
class BmpHeader(FieldSet):
color_space_name = {
1: "Business (Saturation)",
2: "Graphics (Relative)",
4: "Images (Perceptual)",
8: "Absolute colormetric (Absolute)",
}
def getFormatVersion(self):
if "gamma_blue" in self:
return 4
if "important_color" in self:
return 3
return 2
def createFields(self):
# Version 2 (12 bytes)
yield UInt32(self, "header_size", "Header size")
yield UInt32(self, "width", "Width (pixels)")
yield UInt32(self, "height", "Height (pixels)")
yield UInt16(self, "nb_plan", "Number of plan (=1)")
yield UInt16(self, "bpp", "Bits per pixel") # may be zero for PNG/JPEG picture
# Version 3 (40 bytes)
if self["header_size"].value < 40:
return
yield Enum(UInt32(self, "compression", "Compression method"), BmpFile.COMPRESSION_NAME)
yield UInt32(self, "image_size", "Image size (bytes)")
yield UInt32(self, "horizontal_dpi", "Horizontal DPI")
yield UInt32(self, "vertical_dpi", "Vertical DPI")
yield UInt32(self, "used_colors", "Number of color used")
yield UInt32(self, "important_color", "Number of import colors")
# Version 4 (108 bytes)
if self["header_size"].value < 108:
return
yield textHandler(UInt32(self, "red_mask"), hexadecimal)
yield textHandler(UInt32(self, "green_mask"), hexadecimal)
yield textHandler(UInt32(self, "blue_mask"), hexadecimal)
yield textHandler(UInt32(self, "alpha_mask"), hexadecimal)
yield Enum(UInt32(self, "color_space"), self.color_space_name)
yield CIEXYZ(self, "red_primary")
yield CIEXYZ(self, "green_primary")
yield CIEXYZ(self, "blue_primary")
yield UInt32(self, "gamma_red")
yield UInt32(self, "gamma_green")
yield UInt32(self, "gamma_blue")
def parseImageData(parent, name, size, header):
if ("compression" not in header) or (header["compression"].value in (0, 3)):
width = header["width"].value
height = header["height"].value
bpp = header["bpp"].value
if bpp == 32:
cls = UInt32
elif bpp == 24:
cls = RGB
elif bpp == 8:
cls = UInt8
elif bpp == 4:
cls = Pixel4bit
else:
cls = None
if cls:
return ImagePixels(parent, name, width, height, cls, size=size*8)
return RawBytes(parent, name, size)
class BmpFile(Parser):
PARSER_TAGS = {
"id": "bmp",
"category": "image",
"file_ext": ("bmp",),
"mime": (u"image/x-ms-bmp", u"image/x-bmp"),
"min_size": 30*8,
# "magic": (("BM", 0),),
"magic_regex": ((
# "BM", <filesize>, <reserved>, header_size=(12|40|108)
"BM.{4}.{8}[\x0C\x28\x6C]\0{3}",
0),),
"description": "Microsoft bitmap (BMP) picture"
}
endian = LITTLE_ENDIAN
COMPRESSION_NAME = {
0: u"Uncompressed",
1: u"RLE 8-bit",
2: u"RLE 4-bit",
3: u"Bitfields",
4: u"JPEG",
5: u"PNG",
}
def validate(self):
if self.stream.readBytes(0, 2) != 'BM':
return "Wrong file signature"
if self["header/header_size"].value not in (12, 40, 108):
return "Unknown header size (%s)" % self["header_size"].value
if self["header/nb_plan"].value != 1:
return "Invalid number of planes"
return True
def createFields(self):
yield String(self, "signature", 2, "Header (\"BM\")", charset="ASCII")
yield UInt32(self, "file_size", "File size (bytes)")
yield PaddingBytes(self, "reserved", 4, "Reserved")
yield UInt32(self, "data_start", "Data start position")
yield BmpHeader(self, "header")
# Compute number of color
header = self["header"]
bpp = header["bpp"].value
if 0 < bpp <= 8:
if "used_colors" in header and header["used_colors"].value:
nb_color = header["used_colors"].value
else:
nb_color = (1 << bpp)
else:
nb_color = 0
# Color palette (if any)
if nb_color:
yield PaletteRGBA(self, "palette", nb_color)
# Seek to data start
field = self.seekByte(self["data_start"].value)
if field:
yield field
# Image pixels
size = min(self["file_size"].value-self["data_start"].value, (self.size - self.current_size)//8)
yield parseImageData(self, "pixels", size, header)
def createDescription(self):
return u"Microsoft Bitmap version %s" % self["header"].getFormatVersion()
def createContentSize(self):
return self["file_size"].value * 8
| gpl-3.0 |
pshen/ansible | docs/docsite/rst/conf.py | 37 | 7361 | # -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
# pip install sphinx_rtd_theme
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath('_themes'))
VERSION = '2.4'
AUTHOR = 'Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
copyright = "2013-2017 Ansible, Inc"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML+Jinja'
# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
rst_epilog = """
.. |acapi| replace:: *Ansible Core API Guide*
.. |acrn| replace:: *Ansible Core Release Notes*
.. |ac| replace:: Ansible Core
.. |acversion| replace:: Ansible Core Version 2.1
.. |acversionshort| replace:: Ansible Core 2.1
.. |versionshortest| replace:: 2.2
.. |versiondev| replace:: 2.3
.. |pubdate| replace:: July 19, 2016
.. |rhel| replace:: Red Hat Enterprise Linux
"""
# Options for HTML output
# -----------------------
html_theme_path = ['../_themes']
html_theme = 'srtd'
html_short_title = 'Ansible Documentation'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'),
]
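# With this tuple, a LaTeX build (e.g. "sphinx-build -b latex <srcdir>
# <outdir>"; paths illustrative) starts from index.rst and writes
# ansible.tex, which a LaTeX toolchain can then compile to PDF.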
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
autoclass_content = 'both'
intersphinx_mapping = {'python': ('https://docs.python.org/2', (None, '../python2-2.7.13.inv')),
'python3': ('https://docs.python.org/3', (None, '../python3-3.6.1.inv')),
'jinja2': ('http://jinja.pocoo.org/docs', (None, 'jinja2-2.9.6.inv'))}
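# With these mappings, references such as :py:func:`os.path.join` resolve
# against the external Python inventories, and a mapping key prefix selects
# a specific target (e.g. :py:mod:`python3:asyncio`). Each (None, '...inv')
# pair is tried in order: None means the remote objects.inv at the base URL,
# with the named local inventory file as a fallback.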
| gpl-3.0 |