repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null) |
---|---|---|---|---|
repotvsupertuga/repo | refs/heads/master | instal/script.module.stream.tvsupertuga.addon/resources/lib/sources/en/rlsmovies.py | 8 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['rls-movies.com']
self.base_link = 'http://www.rls-movies.com'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
u = client.parseDOM(post, 'enclosure', ret='url', attrs={'type': 'video.+?'})
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GiB|MiB))', post)
s = s[0] if s else '0'
items += [(t, i, s) for i in u]
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GiB|MiB))', item[2])[-1]
div = 1 if size.endswith('GiB') else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
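# Illustrative sketch (not part of the add-on): how the payload built by
# movie() round-trips into sources(). The IMDb id, title and year are invented
# examples, and actually running sources() additionally needs the Kodi/debrid
# runtime modules imported above.
#
#     s = source()
#     payload = s.movie('tt0000000', 'Example Movie', None, [], '2016')
#     # payload is a urlencoded string such as
#     # 'imdb=tt0000000&title=Example+Movie&year=2016'
#     data = urlparse.parse_qs(payload)
#     data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
#     # sources() rebuilds the search query from this dict ('Example Movie 2016')
#     # and feeds it into self.search_link before scraping the RSS results.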
|
Tithen-Firion/youtube-dl | refs/heads/master | youtube_dl/extractor/dispeak.py | 48 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
remove_end,
xpath_element,
xpath_text,
)
class DigitallySpeakingIE(InfoExtractor):
_VALID_URL = r'https?://(?:evt\.dispeak|events\.digitallyspeaking)\.com/(?:[^/]+/)+xml/(?P<id>[^.]+)\.xml'
_TESTS = [{
# From http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface
'url': 'http://evt.dispeak.com/ubm/gdc/sf16/xml/840376_BQRC.xml',
'md5': 'a8efb6c31ed06ca8739294960b2dbabd',
'info_dict': {
'id': '840376_BQRC',
'ext': 'mp4',
'title': 'Tenacious Design and The Interface of \'Destiny\'',
},
}, {
# From http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC
'url': 'http://events.digitallyspeaking.com/gdc/sf11/xml/12396_1299111843500GMPX.xml',
'only_matching': True,
}]
def _parse_mp4(self, metadata):
video_formats = []
video_root = None
mp4_video = xpath_text(metadata, './mp4video', default=None)
if mp4_video is not None:
mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video)
video_root = mobj.group('root')
if video_root is None:
http_host = xpath_text(metadata, 'httpHost', default=None)
if http_host:
video_root = 'http://%s/' % http_host
if video_root is None:
# Hard-coded in http://evt.dispeak.com/ubm/gdc/sf16/custom/player2.js
# Works for GPUTechConf, too
video_root = 'http://s3-2u.digitallyspeaking.com/'
formats = metadata.findall('./MBRVideos/MBRVideo')
if not formats:
return None
for a_format in formats:
stream_name = xpath_text(a_format, 'streamName', fatal=True)
video_path = re.match(r'mp4\:(?P<path>.*)', stream_name).group('path')
url = video_root + video_path
vbr = xpath_text(a_format, 'bitrate')
video_formats.append({
'url': url,
'vbr': int_or_none(vbr),
})
return video_formats
def _parse_flv(self, metadata):
formats = []
akamai_url = xpath_text(metadata, './akamaiHost', fatal=True)
audios = metadata.findall('./audios/audio')
for audio in audios:
formats.append({
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(audio.get('url'), '.flv'),
'ext': 'flv',
'vcodec': 'none',
'format_id': audio.get('code'),
})
slide_video_path = xpath_text(metadata, './slideVideo', fatal=True)
formats.append({
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(slide_video_path, '.flv'),
'ext': 'flv',
'format_note': 'slide deck video',
'quality': -2,
'preference': -2,
'format_id': 'slides',
})
speaker_video_path = xpath_text(metadata, './speakerVideo', fatal=True)
formats.append({
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(speaker_video_path, '.flv'),
'ext': 'flv',
'format_note': 'speaker video',
'quality': -1,
'preference': -1,
'format_id': 'speaker',
})
return formats
def _real_extract(self, url):
video_id = self._match_id(url)
xml_description = self._download_xml(url, video_id)
metadata = xpath_element(xml_description, 'metadata')
video_formats = self._parse_mp4(metadata)
if video_formats is None:
video_formats = self._parse_flv(metadata)
return {
'id': video_id,
'formats': video_formats,
'title': xpath_text(metadata, 'title', fatal=True),
'duration': parse_duration(xpath_text(metadata, 'endTime')),
'creator': xpath_text(metadata, 'speaker'),
}
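# Illustrative sketch (not part of the extractor): this IE is normally driven
# through youtube-dl's public API rather than instantiated directly. The URL is
# the first test URL above; the options dict is an assumption.
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#         info = ydl.extract_info(
#             'http://evt.dispeak.com/ubm/gdc/sf16/xml/840376_BQRC.xml',
#             download=False)
#         print(info['title'], len(info['formats']))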
|
volatilityfoundation/volatility | refs/heads/master | volatility/plugins/mac/ip_filters.py | 9 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.common as common
import volatility.plugins.mac.lsmod as lsmod
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class mac_ip_filters(lsmod.mac_lsmod):
""" Reports any hooked IP filters """
def check_filter(self, context, fname, ptr, kernel_symbol_addresses, kmods):
if ptr == None:
return
# change the last parameter to 1 to get messages about which known-good modules the hooks were found in
good = common.is_known_address_name(ptr, kernel_symbol_addresses, kmods)
return (good, context, fname, ptr)
def calculate(self):
common.set_plugin_members(self)
# get the symbol addresses needed to decide whether a hook points to the kernel/known modules or to a rootkit
(kernel_symbol_addresses, kmods) = common.get_kernel_addrs(self)
list_addrs = [self.addr_space.profile.get_symbol("_ipv4_filters"), self.addr_space.profile.get_symbol("_ipv6_filters")]
for list_addr in list_addrs:
plist = obj.Object("ipfilter_list", offset = list_addr, vm = self.addr_space)
# type 'ipfilter'
cur = plist.tqh_first
while cur:
filter = cur.ipf_filter
name = filter.name.dereference()
yield self.check_filter("INPUT", name, filter.ipf_input, kernel_symbol_addresses, kmods)
yield self.check_filter("OUTPUT", name, filter.ipf_output, kernel_symbol_addresses, kmods)
yield self.check_filter("DETACH", name, filter.ipf_detach, kernel_symbol_addresses, kmods)
cur = cur.ipf_link.tqe_next
def unified_output(self, data):
return TreeGrid([("Context", str),
("Filter", str),
("Pointer", Address),
("Status", str)
], self.generator(data))
def generator(self, data):
for (good, context, fname, ptr) in data:
status = "OK"
if good == 0:
status = "UNKNOWN"
yield (0,[
str(context),
str(fname),
Address(ptr),
str(status),
])
def render_text(self, outfd, data):
self.table_header(outfd, [("Context", "10"),
("Filter", "16"),
("Pointer", "[addrpad]"),
("Status", "")])
for (good, context, fname, ptr) in data:
status = "OK"
if good == 0:
status = "UNKNOWN"
self.table_row(outfd, context, fname, ptr, status)
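# Illustrative usage sketch (not part of the plugin): plugins like this are run
# from the Volatility 2 command line; the memory image path and profile name
# below are invented examples.
#
#     $ python vol.py -f mac_memory.dump \
#           --profile=MacMountainLion_10_8_3_AMDx64 mac_ip_filters
#
# Any filter whose pointer does not resolve to a known kernel or kext symbol is
# reported with Status UNKNOWN, which is the hook indicator this plugin looks for.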
|
piyushroshan/xen-4.3.2 | refs/heads/master | tools/python/xen/util/ip.py | 51 | import os
import re
import socket
import struct
import errno
##### Networking-related functions
def get_defaultroute():
fd = os.popen('/sbin/ip route list 2>/dev/null')
for line in fd.readlines():
m = re.search('^default via ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+) dev ([^ ]*)',
line)
if m:
return [m.group(1), m.group(2)]
return [None, None]
def get_current_ipaddr(dev='defaultroute'):
"""Get the primary IP address for the given network interface.
dev network interface (default: default route device)
returns interface address as a string
"""
if dev == 'defaultroute':
dev = get_defaultroute()[1]
if not dev:
return
fd = os.popen( '/sbin/ifconfig ' + dev + ' 2>/dev/null' )
for line in fd.readlines():
m = re.search( '^\s+inet addr:([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+).*',
line )
if m:
return m.group(1)
return None
def get_current_ipmask(dev='defaultroute'):
"""Get the primary IP netmask for a network interface.
dev network interface (default: default route device)
returns interface netmask as a string
"""
if dev == 'defaultroute':
dev = get_defaultroute()[1]
if not dev:
return
fd = os.popen( '/sbin/ifconfig ' + dev + ' 2>/dev/null' )
for line in fd.readlines():
m = re.search( '^.+Mask:([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+).*',
line )
if m:
return m.group(1)
return None
def get_current_ipgw(dev='defaultroute'):
"""Get the IP gateway for a network interface.
dev network interface (default: default route device)
returns gateway address as a string
"""
if dev == 'defaultroute':
return get_defaultroute()[0]
if not dev:
return
fd = os.popen( '/sbin/route -n' )
for line in fd.readlines():
m = re.search( '^\S+\s+([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)' +
'\s+\S+\s+\S*G.*' + dev + '.*', line )
if m:
return m.group(1)
return None
def inet_aton(addr):
"""Convert an IP addr in IPv4 dot notation into an int.
addr IP address as a string
returns integer
"""
b = socket.inet_aton(addr)
return struct.unpack('!I', b)[0]
def inet_ntoa(n):
"""Convert an int into an IP addr in IPv4 dot notation.
n IP address
returns string
"""
b = struct.pack('!I', n)
return socket.inet_ntoa(b)
def add_offset_to_ip(addr, offset):
"""Add a numerical offset to an IP addr in IPv4 dot notation.
addr IP address
offset offset to add
returns new address
"""
n = inet_aton(addr)
n += offset
return inet_ntoa(n)
def check_subnet( ip, network, netmask ):
"""Check if an IP address is in the subnet defined by
a network address and mask.
ip IP address
network network address
netmask network mask
returns 1 if it is in the subnet, 0 if not
"""
n_ip = inet_aton(ip)
n_net = inet_aton(network)
n_mask = inet_aton(netmask)
return (n_ip & n_mask) == (n_net & n_mask)
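def _example_subnet_math():
    """Illustrative sketch (not part of the original module): the helpers above
    are plain dotted-quad integer arithmetic; the addresses are invented."""
    assert inet_aton('10.0.0.1') == 0x0a000001
    assert inet_ntoa(0x0a000001) == '10.0.0.1'
    assert add_offset_to_ip('10.0.0.250', 10) == '10.0.1.4'
    assert check_subnet('10.0.1.4', '10.0.0.0', '255.255.0.0')
    assert not check_subnet('10.1.0.4', '10.0.0.0', '255.255.0.0')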
|
gento/dionaea | refs/heads/master | modules/python/scripts/smb/include/helpers.py | 9 | #********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2010 Markus Koetter
#* Copyright (C) 2009 Paul Baecher & Markus Koetter & Mark Schloesser
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact [email protected]
#*
#*******************************************************************************/
#* This file was part of Scapy
#* See http://www.secdev.org/projects/scapy for more information
#* Copyright (C) Philippe Biondi <[email protected]>
#* This program is published under a GPLv2 license
#*******************************************************************************
import re
import socket  # Net.__init__ resolves host names via socket.gethostbyname
import random  # Net.choice picks random octets via random.randint
class VolatileValue:
def __repr__(self):
return "<%s>" % self.__class__.__name__
def __getattr__(self, attr):
if attr == "__setstate__":
raise AttributeError(attr)
return getattr(self._fix(),attr)
def _fix(self):
return None
class Gen(object):
def __iter__(self):
return iter([])
class Net(Gen):
"""Generate a list of IPs from a network address or a name"""
name = "ip"
ipaddress = re.compile(r"^(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)(/[0-3]?[0-9])?$")
def __init__(self, net):
self.repr=net
tmp=net.split('/')+["32"]
if not self.ipaddress.match(net):
tmp[0]=socket.gethostbyname(tmp[0])
netmask = int(tmp[1])
def parse_digit(a,netmask):
netmask = min(8,max(netmask,0))
if a == "*":
a = (0,256)
elif a.find("-") >= 0:
x,y = list(map(int,a.split("-")))
if x > y:
y = x
a = (x & (0xff<<netmask) , max(y, (x | (0xff>>(8-netmask))))+1)
else:
a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1)
return a
self.parsed = list(map(lambda x,y: parse_digit(x,y), tmp[0].split("."), list(map(lambda x,nm=netmask: x-nm, (8,16,24,32)))))
def __iter__(self):
for d in range(*self.parsed[3]):
for c in range(*self.parsed[2]):
for b in range(*self.parsed[1]):
for a in range(*self.parsed[0]):
yield "%i.%i.%i.%i" % (a,b,c,d)
def choice(self):
ip = []
for v in self.parsed:
ip.append(str(random.randint(v[0],v[1]-1)))
return ".".join(ip)
def __repr__(self):
return "Net(%r)" % self.repr
class SetGen(Gen):
def __init__(self, set, _iterpacket=1):
self._iterpacket=_iterpacket
if type(set) is list:
self.set = set
elif isinstance(set, BasePacketList):
self.set = list(set)
else:
self.set = [set]
def transf(self, element):
return element
def __iter__(self):
for i in self.set:
if (type(i) is tuple) and (len(i) == 2) and type(i[0]) is int and type(i[1]) is int:
if (i[0] <= i[1]):
j=i[0]
while j <= i[1]:
yield j
j += 1
elif isinstance(i, Gen) and (self._iterpacket or not isinstance(i,BasePacket)):
for j in i:
yield j
else:
yield i
def __repr__(self):
return "<SetGen %s>" % self.set.__repr__()
class BasePacket(Gen):
pass
class BasePacketList:
pass
def lhex(x):
if type(x) is int:
return hex(x)
elif type(x) is tuple:
return "(%s)" % ", ".join(map(lhex, x))
elif type(x) is list:
return "[%s]" % ", ".join(map(lhex, x))
else:
return x
#########################
#### Enum management ####
#########################
class EnumElement:
_value=None
def __init__(self, key, value):
self._key = key
self._value = value
def __repr__(self):
return "<%s %s[%r]>" % (self.__dict__.get("_name", self.__class__.__name__), self._key, self._value)
def __getattr__(self, attr):
return getattr(self._value, attr)
def __int__(self):
return self._value
def __str__(self):
return self._key
def __eq__(self, other):
return self._value == int(other)
class Enum_metaclass(type):
element_class = EnumElement
def __new__(cls, name, bases, dct):
rdict={}
for k,v in dct.items():
if type(v) is int:
v = cls.element_class(k,v)
dct[k] = v
rdict[type(v)] = k
dct["__rdict__"] = rdict
return super(Enum_metaclass, cls).__new__(cls, name, bases, dct)
def __getitem__(self, attr):
return self.__rdict__[attr]
def __contains__(self, val):
return val in self.__rdict__
def get(self, attr, val=None):
return self.__rdict__.get(attr, val)
def __repr__(self):
return "<%s>" % self.__dict__.get("name", self.__name__)
|
nkgilley/home-assistant | refs/heads/dev | tests/components/asuswrt/test_sensor.py | 11 | """The tests for the AsusWrt sensor platform."""
from datetime import timedelta
from aioasuswrt.asuswrt import Device
from homeassistant.components import sensor
from homeassistant.components.asuswrt import (
CONF_DNSMASQ,
CONF_INTERFACE,
CONF_MODE,
CONF_PORT,
CONF_PROTOCOL,
CONF_SENSORS,
DOMAIN,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import AsyncMock, patch
from tests.common import async_fire_time_changed
VALID_CONFIG_ROUTER_SSH = {
DOMAIN: {
CONF_DNSMASQ: "/",
CONF_HOST: "fake_host",
CONF_INTERFACE: "eth0",
CONF_MODE: "router",
CONF_PORT: "22",
CONF_PROTOCOL: "ssh",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "fake_pass",
CONF_SENSORS: [
"devices",
"download_speed",
"download",
"upload_speed",
"upload",
],
}
}
MOCK_DEVICES = {
"a1:b1:c1:d1:e1:f1": Device("a1:b1:c1:d1:e1:f1", "192.168.1.2", "Test"),
"a2:b2:c2:d2:e2:f2": Device("a2:b2:c2:d2:e2:f2", "192.168.1.3", "TestTwo"),
"a3:b3:c3:d3:e3:f3": Device("a3:b3:c3:d3:e3:f3", "192.168.1.4", "TestThree"),
}
MOCK_BYTES_TOTAL = [60000000000, 50000000000]
MOCK_CURRENT_TRANSFER_RATES = [20000000, 10000000]
async def test_sensors(hass: HomeAssistant, mock_device_tracker_conf):
"""Test creating an AsusWRT sensor."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().async_get_connected_devices = AsyncMock(return_value=MOCK_DEVICES)
AsusWrt().async_get_bytes_total = AsyncMock(return_value=MOCK_BYTES_TOTAL)
AsusWrt().async_get_current_transfer_rates = AsyncMock(
return_value=MOCK_CURRENT_TRANSFER_RATES
)
assert await async_setup_component(hass, DOMAIN, VALID_CONFIG_ROUTER_SSH)
await hass.async_block_till_done()
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert (
hass.states.get(f"{sensor.DOMAIN}.asuswrt_devices_connected").state == "3"
)
assert (
hass.states.get(f"{sensor.DOMAIN}.asuswrt_download_speed").state == "160.0"
)
assert hass.states.get(f"{sensor.DOMAIN}.asuswrt_download").state == "60.0"
assert hass.states.get(f"{sensor.DOMAIN}.asuswrt_upload_speed").state == "80.0"
assert hass.states.get(f"{sensor.DOMAIN}.asuswrt_upload").state == "50.0"
|
jbkkd/django-taggit | refs/heads/master | tests/tests.py | 1 | from __future__ import absolute_import, unicode_literals
from unittest import TestCase as UnitTestCase
import django
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import connection
from django.test import TestCase, TransactionTestCase
from django.utils.encoding import force_text
from .forms import CustomPKFoodForm, DirectFoodForm, FoodForm, OfficialFoodForm
from .models import (Article, Child, CustomManager, CustomPKFood,
CustomPKHousePet, CustomPKPet, DirectFood,
DirectHousePet, DirectPet, Food, HousePet, Movie,
OfficialFood, OfficialHousePet, OfficialPet,
OfficialTag, OfficialThroughModel, Pet, Photo,
TaggedCustomPKFood, TaggedCustomPKPet, TaggedFood,
TaggedPet)
from taggit.managers import _model_name, _TaggableManager, TaggableManager
from taggit.models import Tag, TaggedItem
from taggit.utils import edit_string_for_tags, parse_tags
try:
from unittest import skipIf, skipUnless
except ImportError:
from django.utils.unittest import skipIf, skipUnless
class BaseTaggingTest(object):
def assert_tags_equal(self, qs, tags, sort=True, attr="name"):
got = [getattr(obj, attr) for obj in qs]
if sort:
got.sort()
tags.sort()
self.assertEqual(got, tags)
def _get_form_str(self, form_str):
if django.VERSION >= (1, 3):
form_str %= {
"help_start": '<span class="helptext">',
"help_stop": "</span>"
}
else:
form_str %= {
"help_start": "",
"help_stop": ""
}
return form_str
def assert_form_renders(self, form, html):
self.assertHTMLEqual(str(form), self._get_form_str(html))
class BaseTaggingTestCase(TestCase, BaseTaggingTest):
pass
class BaseTaggingTransactionTestCase(TransactionTestCase, BaseTaggingTest):
pass
class TagModelTestCase(BaseTaggingTransactionTestCase):
food_model = Food
tag_model = Tag
def test_unique_slug(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("Red", "red")
def test_update(self):
special = self.tag_model.objects.create(name="special")
special.save()
def test_add(self):
apple = self.food_model.objects.create(name="apple")
yummy = self.tag_model.objects.create(name="yummy")
apple.tags.add(yummy)
def test_slugify(self):
a = Article.objects.create(title="django-taggit 1.0 Released")
a.tags.add("awesome", "release", "AWESOME")
self.assert_tags_equal(a.tags.all(), [
"category-awesome",
"category-release",
"category-awesome-1"
], attr="slug")
def test_integers(self):
"""Adding an integer as a tag should raise a ValueError (#237)."""
apple = self.food_model.objects.create(name="apple")
with self.assertRaisesRegexp(ValueError, (
r"Cannot add 1 \(<(type|class) 'int'>\). "
r"Expected <class 'django.db.models.base.ModelBase'> or str.")):
apple.tags.add(1)
class TagModelDirectTestCase(TagModelTestCase):
food_model = DirectFood
tag_model = Tag
class TagModelCustomPKTestCase(TagModelTestCase):
food_model = CustomPKFood
tag_model = Tag
class TagModelOfficialTestCase(TagModelTestCase):
food_model = OfficialFood
tag_model = OfficialTag
class TaggableManagerTestCase(BaseTaggingTestCase):
food_model = Food
pet_model = Pet
housepet_model = HousePet
taggeditem_model = TaggedItem
tag_model = Tag
def test_add_tag(self):
apple = self.food_model.objects.create(name="apple")
self.assertEqual(list(apple.tags.all()), [])
self.assertEqual(list(self.food_model.tags.all()), [])
apple.tags.add('green')
self.assert_tags_equal(apple.tags.all(), ['green'])
self.assert_tags_equal(self.food_model.tags.all(), ['green'])
pear = self.food_model.objects.create(name="pear")
pear.tags.add('green')
self.assert_tags_equal(pear.tags.all(), ['green'])
self.assert_tags_equal(self.food_model.tags.all(), ['green'])
apple.tags.add('red')
self.assert_tags_equal(apple.tags.all(), ['green', 'red'])
self.assert_tags_equal(self.food_model.tags.all(), ['green', 'red'])
self.assert_tags_equal(
self.food_model.tags.most_common(),
['green', 'red'],
sort=False
)
apple.tags.remove('green')
self.assert_tags_equal(apple.tags.all(), ['red'])
self.assert_tags_equal(self.food_model.tags.all(), ['green', 'red'])
tag = self.tag_model.objects.create(name="delicious")
apple.tags.add(tag)
self.assert_tags_equal(apple.tags.all(), ["red", "delicious"])
apple.delete()
self.assert_tags_equal(self.food_model.tags.all(), ["green"])
def test_add_queries(self):
# Prefill content type cache:
ContentType.objects.get_for_model(self.food_model)
apple = self.food_model.objects.create(name="apple")
# 1 query to see which tags exist
# + 3 queries to create the tags.
# + 6 queries to create the intermediary things (including SELECTs, to
# make sure we don't double create).
# + 12 on Django 1.6 for save points.
queries = 22
if django.VERSION < (1, 6):
queries -= 12
self.assertNumQueries(queries, apple.tags.add, "red", "delicious", "green")
pear = self.food_model.objects.create(name="pear")
# 1 query to see which tags exist
# + 4 queries to create the intermediary things (including SELECTs, to
# make sure we don't double create).
# + 4 on Django 1.6 for save points.
queries = 9
if django.VERSION < (1, 6):
queries -= 4
self.assertNumQueries(queries, pear.tags.add, "green", "delicious")
self.assertNumQueries(0, pear.tags.add)
def test_require_pk(self):
food_instance = self.food_model()
self.assertRaises(ValueError, lambda: food_instance.tags.all())
def test_delete_obj(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red")
self.assert_tags_equal(apple.tags.all(), ["red"])
strawberry = self.food_model.objects.create(name="strawberry")
strawberry.tags.add("red")
apple.delete()
self.assert_tags_equal(strawberry.tags.all(), ["red"])
def test_delete_bulk(self):
apple = self.food_model.objects.create(name="apple")
kitty = self.pet_model.objects.create(pk=apple.pk, name="kitty")
apple.tags.add("red", "delicious", "fruit")
kitty.tags.add("feline")
self.food_model.objects.all().delete()
self.assert_tags_equal(kitty.tags.all(), ["feline"])
def test_lookup_by_tag(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red", "green")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green")
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["red"])),
[apple]
)
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["green"])),
[apple, pear]
)
kitty = self.pet_model.objects.create(name="kitty")
kitty.tags.add("fuzzy", "red")
dog = self.pet_model.objects.create(name="dog")
dog.tags.add("woof", "red")
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["red"]).distinct()),
[apple]
)
tag = self.tag_model.objects.get(name="woof")
self.assertEqual(list(self.pet_model.objects.filter(tags__in=[tag])), [dog])
cat = self.housepet_model.objects.create(name="cat", trained=True)
cat.tags.add("fuzzy")
pks = self.pet_model.objects.filter(tags__name__in=["fuzzy"])
model_name = self.pet_model.__name__
self.assertQuerysetEqual(pks,
['<{0}: kitty>'.format(model_name),
'<{0}: cat>'.format(model_name)],
ordered=False)
def test_lookup_bulk(self):
apple = self.food_model.objects.create(name="apple")
pear = self.food_model.objects.create(name="pear")
apple.tags.add('fruit', 'green')
pear.tags.add('fruit', 'yummie')
def lookup_qs():
# New fix: directly allow WHERE object_id IN (SELECT id FROM ..)
objects = self.food_model.objects.all()
lookup = self.taggeditem_model.bulk_lookup_kwargs(objects)
list(self.taggeditem_model.objects.filter(**lookup))
def lookup_list():
# Simulate old situation: iterate over a list.
objects = list(self.food_model.objects.all())
lookup = self.taggeditem_model.bulk_lookup_kwargs(objects)
list(self.taggeditem_model.objects.filter(**lookup))
self.assertNumQueries(1, lookup_qs)
self.assertNumQueries(2, lookup_list)
def test_exclude(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red", "green", "delicious")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green", "delicious")
self.food_model.objects.create(name="guava")
pks = self.food_model.objects.exclude(tags__name__in=["red"])
model_name = self.food_model.__name__
self.assertQuerysetEqual(pks,
['<{0}: pear>'.format(model_name),
'<{0}: guava>'.format(model_name)],
ordered=False)
def test_similarity_by_tag(self):
"""Test that pears are more similar to apples than watermelons"""
apple = self.food_model.objects.create(name="apple")
apple.tags.add("green", "juicy", "small", "sour")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green", "juicy", "small", "sweet")
watermelon = self.food_model.objects.create(name="watermelon")
watermelon.tags.add("green", "juicy", "large", "sweet")
similar_objs = apple.tags.similar_objects()
self.assertEqual(similar_objs, [pear, watermelon])
self.assertEqual([obj.similar_tags for obj in similar_objs],
[3, 2])
def test_tag_reuse(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("juicy", "juicy")
self.assert_tags_equal(apple.tags.all(), ['juicy'])
def test_query_traverse(self):
spot = self.pet_model.objects.create(name='Spot')
spike = self.pet_model.objects.create(name='Spike')
spot.tags.add('scary')
spike.tags.add('fluffy')
lookup_kwargs = {
'%s__name' % _model_name(self.pet_model): 'Spot'
}
self.assert_tags_equal(
self.tag_model.objects.filter(**lookup_kwargs),
['scary']
)
def test_taggeditem_unicode(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("juicy")
self.assertEqual(
force_text(self.taggeditem_model.objects.all()[0]),
"apple tagged with juicy"
)
def test_abstract_subclasses(self):
p = Photo.objects.create()
p.tags.add("outdoors", "pretty")
self.assert_tags_equal(
p.tags.all(),
["outdoors", "pretty"]
)
m = Movie.objects.create()
m.tags.add("hd")
self.assert_tags_equal(
m.tags.all(),
["hd"],
)
def test_field_api(self):
# Check if tag field, which simulates m2m, has django-like api.
field = self.food_model._meta.get_field('tags')
self.assertTrue(hasattr(field, 'rel'))
self.assertTrue(hasattr(field.rel, 'to'))
self.assertTrue(hasattr(field, 'related'))
# This API has changed in Django 1.8
# https://code.djangoproject.com/ticket/21414
if django.VERSION >= (1, 8):
self.assertEqual(self.food_model, field.model)
self.assertEqual(self.tag_model, field.related.model)
else:
self.assertEqual(self.food_model, field.related.model)
def test_names_method(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('green')
apple.tags.add('red')
self.assertEqual(list(apple.tags.names()), ['green', 'red'])
def test_slugs_method(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('green and juicy')
apple.tags.add('red')
self.assertEqual(list(apple.tags.slugs()), ['green-and-juicy', 'red'])
def test_serializes(self):
apple = self.food_model.objects.create(name="apple")
serializers.serialize("json", (apple,))
def test_prefetch_related(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('1', '2')
orange = self.food_model.objects.create(name="orange")
orange.tags.add('2', '4')
with self.assertNumQueries(2):
l = list(self.food_model.objects.prefetch_related('tags').all())
with self.assertNumQueries(0):
foods = dict((f.name, set(t.name for t in f.tags.all())) for f in l)
self.assertEqual(foods, {
'orange': set(['2', '4']),
'apple': set(['1', '2'])
})
def test_internal_type_is_manytomany(self):
self.assertEqual(
TaggableManager().get_internal_type(), 'ManyToManyField'
)
def test_prefetch_no_extra_join(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('1', '2')
with self.assertNumQueries(2):
l = list(self.food_model.objects.prefetch_related('tags').all())
join_clause = 'INNER JOIN "%s"' % self.taggeditem_model._meta.db_table
self.assertEqual(connection.queries[-1]['sql'].count(join_clause), 1, connection.queries[-2:])
class TaggableManagerDirectTestCase(TaggableManagerTestCase):
food_model = DirectFood
pet_model = DirectPet
housepet_model = DirectHousePet
taggeditem_model = TaggedFood
class TaggableManagerCustomPKTestCase(TaggableManagerTestCase):
food_model = CustomPKFood
pet_model = CustomPKPet
housepet_model = CustomPKHousePet
taggeditem_model = TaggedCustomPKFood
def test_require_pk(self):
# TODO with a charfield pk, pk is never None, so taggit has no way to
# tell if the instance is saved or not
pass
class TaggableManagerOfficialTestCase(TaggableManagerTestCase):
food_model = OfficialFood
pet_model = OfficialPet
housepet_model = OfficialHousePet
taggeditem_model = OfficialThroughModel
tag_model = OfficialTag
def test_extra_fields(self):
self.tag_model.objects.create(name="red")
self.tag_model.objects.create(name="delicious", official=True)
apple = self.food_model.objects.create(name="apple")
apple.tags.add("delicious", "red")
pear = self.food_model.objects.create(name="Pear")
pear.tags.add("delicious")
self.assertEqual(apple, self.food_model.objects.get(tags__official=False))
class TaggableManagerInitializationTestCase(TaggableManagerTestCase):
"""Make sure manager override defaults and sets correctly."""
food_model = Food
custom_manager_model = CustomManager
def test_default_manager(self):
self.assertEqual(self.food_model.tags.__class__, _TaggableManager)
def test_custom_manager(self):
self.assertEqual(self.custom_manager_model.tags.__class__, CustomManager.Foo)
class TaggableFormTestCase(BaseTaggingTestCase):
form_class = FoodForm
food_model = Food
def test_form(self):
self.assertEqual(list(self.form_class.base_fields), ['name', 'tags'])
f = self.form_class({'name': 'apple', 'tags': 'green, red, yummy'})
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value="green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
f.save()
apple = self.food_model.objects.get(name='apple')
self.assert_tags_equal(apple.tags.all(), ['green', 'red', 'yummy'])
f = self.form_class({'name': 'apple', 'tags': 'green, red, yummy, delicious'}, instance=apple)
f.save()
apple = self.food_model.objects.get(name='apple')
self.assert_tags_equal(apple.tags.all(), ['green', 'red', 'yummy', 'delicious'])
self.assertEqual(self.food_model.objects.count(), 1)
f = self.form_class({"name": "raspberry"})
self.assertFalse(f.is_valid())
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value="delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
apple.tags.add('has,comma')
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value=""has,comma", delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
apple.tags.add('has space')
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value=""has space", "has,comma", delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
def test_formfield(self):
tm = TaggableManager(verbose_name='categories', help_text='Add some categories', blank=True)
ff = tm.formfield()
self.assertEqual(ff.label, 'Categories')
self.assertEqual(ff.help_text, 'Add some categories')
self.assertEqual(ff.required, False)
self.assertEqual(ff.clean(""), [])
tm = TaggableManager()
ff = tm.formfield()
self.assertRaises(ValidationError, ff.clean, "")
class TaggableFormDirectTestCase(TaggableFormTestCase):
form_class = DirectFoodForm
food_model = DirectFood
class TaggableFormCustomPKTestCase(TaggableFormTestCase):
form_class = CustomPKFoodForm
food_model = CustomPKFood
class TaggableFormOfficialTestCase(TaggableFormTestCase):
form_class = OfficialFoodForm
food_model = OfficialFood
class TagStringParseTestCase(UnitTestCase):
"""
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
def test_with_simple_space_delimited_tags(self):
"""
Test with simple space-delimited tags.
"""
self.assertEqual(parse_tags('one'), ['one'])
self.assertEqual(parse_tags('one two'), ['one', 'two'])
self.assertEqual(parse_tags('one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('one one two two'), ['one', 'two'])
def test_with_comma_delimited_multiple_words(self):
"""
Test with comma-delimited multiple words.
An unquoted comma in the input will trigger this.
"""
self.assertEqual(parse_tags(',one'), ['one'])
self.assertEqual(parse_tags(',one two'), ['one two'])
self.assertEqual(parse_tags(',one two three'), ['one two three'])
self.assertEqual(parse_tags('a-one, a-two and a-three'),
['a-one', 'a-two and a-three'])
def test_with_double_quoted_multiple_words(self):
"""
Test with double-quoted multiple words.
A completed quote will trigger this. Unclosed quotes are ignored.
"""
self.assertEqual(parse_tags('"one'), ['one'])
self.assertEqual(parse_tags('"one two'), ['one', 'two'])
self.assertEqual(parse_tags('"one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('"one two"'), ['one two'])
self.assertEqual(parse_tags('a-one "a-two and a-three"'),
['a-one', 'a-two and a-three'])
def test_with_no_loose_commas(self):
"""
Test with no loose commas -- split on spaces.
"""
self.assertEqual(parse_tags('one two "thr,ee"'), ['one', 'thr,ee', 'two'])
def test_with_loose_commas(self):
"""
Loose commas - split on commas
"""
self.assertEqual(parse_tags('"one", two three'), ['one', 'two three'])
def test_tags_with_double_quotes_can_contain_commas(self):
"""
Double quotes can contain commas
"""
self.assertEqual(parse_tags('a-one "a-two, and a-three"'),
['a-one', 'a-two, and a-three'])
self.assertEqual(parse_tags('"two", one, one, two, "one"'),
['one', 'two'])
def test_with_naughty_input(self):
"""
Test with naughty input.
"""
# Bad users! Naughty users!
self.assertEqual(parse_tags(None), [])
self.assertEqual(parse_tags(''), [])
self.assertEqual(parse_tags('"'), [])
self.assertEqual(parse_tags('""'), [])
self.assertEqual(parse_tags('"' * 7), [])
self.assertEqual(parse_tags(',,,,,,'), [])
self.assertEqual(parse_tags('",",",",",",","'), [','])
self.assertEqual(parse_tags('a-one "a-two" and "a-three'),
['a-one', 'a-three', 'a-two', 'and'])
def test_recreation_of_tag_list_string_representations(self):
plain = Tag.objects.create(name='plain')
spaces = Tag.objects.create(name='spa ces')
comma = Tag.objects.create(name='com,ma')
self.assertEqual(edit_string_for_tags([plain]), 'plain')
self.assertEqual(edit_string_for_tags([plain, spaces]), '"spa ces", plain')
self.assertEqual(edit_string_for_tags([plain, spaces, comma]), '"com,ma", "spa ces", plain')
self.assertEqual(edit_string_for_tags([plain, comma]), '"com,ma", plain')
self.assertEqual(edit_string_for_tags([comma, spaces]), '"com,ma", "spa ces"')
@skipIf(django.VERSION < (1, 7), "not relevant for Django < 1.7")
class DeconstructTestCase(UnitTestCase):
def test_deconstruct_kwargs_kept(self):
instance = TaggableManager(through=OfficialThroughModel, to='dummy.To')
name, path, args, kwargs = instance.deconstruct()
new_instance = TaggableManager(*args, **kwargs)
self.assertEqual('tests.OfficialThroughModel', new_instance.rel.through)
self.assertEqual('dummy.To', new_instance.rel.to)
@skipUnless(django.VERSION < (1, 7), "test only applies to 1.6 and below")
class SouthSupportTests(TestCase):
def test_import_migrations_module(self):
try:
from taggit.migrations import __doc__ # noqa
except ImproperlyConfigured as e:
exception = e
self.assertIn("SOUTH_MIGRATION_MODULES", exception.args[0])
class InheritedPrefetchTests(TestCase):
def test_inherited_tags_with_prefetch(self):
child = Child()
child.save()
child.tags.add('tag 1', 'tag 2', 'tag 3', 'tag 4')
child = Child.objects.get()
no_prefetch_tags = child.tags.all()
self.assertEquals(4, no_prefetch_tags.count())
child = Child.objects.prefetch_related('tags').get()
prefetch_tags = child.tags.all()
self.assertEquals(4, prefetch_tags.count())
self.assertEquals(set([t.name for t in no_prefetch_tags]),
set([t.name for t in prefetch_tags]))
|
cdht/GitPython | refs/heads/master | git/objects/blob.py | 20 | # blob.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from mimetypes import guess_type
from . import base
__all__ = ('Blob', )
class Blob(base.IndexObject):
"""A Blob encapsulates a git blob object"""
DEFAULT_MIME_TYPE = "text/plain"
type = "blob"
# valid blob modes
executable_mode = 0o100755
file_mode = 0o100644
link_mode = 0o120000
__slots__ = tuple()
@property
def mime_type(self):
"""
:return: String describing the mime type of this file (based on the filename)
:note: Defaults to 'text/plain' in case the actual file type is unknown. """
guesses = None
if self.path:
guesses = guess_type(self.path)
return guesses and guesses[0] or self.DEFAULT_MIME_TYPE
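# Illustrative sketch (not part of the module): the usual access path is
# through GitPython's public API; the repository path and file name are
# invented examples.
#
#     from git import Repo
#     repo = Repo('/path/to/repo')
#     blob = repo.head.commit.tree / 'README.md'
#     blob.mime_type   # e.g. 'text/markdown', else DEFAULT_MIME_TYPE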
|
NL66278/odoo | refs/heads/8.0 | openerp/addons/base/workflow/workflow.py | 74 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.exceptions import Warning
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.workflow
class workflow(osv.osv):
_name = "workflow"
_table = "wkf"
_order = "name"
_columns = {
'name': fields.char('Name', required=True),
'osv': fields.char('Resource Object', required=True,select=True),
'on_create': fields.boolean('On Create', select=True),
'activities': fields.one2many('workflow.activity', 'wkf_id', 'Activities'),
}
_defaults = {
'on_create': lambda *a: True
}
def copy(self, cr, uid, id, values, context=None):
raise Warning(_("Duplicating workflows is not possible, please create a new workflow"))
def write(self, cr, user, ids, vals, context=None):
if not context:
context={}
openerp.workflow.clear_cache(cr, user)
return super(workflow, self).write(cr, user, ids, vals, context=context)
def get_active_workitems(self, cr, uid, res, res_id, context=None):
cr.execute('select * from wkf where osv=%s limit 1',(res,))
wkfinfo = cr.dictfetchone()
workitems = []
if wkfinfo:
cr.execute('SELECT id FROM wkf_instance \
WHERE res_id=%s AND wkf_id=%s \
ORDER BY state LIMIT 1',
(res_id, wkfinfo['id']))
inst_id = cr.fetchone()
cr.execute('select act_id,count(*) from wkf_workitem where inst_id=%s group by act_id', (inst_id,))
workitems = dict(cr.fetchall())
return {'wkf': wkfinfo, 'workitems': workitems}
def create(self, cr, user, vals, context=None):
if not context:
context={}
openerp.workflow.clear_cache(cr, user)
return super(workflow, self).create(cr, user, vals, context=context)
workflow()
class wkf_activity(osv.osv):
_name = "workflow.activity"
_table = "wkf_activity"
_order = "name"
_columns = {
'name': fields.char('Name', required=True),
'wkf_id': fields.many2one('workflow', 'Workflow', required=True, select=True, ondelete='cascade'),
'split_mode': fields.selection([('XOR', 'Xor'), ('OR','Or'), ('AND','And')], 'Split Mode', size=3, required=True),
'join_mode': fields.selection([('XOR', 'Xor'), ('AND', 'And')], 'Join Mode', size=3, required=True),
'kind': fields.selection([('dummy', 'Dummy'), ('function', 'Function'), ('subflow', 'Subflow'), ('stopall', 'Stop All')], 'Kind', required=True),
'action': fields.text('Python Action'),
'action_id': fields.many2one('ir.actions.server', 'Server Action', ondelete='set null'),
'flow_start': fields.boolean('Flow Start'),
'flow_stop': fields.boolean('Flow Stop'),
'subflow_id': fields.many2one('workflow', 'Subflow'),
'signal_send': fields.char('Signal (subflow.*)'),
'out_transitions': fields.one2many('workflow.transition', 'act_from', 'Outgoing Transitions'),
'in_transitions': fields.one2many('workflow.transition', 'act_to', 'Incoming Transitions'),
}
_defaults = {
'kind': lambda *a: 'dummy',
'join_mode': lambda *a: 'XOR',
'split_mode': lambda *a: 'XOR',
}
def unlink(self, cr, uid, ids, context=None):
if context is None: context = {}
if not context.get('_force_unlink') and self.pool.get('workflow.workitem').search(cr, uid, [('act_id', 'in', ids)]):
raise osv.except_osv(_('Operation Forbidden'),
_('Please make sure no workitems refer to an activity before deleting it!'))
super(wkf_activity, self).unlink(cr, uid, ids, context=context)
wkf_activity()
class wkf_transition(osv.osv):
_table = "wkf_transition"
_name = "workflow.transition"
_rec_name = 'signal'
_columns = {
'trigger_model': fields.char('Trigger Object'),
'trigger_expr_id': fields.char('Trigger Expression'),
'sequence': fields.integer('Sequence'),
'signal': fields.char('Signal (Button Name)',
help="When the operation of transition comes from a button pressed in the client form, "\
"signal tests the name of the pressed button. If signal is NULL, no button is necessary to validate this transition."),
'group_id': fields.many2one('res.groups', 'Group Required',
help="The group that a user must have to be authorized to validate this transition."),
'condition': fields.char('Condition', required=True,
help="Expression to be satisfied if we want the transition done."),
'act_from': fields.many2one('workflow.activity', 'Source Activity', required=True, select=True, ondelete='cascade',
help="Source activity. When this activity is over, the condition is tested to determine if we can start the ACT_TO activity."),
'act_to': fields.many2one('workflow.activity', 'Destination Activity', required=True, select=True, ondelete='cascade',
help="The destination activity."),
'wkf_id': fields.related('act_from','wkf_id', type='many2one', relation='workflow', string='Workflow', select=True),
}
_defaults = {
'condition': lambda *a: 'True',
'sequence': 10,
}
_order = 'sequence,id'
def name_get(self, cr, uid, ids, context=None):
return [(line.id, (line.act_from.name) + '+' + (line.act_to.name)) if line.signal == False else (line.id, line.signal) for line in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if name:
ids = self.search(cr, user, ['|',('act_from', operator, name),('act_to', operator, name)] + args, limit=limit)
return self.name_get(cr, user, ids, context=context)
return super(wkf_transition, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
wkf_transition()
class wkf_instance(osv.osv):
_table = "wkf_instance"
_name = "workflow.instance"
_rec_name = 'res_type'
_log_access = False
_columns = {
'uid': fields.integer('User'), # FIXME no constraint??
'wkf_id': fields.many2one('workflow', 'Workflow', ondelete='cascade', select=True),
'res_id': fields.integer('Resource ID'),
'res_type': fields.char('Resource Object'),
'state': fields.char('Status'),
'transition_ids': fields.many2many('workflow.transition', 'wkf_witm_trans', 'inst_id', 'trans_id'),
}
def _auto_init(self, cr, context=None):
super(wkf_instance, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_instance_res_type_res_id_state_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX wkf_instance_res_type_res_id_state_index ON wkf_instance (res_type, res_id, state)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_instance_res_id_wkf_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX wkf_instance_res_id_wkf_id_index ON wkf_instance (res_id, wkf_id)')
wkf_instance()
class wkf_workitem(osv.osv):
_table = "wkf_workitem"
_name = "workflow.workitem"
_log_access = False
_rec_name = 'state'
_columns = {
'act_id': fields.many2one('workflow.activity', 'Activity', required=True, ondelete="cascade", select=True),
'wkf_id': fields.related('act_id','wkf_id', type='many2one', relation='workflow', string='Workflow'),
'subflow_id': fields.many2one('workflow.instance', 'Subflow', ondelete="cascade", select=True),
'inst_id': fields.many2one('workflow.instance', 'Instance', required=True, ondelete="cascade", select=True),
'state': fields.char('Status', select=True),
}
wkf_workitem()
class wkf_triggers(osv.osv):
_table = "wkf_triggers"
_name = "workflow.triggers"
_log_access = False
_columns = {
'res_id': fields.integer('Resource ID', size=128),
'model': fields.char('Object'),
'instance_id': fields.many2one('workflow.instance', 'Destination Instance', ondelete="cascade"),
'workitem_id': fields.many2one('workflow.workitem', 'Workitem', required=True, ondelete="cascade"),
}
def _auto_init(self, cr, context=None):
super(wkf_triggers, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_triggers_res_id_model_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX wkf_triggers_res_id_model_index ON wkf_triggers (res_id, model)')
wkf_triggers()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
blindroot/django | refs/heads/master | tests/file_uploads/urls.py | 452 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^upload/$', views.file_upload_view),
url(r'^verify/$', views.file_upload_view_verify),
url(r'^unicode_name/$', views.file_upload_unicode_name),
url(r'^echo/$', views.file_upload_echo),
url(r'^echo_content_type_extra/$', views.file_upload_content_type_extra),
url(r'^echo_content/$', views.file_upload_echo_content),
url(r'^quota/$', views.file_upload_quota),
url(r'^quota/broken/$', views.file_upload_quota_broken),
url(r'^getlist_count/$', views.file_upload_getlist_count),
url(r'^upload_errors/$', views.file_upload_errors),
url(r'^filename_case/$', views.file_upload_filename_case_view),
url(r'^fd_closing/(?P<access>t|f)/$', views.file_upload_fd_closing),
]
|
GNOME/kupfer | refs/heads/master | kupfer/obj/contacts.py | 4 | # -*- encoding: utf-8 -*-
"""
Kupfer's Contacts API
Main definition and *constructor* classes.
Constructor classes such as EmailContact are used to conveniently construct
contacts with common traits. To *use* contacts, always use ContactLeaf, asking
for specific slots to be filled.
"""
import re
from kupfer import icons
from kupfer.obj.grouping import GroupingLeaf
__author__ = ("Ulrik Sverdrup <[email protected]>, "
"Karol Będkowski <[email protected]>",
"Adi Sieker <[email protected]>",
)
EMAIL_KEY = "EMAIL"
NAME_KEY = "NAME"
PHONE_KEY = "PHONE"
ADDRESS_KEY = "ADDRESS"
LABEL_KEY = "LABEL"
JABBER_JID_KEY = "JID"
JABBER_STATUS_KEY = "JABBER_STATUS"
JABBER_RESOURCE_KEY = "JABBER_RESOURCE"
AIM_KEY = "AIM"
GOOGLE_TALK_KEY = "GOOGLE_TALK"
ICQ_KEY = "ICQ"
MSN_KEY = "MSN"
QQ_KEY = "QQ"
SKYPE_KEY = "SKYPE"
YAHOO_KEY = "YAHOO"
class ContactLeaf(GroupingLeaf):
grouping_slots = (NAME_KEY, )
def __init__(self, obj, name, image=None):
self.image = image
GroupingLeaf.__init__(self, obj, name)
def get_icon_name(self):
return "stock_person"
def get_text_representation(self):
return self.get_description()
def get_thumbnail(self, width, height):
if self.image:
return icons.get_pixbuf_from_data(self.image, width, height)
return GroupingLeaf.get_thumbnail(self, width, height)
## E-mail convenience and constructors
def _get_email_from_url(url):
''' convert http://[email protected] -> [email protected] '''
sep = url.find('://')
return url[sep + 3:] if sep > -1 else url
# FIXME: Find a more robust (less strict?) approach than regex
_CHECK_EMAIL_RE = re.compile(r"^[a-z0-9\._%-+]+\@[a-z0-9._%-]+\.[a-z]{2,}$")
def is_valid_email(email):
''' simple email check '''
return len(email) > 7 and _CHECK_EMAIL_RE.match(email.lower()) is not None
def email_from_leaf(leaf):
"""
Return an email address string if @leaf has a valid email address.
@leaf may also be a TextLeaf or UrlLeaf.
Return a false value if no valid email is found.
"""
if isinstance(leaf, ContactLeaf):
return EMAIL_KEY in leaf and leaf[EMAIL_KEY]
email = _get_email_from_url(leaf.object)
return is_valid_email(email) and email
class EmailContact (ContactLeaf):
grouping_slots = ContactLeaf.grouping_slots + (EMAIL_KEY, )
def __init__(self, email, name, image=None):
slots = {EMAIL_KEY: email, NAME_KEY: name}
ContactLeaf.__init__(self, slots, name, image)
def repr_key(self):
return self.object[EMAIL_KEY]
def get_description(self):
return self.object[EMAIL_KEY]
def get_gicon(self):
return icons.ComposedIconSmall(self.get_icon_name(), "stock_mail")
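# Illustrative sketch (not part of the module), tying the module docstring's
# constructor-vs-ContactLeaf split to concrete calls; the address and name are
# invented examples.
#
#     contact = EmailContact('[email protected]', 'Alice Example')
#     EMAIL_KEY in contact               # -> True: the constructor filled the slot
#     contact[EMAIL_KEY]                 # -> '[email protected]'
#     email_from_leaf(contact)           # -> '[email protected]'
#     is_valid_email('not-an-address')   # -> False (length + regex check)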
class IMContact (ContactLeaf):
grouping_slots = ContactLeaf.grouping_slots + (EMAIL_KEY, )
def __init__(self, im_id_kind, im_id, name, label=None, other_slots=None,
image=None):
self.im_id_kind = im_id_kind
slots = {im_id_kind: im_id, NAME_KEY: name, LABEL_KEY: label}
if other_slots:
slots.update(other_slots)
ContactLeaf.__init__(self, slots, name, image)
self.kupfer_add_alias(im_id)
def repr_key(self):
return self.object[self.im_id_kind]
def get_description(self):
return self.object[LABEL_KEY] or self.object[self.im_id_kind]
class JabberContact (IMContact):
''' Minimal class for all Jabber contacts. '''
grouping_slots = IMContact.grouping_slots + (JABBER_JID_KEY, )
def __init__(self, jid, name, status=None, resource=None, slots=None,
image=None):
IMContact.__init__(self, JABBER_JID_KEY, jid, name or jid,
other_slots=slots, image=image)
self._description = _("[%(status)s] %(userid)s/%(service)s") % \
{
"status": status or _("unknown"),
"userid": jid,
"service": resource or u"",
}
def get_description(self):
return self._description
class AIMContact(IMContact):
grouping_slots = IMContact.grouping_slots + (AIM_KEY, )
def __init__(self, id_, name, slots=None, image=None):
IMContact.__init__(self, AIM_KEY, id_, name, _("Aim"), slots, image)
class GoogleTalkContact(IMContact):
grouping_slots = IMContact.grouping_slots + (GOOGLE_TALK_KEY, )
def __init__(self, id_, name, slots=None, image=None):
IMContact.__init__(self, GOOGLE_TALK_KEY, id_, name, _("Google Talk"),
slots, image)
class ICQContact(IMContact):
grouping_slots = IMContact.grouping_slots + (ICQ_KEY, )
def __init__(self, id_, name, slots=None, image=None):
IMContact.__init__(self, ICQ_KEY, id_, name, _("ICQ"), slots, image)
class MSNContact(IMContact):
grouping_slots = IMContact.grouping_slots + (MSN_KEY, )
def __init__(self, id_, name, slots=None, image=None):
IMContact.__init__(self, MSN_KEY, id_, name, _("MSN"), slots, image)
class QQContact(IMContact):
grouping_slots = IMContact.grouping_slots + (QQ_KEY, )
def __init__(self, id_, name, slots=None, image=None):
IMContact.__init__(self, QQ_KEY, id_, name, _("QQ"), slots, image)
class YahooContact(IMContact):
grouping_slots = IMContact.grouping_slots + (YAHOO_KEY, )
def __init__(self, id_, name, slots=None, image=None):
IMContact.__init__(self, YAHOO_KEY, id_, name, _("Yahoo"), slots, image)
class SkypeContact(IMContact):
grouping_slots = IMContact.grouping_slots + (SKYPE_KEY, )
def __init__(self, id_, name, slots=None, image=None):
IMContact.__init__(self, SKYPE_KEY, id_, name, _("Skype"), slots, image)
class PhoneContact(ContactLeaf):
grouping_slots = ContactLeaf.grouping_slots + (EMAIL_KEY, )
def __init__(self, number, name, label, slots=None, image=None):
pslots = {PHONE_KEY: number, NAME_KEY: name, LABEL_KEY: label}
if slots:
pslots.update(slots)
ContactLeaf.__init__(self, pslots, name, image)
def repr_key(self):
return self.object[PHONE_KEY]
def get_description(self):
return '%s: %s' % (self.object[LABEL_KEY], self.object[PHONE_KEY])
class AddressContact(ContactLeaf):
grouping_slots = ContactLeaf.grouping_slots + (EMAIL_KEY, )
def __init__(self, address, name, label, slots=None, image=None):
aslots = {ADDRESS_KEY: address, NAME_KEY: name, LABEL_KEY: label}
if slots:
aslots.update(slots)
ContactLeaf.__init__(self, aslots, name, image)
def repr_key(self):
return self.object[ADDRESS_KEY]
|
IndonesiaX/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/course_metadata_utils.py | 22 | """
Simple utility functions that operate on course metadata.
This is a place to put simple functions that operate on course metadata. It
allows us to share code between the CourseDescriptor and CourseOverview
classes, which both need these types of functions.
"""
from base64 import b32encode
from datetime import datetime, timedelta
import dateutil.parser
from math import exp
from django.utils.timezone import UTC
from .fields import Date
DEFAULT_START_DATE = datetime(2030, 1, 1, tzinfo=UTC())
def clean_course_key(course_key, padding_char):
"""
Encode a course's key into a unique, deterministic base32-encoded ID for
the course.
Arguments:
course_key (CourseKey): A course key.
padding_char (str): Character used for padding at end of the encoded
string. The standard value for this is '='.
"""
return "course_{}".format(
b32encode(unicode(course_key)).replace('=', padding_char)
)
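# A minimal usage sketch (course_key stands for any CourseKey object; the
# exact base32 text depends on its unicode form):
#     clean_course_key(course_key, '_')
#     # -> u'course_' + b32encode(unicode(course_key)) with any trailing
#     #    '=' padding replaced by '_'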
def url_name_for_course_location(location):
"""
Given a course's usage locator, returns the course's URL name.
Arguments:
location (BlockUsageLocator): The course's usage locator.
"""
return location.name
def display_name_with_default(course):
"""
Calculates the display name for a course.
Default to the display_name if it isn't None, else fall back to creating
a name based on the URL.
Unlike the rest of this module's functions, this function takes an entire
course descriptor/overview as a parameter. This is because a few test cases
(specifically, {Text|Image|Video}AnnotationModuleTestCase.test_student_view)
create scenarios where course.display_name is not None but course.location
is None, which causes calling course.url_name to fail. So, although we'd
like to just pass course.display_name and course.url_name as arguments to
this function, we can't do so without breaking those tests.
Note: This method no longer escapes as it once did, so the caller must
ensure it is properly escaped where necessary.
Arguments:
course (CourseDescriptor|CourseOverview): descriptor or overview of
said course.
"""
return (
course.display_name if course.display_name is not None
else course.url_name.replace('_', ' ')
)
def display_name_with_default_escaped(course):
"""
DEPRECATED: use display_name_with_default
Calculates the display name for a course with some HTML escaping.
This follows the same logic as display_name_with_default, with
the addition of the escaping.
Here is an example of how to move away from this method in Mako html:
Before:
<span class="course-name">${course.display_name_with_default_escaped}</span>
After:
<span class="course-name">${course.display_name_with_default | h}</span>
If the context is Javascript in Mako, you'll need to follow other best practices.
Note: Switch to display_name_with_default, and ensure the caller
properly escapes where necessary.
Note: This newly introduced method should not be used. It was only
introduced to enable a quick search/replace and the ability to slowly
migrate and test switching to display_name_with_default, which is no
longer escaped.
Arguments:
course (CourseDescriptor|CourseOverview): descriptor or overview of
said course.
"""
# This escaping is incomplete. However, rather than switching this to use
# markupsafe.escape() and fixing issues, better to put that energy toward
# migrating away from this method altogether.
    return course.display_name_with_default.replace('<', '&lt;').replace('>', '&gt;')
def number_for_course_location(location):
"""
Given a course's block usage locator, returns the course's number.
This is a "number" in the sense of the "course numbers" that you see at
lots of universities. For example, given a course
"Intro to Computer Science" with the course key "edX/CS-101/2014", the
course number would be "CS-101"
Arguments:
location (BlockUsageLocator): The usage locator of the course in
question.
"""
return location.course
def has_course_started(start_date):
"""
    Given a course's start datetime, returns whether the current time is past it.
Arguments:
start_date (datetime): The start datetime of the course in question.
"""
# TODO: This will throw if start_date is None... consider changing this behavior?
return datetime.now(UTC()) > start_date
def has_course_ended(end_date):
"""
Given a course's end datetime, returns whether
(a) it is not None, and
(b) the current time is past it.
Arguments:
end_date (datetime): The end datetime of the course in question.
"""
return datetime.now(UTC()) > end_date if end_date is not None else False
def course_starts_within(start_date, look_ahead_days):
"""
    Given a course's start datetime and a number of look-ahead days, returns
    True if the course's start date falls within that window, otherwise False.
Arguments:
start_date (datetime): The start datetime of the course in question.
look_ahead_days (int): number of days to see in future for course start date.
"""
return datetime.now(UTC()) + timedelta(days=look_ahead_days) > start_date
def course_start_date_is_default(start, advertised_start):
"""
Returns whether a course's start date hasn't yet been set.
Arguments:
start (datetime): The start datetime of the course in question.
advertised_start (str): The advertised start date of the course
in question.
"""
return advertised_start is None and start == DEFAULT_START_DATE
def _datetime_to_string(date_time, format_string, strftime_localized):
"""
Formats the given datetime with the given function and format string.
Adds UTC to the resulting string if the format is DATE_TIME or TIME.
Arguments:
date_time (datetime): the datetime to be formatted
format_string (str): the date format type, as passed to strftime
        strftime_localized ((datetime, str) -> str): a localized string
formatting function
"""
# TODO: Is manually appending UTC really the right thing to do here? What if date_time isn't UTC?
result = strftime_localized(date_time, format_string)
return (
result + u" UTC" if format_string in ['DATE_TIME', 'TIME', 'DAY_AND_TIME']
else result
)
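# A minimal sketch with a stand-in formatter (strftime_localized is normally
# injected by the caller; the lambda below is a hypothetical example):
#     fmt = lambda dt, _fs: dt.strftime('%b %d, %Y %H:%M')
#     _datetime_to_string(datetime(2015, 5, 30, tzinfo=UTC()), 'DATE_TIME', fmt)
#     # -> u'May 30, 2015 00:00 UTC' (the ' UTC' suffix comes from the
#     #    DATE_TIME/TIME/DAY_AND_TIME special-casing above)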
def course_start_datetime_text(start_date, advertised_start, format_string, ugettext, strftime_localized):
"""
Calculates text to be shown to user regarding a course's start
datetime in UTC.
Prefers .advertised_start, then falls back to .start.
Arguments:
start_date (datetime): the course's start datetime
advertised_start (str): the course's advertised start date
format_string (str): the date format type, as passed to strftime
ugettext ((str) -> str): a text localization function
strftime_localized ((datetime, str) -> str): a localized string
formatting function
"""
if advertised_start is not None:
# TODO: This will return an empty string if advertised_start == ""... consider changing this behavior?
try:
# from_json either returns a Date, returns None, or raises a ValueError
parsed_advertised_start = Date().from_json(advertised_start)
if parsed_advertised_start is not None:
# In the Django implementation of strftime_localized, if
# the year is <1900, _datetime_to_string will raise a ValueError.
return _datetime_to_string(parsed_advertised_start, format_string, strftime_localized)
except ValueError:
pass
return advertised_start.title()
elif start_date != DEFAULT_START_DATE:
return _datetime_to_string(start_date, format_string, strftime_localized)
else:
_ = ugettext
# Translators: TBD stands for 'To Be Determined' and is used when a course
# does not yet have an announced start date.
return _('TBD')
def course_end_datetime_text(end_date, format_string, strftime_localized):
"""
Returns a formatted string for a course's end date or datetime.
If end_date is None, an empty string will be returned.
Arguments:
end_date (datetime): the end datetime of a course
format_string (str): the date format type, as passed to strftime
strftime_localized ((datetime, str) -> str): a localized string
formatting function
"""
return (
_datetime_to_string(end_date, format_string, strftime_localized) if end_date is not None
else ''
)
def may_certify_for_course(certificates_display_behavior, certificates_show_before_end, has_ended):
"""
Returns whether it is acceptable to show the student a certificate download
link for a course.
Arguments:
certificates_display_behavior (str): string describing the course's
certificate display behavior.
See CourseFields.certificates_display_behavior.help for more detail.
certificates_show_before_end (bool): whether user can download the
course's certificates before the course has ended.
has_ended (bool): Whether the course has ended.
"""
show_early = (
certificates_display_behavior in ('early_with_info', 'early_no_info')
or certificates_show_before_end
)
return show_early or has_ended
def sorting_score(start, advertised_start, announcement):
"""
Returns a tuple that can be used to sort the courses according
to how "new" they are. The "newness" score is computed using a
heuristic that takes into account the announcement and
(advertised) start dates of the course if available.
The lower the number the "newer" the course.
"""
# Make courses that have an announcement date have a lower
    # score than courses that don't; older courses should have a
# higher score.
announcement, start, now = sorting_dates(start, advertised_start, announcement)
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
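# Worked example of the heuristic (the day counts are hypothetical): a course
# announced 150 days ago scores -exp(-150 / 300.0) ~= -0.61, while a course
# with no announcement that started 150 days ago scores exp(150 / 300.0)
# ~= 1.65, so the announced course sorts as newer (lower score).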
def sorting_dates(start, advertised_start, announcement):
"""
Utility function to get datetime objects for dates used to
compute the is_new flag and the sorting_score.
"""
try:
start = dateutil.parser.parse(advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = start
now = datetime.now(UTC())
return announcement, start, now
|
arcean/telepathy-python | refs/heads/master | debian/python-telepathy/usr/share/pyshared/telepathy/_generated/Account_Interface_Avatar.py | 4 | # -*- coding: utf-8 -*-
# Generated from the Telepathy spec
"""Copyright (C) 2008 Collabora Ltd.
Copyright (C) 2008 Nokia Corporation
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import dbus.service
class AccountInterfaceAvatar(dbus.service.Interface):
"""\
This interface extends the core Account interface to provide a
user-settable avatar image.
The avatar could have been a property on the core Account interface,
but was moved to a separate interface because it is likely to be
large. This means that clients can safely use GetAll to get
properties on the core Account interface without flooding the
session bus with large images.
"""
def __init__(self):
self._interfaces.add('org.freedesktop.Telepathy.Account.Interface.Avatar')
@dbus.service.signal('org.freedesktop.Telepathy.Account.Interface.Avatar', signature='')
def AvatarChanged(self):
"""
Emitted when the Avatar property changes.
The avatar itself is deliberately not included in this
signal, to reduce bus traffic in the (likely common) case where no
running application cares about the user's own avatar.
"""
pass
|
tuxfux-hlp-notes/python-batches | refs/heads/master | archieves/batch-61/files/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py | 2918 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
|
YuriyIlyin/ansible-modules-core | refs/heads/devel | cloud/docker/__init__.py | 12133432 | |
franosincic/edx-platform | refs/heads/master | common/djangoapps/monitoring/__init__.py | 12133432 | |
uestcxl/OnlineJudge | refs/heads/master | judge/judger/__init__.py | 12133432 | |
mrunge/horizon | refs/heads/master | openstack_dashboard/test/test_plugins/__init__.py | 12133432 | |
teemulehtinen/a-plus | refs/heads/master | apps/templatetags/__init__.py | 12133432 | |
elkingtonmcb/django | refs/heads/master | tests/template_backends/apps/good/__init__.py | 12133432 | |
ineffablebob/script.trakt | refs/heads/master | tests/__init__.py | 12133432 | |
rsalmond/seabus | refs/heads/master | seabus/__init__.py | 12133432 | |
MartinHjelmare/home-assistant | refs/heads/dev | homeassistant/components/cloud/binary_sensor.py | 7 | """Support for Home Assistant Cloud binary sensors."""
import asyncio
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DISPATCHER_REMOTE_UPDATE, DOMAIN
WAIT_UNTIL_CHANGE = 3
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the cloud binary sensors."""
if discovery_info is None:
return
cloud = hass.data[DOMAIN]
async_add_entities([CloudRemoteBinary(cloud)])
class CloudRemoteBinary(BinarySensorDevice):
"""Representation of an Cloud Remote UI Connection binary sensor."""
def __init__(self, cloud):
"""Initialize the binary sensor."""
self.cloud = cloud
self._unsub_dispatcher = None
@property
def name(self) -> str:
"""Return the name of the binary sensor, if any."""
return "Remote UI"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return "cloud-remote-ui-connectivity"
@property
def is_on(self) -> bool:
"""Return true if the binary sensor is on."""
return self.cloud.remote.is_connected
@property
def device_class(self) -> str:
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'connectivity'
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self.cloud.remote.certificate is not None
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state."""
return False
async def async_added_to_hass(self):
"""Register update dispatcher."""
async def async_state_update(data):
"""Update callback."""
await asyncio.sleep(WAIT_UNTIL_CHANGE)
self.async_schedule_update_ha_state()
self._unsub_dispatcher = async_dispatcher_connect(
self.hass, DISPATCHER_REMOTE_UPDATE, async_state_update)
async def async_will_remove_from_hass(self):
"""Register update dispatcher."""
if self._unsub_dispatcher is not None:
self._unsub_dispatcher()
self._unsub_dispatcher = None
|
mw46d/ardupilot | refs/heads/master | Tools/LogAnalyzer/DataflashLog.py | 19 | #
# Code to abstract the parsing of APM Dataflash log files, currently only used by the LogAnalyzer
#
# Initial code by Andrew Chapman ([email protected]), 16th Jan 2014
#
from __future__ import print_function
import collections
import os
import numpy
import bisect
import sys
import ctypes
from VehicleType import VehicleType, VehicleTypeString
class Format(object):
'''Data channel format as specified by the FMT lines in the log file'''
def __init__(self,msgType,msgLen,name,types,labels):
self.NAME = 'FMT'
self.msgType = msgType
self.msgLen = msgLen
self.name = name
self.types = types
self.labels = labels.split(',')
def __str__(self):
return "%8s %s" % (self.name, repr(self.labels))
@staticmethod
def trycastToFormatType(value,valueType):
        '''using format characters from libraries/DataFlash/DataFlash.h to cast strings to basic python int/float/string types
        tries a cast; if it fails the original value is returned unchanged, which is acceptable because the text logs do not always match the declared format, e.g. MODE is declared as int but may appear as text'''
try:
if valueType in "fcCeELd":
return float(value)
elif valueType in "bBhHiIMQq":
return int(value)
elif valueType in "nNZ":
return str(value)
except:
pass
return value
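    # A minimal sketch of the casting rules above (values are illustrative):
    #     Format.trycastToFormatType('123', 'i')   -> 123     (int group)
    #     Format.trycastToFormatType('1.5', 'f')   -> 1.5     (float group)
    #     Format.trycastToFormatType('AUTO', 'M')  -> 'AUTO'  (cast fails, value kept)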
def to_class(self):
members = dict(
NAME = self.name,
labels = self.labels[:],
)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels[:]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
p = property(lambda x:getattr(x, attributename),
lambda x, v:setattr(x,attributename, Format.trycastToFormatType(v,format)))
members[propertyname] = p
members[attributename] = None
createproperty(label, _type)
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,'_'+k)) for k in x.labels]))
def init(a, *x):
if len(x) != len(a.labels):
raise ValueError("Invalid Length")
#print(list(zip(a.labels, x)))
for (l,v) in zip(a.labels, x):
try:
setattr(a, l, v)
except Exception as e:
print("{} {} {} failed".format(a,l,v))
print(e)
members['__init__'] = init
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(object,),
members
)
#print(members)
return cls
class logheader(ctypes.LittleEndianStructure):
_fields_ = [ \
('head1', ctypes.c_uint8),
('head2', ctypes.c_uint8),
('msgid', ctypes.c_uint8),
]
def __repr__(self):
return "<logheader head1=0x{self.head1:x} head2=0x{self.head2:x} msgid=0x{self.msgid:x} ({self.msgid})>".format(self=self)
class BinaryFormat(ctypes.LittleEndianStructure):
NAME = 'FMT'
MSG = 128
SIZE = 0
FIELD_FORMAT = {
'b': ctypes.c_int8,
'B': ctypes.c_uint8,
'h': ctypes.c_int16,
'H': ctypes.c_uint16,
'i': ctypes.c_int32,
'I': ctypes.c_uint32,
'f': ctypes.c_float,
'd': ctypes.c_double,
'n': ctypes.c_char * 4,
'N': ctypes.c_char * 16,
'Z': ctypes.c_char * 64,
'c': ctypes.c_int16,# * 100,
'C': ctypes.c_uint16,# * 100,
'e': ctypes.c_int32,# * 100,
'E': ctypes.c_uint32,# * 100,
'L': ctypes.c_int32,
'M': ctypes.c_uint8,
'q': ctypes.c_int64,
'Q': ctypes.c_uint64,
}
FIELD_SCALE = {
'c': 100,
'C': 100,
'e': 100,
'E': 100,
}
_packed_ = True
_fields_ = [ \
('head', logheader),
('type', ctypes.c_uint8),
('length', ctypes.c_uint8),
('name', ctypes.c_char * 4),
('types', ctypes.c_char * 16),
('labels', ctypes.c_char * 64),
]
def __repr__(self):
return "<{cls} {data}>".format(cls=self.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(self,k)) for (k,_) in self._fields_[1:]]))
def to_class(self):
members = dict(
NAME = self.name,
MSG = self.type,
SIZE = self.length,
labels = self.labels.split(",") if self.labels else [],
_pack_ = True)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels.split(",")
if self.labels and (len(fieldtypes) != len(fieldlabels)):
print("Broken FMT message for {} .. ignoring".format(self.name), file=sys.stderr)
return None
fields = [('head',logheader)]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
scale = BinaryFormat.FIELD_SCALE.get(format, None)
p = property(lambda x:getattr(x, attributename))
if scale is not None:
p = property(lambda x:getattr(x, attributename) / scale)
members[propertyname] = p
try:
fields.append((attributename, BinaryFormat.FIELD_FORMAT[format]))
except KeyError:
print('ERROR: Failed to add FMT type: {}, with format: {}'.format(attributename, format))
raise
createproperty(label, _type)
members['_fields_'] = fields
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,k)) for k in x.labels]))
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(ctypes.LittleEndianStructure,),
members
)
if ctypes.sizeof(cls) != cls.SIZE:
print("size mismatch for {} expected {} got {}".format(cls, ctypes.sizeof(cls), cls.SIZE), file=sys.stderr)
# for i in cls.labels:
# print("{} = {}".format(i,getattr(cls,'_'+i)))
return None
return cls
BinaryFormat.SIZE = ctypes.sizeof(BinaryFormat)
class Channel(object):
'''storage for a single stream of data, i.e. all GPS.RelAlt values'''
# TODO: rethink data storage, but do more thorough regression testing before refactoring it
# TODO: store data as a scipy spline curve so we can more easily interpolate and sample the slope?
def __init__(self):
self.dictData = {} # dict of linenum->value # store dupe data in dict and list for now, until we decide which is the better way to go
self.listData = [] # list of (linenum,value) # store dupe data in dict and list for now, until we decide which is the better way to go
def getSegment(self, startLine, endLine):
'''returns a segment of this data (from startLine to endLine, inclusive) as a new Channel instance'''
segment = Channel()
segment.dictData = {k:v for k,v in self.dictData.iteritems() if k >= startLine and k <= endLine}
return segment
def min(self):
return min(self.dictData.values())
def max(self):
return max(self.dictData.values())
def avg(self):
return numpy.mean(self.dictData.values())
def getNearestValueFwd(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
while index<len(self.listData):
line = self.listData[index][0]
#print("Looking forwards for nearest value to line number %d, starting at line %d" % (lineNumber,line)) # TEMP
if line >= lineNumber:
return (self.listData[index][1],line)
index += 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValueBack(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999)) - 1
while index>=0:
line = self.listData[index][0]
#print("Looking backwards for nearest value to line number %d, starting at line %d" % (lineNumber,line)) # TEMP
if line <= lineNumber:
return (self.listData[index][1],line)
index -= 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValue(self, lineNumber, lookForwards=True):
'''find the nearest data value to the given lineNumber, defaults to first looking forwards. Returns (value,lineNumber)'''
if lookForwards:
try:
return self.getNearestValueFwd(lineNumber)
except:
return self.getNearestValueBack(lineNumber)
else:
try:
return self.getNearestValueBack(lineNumber)
except:
return self.getNearestValueFwd(lineNumber)
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getInterpolatedValue(self, lineNumber):
(prevValue,prevValueLine) = self.getNearestValue(lineNumber, lookForwards=False)
(nextValue,nextValueLine) = self.getNearestValue(lineNumber, lookForwards=True)
if prevValueLine == nextValueLine:
return prevValue
weight = (lineNumber-prevValueLine) / float(nextValueLine-prevValueLine)
return ((weight*prevValue) + ((1-weight)*nextValue))
def getIndexOf(self, lineNumber):
'''returns the index within this channel's listData of the given lineNumber, or raises an Exception if not found'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
#print("INDEX of line %d: %d" % (lineNumber,index))
#print("self.listData[index][0]: %d" % self.listData[index][0])
if (self.listData[index][0] == lineNumber):
return index
else:
raise Exception("Error finding index for line %d" % lineNumber)
class LogIterator:
'''Smart iterator that can move through a log by line number and maintain an index into the nearest values of all data channels'''
# TODO: LogIterator currently indexes the next available value rather than the nearest value, we should make it configurable between next/nearest
class LogIteratorSubValue:
'''syntactic sugar to allow access by LogIterator[lineLabel][dataLabel]'''
logdata = None
iterators = None
lineLabel = None
def __init__(self, logdata, iterators, lineLabel):
self.logdata = logdata
self.lineLabel = lineLabel
self.iterators = iterators
def __getitem__(self, dataLabel):
index = self.iterators[self.lineLabel][0]
return self.logdata.channels[self.lineLabel][dataLabel].listData[index][1]
iterators = {} # lineLabel -> (listIndex,lineNumber)
logdata = None
currentLine = None
def __init__(self, logdata, lineNumber=0):
self.logdata = logdata
self.currentLine = lineNumber
for lineLabel in self.logdata.formats:
if lineLabel in self.logdata.channels:
self.iterators[lineLabel] = ()
self.jump(lineNumber)
def __iter__(self):
return self
def __getitem__(self, lineLabel):
return LogIterator.LogIteratorSubValue(self.logdata, self.iterators, lineLabel)
def next(self):
'''increment iterator to next log line'''
self.currentLine += 1
if self.currentLine > self.logdata.lineCount:
return self
for lineLabel in self.iterators.keys():
            # check if the currentLine has gone past the line we're pointing to for this type of data
dataLabel = self.logdata.formats[lineLabel].labels[0]
(index, lineNumber) = self.iterators[lineLabel]
# if so, and it is not the last entry in the log, then increment the indices for all dataLabels under that lineLabel
if (self.currentLine > lineNumber) and (index < len(self.logdata.channels[lineLabel][dataLabel].listData)-1):
index += 1
lineNumber = self.logdata.channels[lineLabel][dataLabel].listData[index][0]
self.iterators[lineLabel] = (index,lineNumber)
return self
def jump(self, lineNumber):
'''jump iterator to specified log line'''
self.currentLine = lineNumber
for lineLabel in self.iterators.keys():
dataLabel = self.logdata.formats[lineLabel].labels[0]
(value,lineNumber) = self.logdata.channels[lineLabel][dataLabel].getNearestValue(self.currentLine)
self.iterators[lineLabel] = (self.logdata.channels[lineLabel][dataLabel].getIndexOf(lineNumber), lineNumber)
class DataflashLogHelper:
'''helper functions for dealing with log data, put here to keep DataflashLog class as a simple parser and data store'''
@staticmethod
def getTimeAtLine(logdata, lineNumber):
'''returns the nearest GPS timestamp in milliseconds after the given line number'''
if not "GPS" in logdata.channels:
raise Exception("no GPS log data found")
        # older logs use 'Time', newer logs use 'TimeMS'
# even newer logs use TimeUS
timeLabel = None
for possible in "TimeMS", "Time", "TimeUS":
if possible in logdata.channels["GPS"]:
timeLabel = possible
break
if timeLabel is None:
raise Exception("Unable to get time label")
while lineNumber <= logdata.lineCount:
if lineNumber in logdata.channels["GPS"][timeLabel].dictData:
return logdata.channels["GPS"][timeLabel].dictData[lineNumber]
lineNumber = lineNumber + 1
sys.stderr.write("didn't find GPS data for " + str(lineNumber) + " - using maxtime\n")
return logdata.channels["GPS"][timeLabel].max()
@staticmethod
def findLoiterChunks(logdata, minLengthSeconds=0, noRCInputs=True):
'''returns a list of (to,from) pairs defining sections of the log which are in loiter mode. Ordered from longest to shortest in time. If noRCInputs == True it only returns chunks with no control inputs'''
# TODO: implement noRCInputs handling when identifying stable loiter chunks, for now we're ignoring it
def chunkSizeCompare(chunk1, chunk2):
chunk1Len = chunk1[1]-chunk1[0]
chunk2Len = chunk2[1]-chunk2[0]
if chunk1Len == chunk2Len:
return 0
elif chunk1Len > chunk2Len:
return -1
else:
return 1
od = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
chunks = []
for i in range(len(od.keys())):
if od.values()[i][0] == "LOITER":
startLine = od.keys()[i]
endLine = None
if i == len(od.keys())-1:
endLine = logdata.lineCount
else:
endLine = od.keys()[i+1]-1
chunkTimeSeconds = (DataflashLogHelper.getTimeAtLine(logdata,endLine)-DataflashLogHelper.getTimeAtLine(logdata,startLine)+1) / 1000.0
if chunkTimeSeconds > minLengthSeconds:
chunks.append((startLine,endLine))
#print("LOITER chunk: %d to %d, %d lines" % (startLine,endLine,endLine-startLine+1))
#print(" (time %d to %d, %d seconds)" % (DataflashLogHelper.getTimeAtLine(logdata,startLine), DataflashLogHelper.getTimeAtLine(logdata,endLine), chunkTimeSeconds))
chunks.sort(chunkSizeCompare)
return chunks
@staticmethod
def isLogEmpty(logdata):
'''returns an human readable error string if the log is essentially empty, otherwise returns None'''
# naive check for now, see if the throttle output was ever above 20%
throttleThreshold = 20
if logdata.vehicleType == VehicleType.Copter:
throttleThreshold = 200 # copter uses 0-1000, plane+rover use 0-100
if "CTUN" in logdata.channels:
try:
maxThrottle = logdata.channels["CTUN"]["ThrOut"].max()
except KeyError as e:
                # ThrOut was shortened to ThO at some stage...
maxThrottle = logdata.channels["CTUN"]["ThO"].max()
# at roughly the same time ThO became a range from 0 to 1
throttleThreshold = 0.2
if maxThrottle < throttleThreshold:
return "Throttle never above 20%"
return None
class DataflashLog(object):
'''APM Dataflash log file reader and container class. Keep this simple, add more advanced or specific functions to DataflashLogHelper class'''
knownHardwareTypes = ["APM", "PX4", "MPNG"]
intTypes = "bBhHiIM"
floatTypes = "fcCeEL"
charTypes = "nNZ"
def __init__(self, logfile=None, format="auto", ignoreBadlines=False):
self.filename = None
self.vehicleType = None # from VehicleType enumeration; value derived from header
self.vehicleTypeString = None # set at same time has the enum value
self.firmwareVersion = ""
self.firmwareHash = ""
self.freeRAM = 0
self.hardwareType = "" # APM 1, APM 2, PX4, MPNG, etc What is VRBrain? BeagleBone, etc? Needs more testing
self.formats = {} # name -> Format
self.parameters = {} # token -> value
self.messages = {} # lineNum -> message
self.modeChanges = {} # lineNum -> (mode,value)
self.channels = {} # lineLabel -> {dataLabel:Channel}
self.filesizeKB = 0
self.durationSecs = 0
self.lineCount = 0
self.skippedLines = 0
self.backpatch_these_modechanges = []
self.frame = None
if logfile:
self.read(logfile, format, ignoreBadlines)
def getCopterType(self):
'''returns quad/hex/octo/tradheli if this is a copter log'''
if self.vehicleType != VehicleType.Copter:
return None
motLabels = []
if "MOT" in self.formats: # not listed in PX4 log header for some reason?
motLabels = self.formats["MOT"].labels
if "GGain" in motLabels:
return "tradheli"
elif len(motLabels) == 4:
return "quad"
elif len(motLabels) == 6:
return "hex"
elif len(motLabels) == 8:
return "octo"
else:
return ""
def num_motor_channels(self):
motor_channels_for_frame = {
"QUAD": 4,
"HEXA": 6,
"Y6": 6,
"OCTA": 8,
"OCTA_QUAD": 8,
# "HELI": 1,
# "HELI_DUAL": 2,
"TRI": 3,
"SINGLE": 1,
"COAX": 2,
"TAILSITTER": 1,
"DODECA_HEXA" : 12,
}
return motor_channels_for_frame[self.frame]
def read(self, logfile, format="auto", ignoreBadlines=False):
'''returns on successful log read (including bad lines if ignoreBadlines==True), will throw an Exception otherwise'''
# TODO: dataflash log parsing code is pretty hacky, should re-write more methodically
self.filename = logfile
if self.filename == '<stdin>':
f = sys.stdin
else:
f = open(self.filename, 'r')
if format == 'bin':
head = '\xa3\x95\x80\x80'
elif format == 'log':
head = ""
elif format == 'auto':
if self.filename == '<stdin>':
# assuming TXT format
# raise ValueError("Invalid log format for stdin: {}".format(format))
head = ""
else:
head = f.read(4)
f.seek(0)
else:
raise ValueError("Unknown log format for {}: {}".format(self.filename, format))
if head == '\xa3\x95\x80\x80':
numBytes, lineNumber = self.read_binary(f, ignoreBadlines)
pass
else:
numBytes, lineNumber = self.read_text(f, ignoreBadlines)
# gather some general stats about the log
self.lineCount = lineNumber
self.filesizeKB = numBytes / 1024.0
        # TODO: switch duration calculation to use TimeMS values rather than GPS timestamp
if "GPS" in self.channels:
# the GPS time label changed at some point, need to handle both
timeLabel = None
for i in 'TimeMS','TimeUS','Time':
if i in self.channels["GPS"]:
timeLabel = i
break
firstTimeGPS = int(self.channels["GPS"][timeLabel].listData[0][1])
lastTimeGPS = int(self.channels["GPS"][timeLabel].listData[-1][1])
if timeLabel == 'TimeUS':
firstTimeGPS /= 1000
lastTimeGPS /= 1000
self.durationSecs = (lastTimeGPS-firstTimeGPS) / 1000
# TODO: calculate logging rate based on timestamps
# ...
msg_vehicle_to_vehicle_map = {
"ArduCopter": VehicleType.Copter,
"APM:Copter": VehicleType.Copter,
"ArduPlane": VehicleType.Plane,
"ArduRover": VehicleType.Rover
}
# takes the vehicle type supplied via "MSG" and sets vehicleType from
# the VehicleType enumeration
def set_vehicleType_from_MSG_vehicle(self, MSG_vehicle):
ret = self.msg_vehicle_to_vehicle_map.get(MSG_vehicle, None)
if ret is None:
raise ValueError("Unknown vehicle type (%s)" % (MSG_vehicle))
self.vehicleType = ret
self.vehicleTypeString = VehicleTypeString[ret]
def handleModeChange(self, lineNumber, e):
if self.vehicleType == VehicleType.Copter:
try:
modes = {0:'STABILIZE',
1:'ACRO',
2:'ALT_HOLD',
3:'AUTO',
4:'GUIDED',
5:'LOITER',
6:'RTL',
7:'CIRCLE',
9:'LAND',
10:'OF_LOITER',
11:'DRIFT',
13:'SPORT',
14:'FLIP',
15:'AUTOTUNE',
16:'POSHOLD',
17:'BRAKE',
18:'THROW',
19:'AVOID_ADSB',
20:'GUIDED_NOGPS',
21:'SMART_RTL'}
if hasattr(e, 'ThrCrs'):
self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ThrCrs)
else:
# assume it has ModeNum:
self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ModeNum)
except:
if hasattr(e, 'ThrCrs'):
self.modeChanges[lineNumber] = (e.Mode, e.ThrCrs)
else:
# assume it has ModeNum:
print("Unknown mode=%u" % e.ModeNum)
self.modeChanges[lineNumber] = (e.Mode, "mode=%u" % e.ModeNum)
elif self.vehicleType in [VehicleType.Plane, VehicleType.Copter, VehicleType.Rover]:
self.modeChanges[lineNumber] = (e.Mode, e.ModeNum)
else:
# if you've gotten to here the chances are we don't
# know what vehicle you're flying...
raise Exception("Unknown log type for MODE line vehicletype=({}) line=({})".format(self.vehicleTypeString, repr(e)))
def backPatchModeChanges(self):
for (lineNumber, e) in self.backpatch_these_modechanges:
self.handleModeChange(lineNumber, e)
def set_frame(self, frame):
self.frame = frame
def process(self, lineNumber, e):
if e.NAME == 'FMT':
cls = e.to_class()
if cls is not None: # FMT messages can be broken ...
if hasattr(e, 'type') and e.type not in self._formats: # binary log specific
self._formats[e.type] = cls
if cls.NAME not in self.formats:
self.formats[cls.NAME] = cls
elif e.NAME == "PARM":
self.parameters[e.Name] = e.Value
elif e.NAME == "MSG":
tokens = e.Message.split(' ')
if not self.frame:
if "Frame" in tokens[0]:
self.set_frame(tokens[1])
if not self.vehicleType:
try:
self.set_vehicleType_from_MSG_vehicle(tokens[0]);
except ValueError:
return
self.backPatchModeChanges()
self.firmwareVersion = tokens[1]
if len(tokens) == 3:
self.firmwareHash = tokens[2][1:-1]
else:
self.messages[lineNumber] = e.Message
elif e.NAME == "MODE":
if self.vehicleType is None:
self.backpatch_these_modechanges.append( (lineNumber, e) )
else:
self.handleModeChange(lineNumber, e)
# anything else must be the log data
else:
groupName = e.NAME
# first time seeing this type of log line, create the channel storage
if not groupName in self.channels:
self.channels[groupName] = {}
for label in e.labels:
self.channels[groupName][label] = Channel()
# store each token in its relevant channel
for label in e.labels:
value = getattr(e, label)
channel = self.channels[groupName][label]
channel.dictData[lineNumber] = value
channel.listData.append((lineNumber, value))
def read_text(self, f, ignoreBadlines):
self.formats = {'FMT':Format}
lineNumber = 0
numBytes = 0
knownHardwareTypes = ["APM", "PX4", "MPNG"]
for line in f:
lineNumber = lineNumber + 1
numBytes += len(line) + 1
try:
#print("Reading line: %d" % lineNumber)
line = line.strip('\n\r')
tokens = line.split(', ')
# first handle the log header lines
if line == " Ready to drive." or line == " Ready to FLY.":
continue
if line == "----------------------------------------": # present in pre-3.0 logs
raise Exception("Log file seems to be in the older format (prior to self-describing logs), which isn't supported")
if len(tokens) == 1:
tokens2 = line.split(' ')
if line == "":
pass
elif len(tokens2) == 1 and tokens2[0].isdigit(): # log index
pass
elif len(tokens2) == 3 and tokens2[0] == "Free" and tokens2[1] == "RAM:":
self.freeRAM = int(tokens2[2])
elif tokens2[0] in knownHardwareTypes:
self.hardwareType = line # not sure if we can parse this more usefully, for now only need to report it back verbatim
elif (len(tokens2) == 2 or len(tokens2) == 3) and tokens2[1][0].lower() == "v": # e.g. ArduCopter V3.1 (5c6503e2)
try:
self.set_vehicleType_from_MSG_vehicle(tokens2[0])
except ValueError:
pass
self.firmwareVersion = tokens2[1]
if len(tokens2) == 3:
self.firmwareHash = tokens2[2][1:-1]
else:
errorMsg = "Error parsing line %d of log file: %s" % (lineNumber, self.filename)
if ignoreBadlines:
print(errorMsg + " (skipping line)", file=sys.stderr)
self.skippedLines += 1
else:
raise Exception("")
else:
if not tokens[0] in self.formats:
raise ValueError("Unknown Format {}".format(tokens[0]))
e = self.formats[tokens[0]](*tokens[1:])
self.process(lineNumber, e)
except Exception as e:
print("BAD LINE: " + line, file=sys.stderr)
if not ignoreBadlines:
raise Exception("Error parsing line %d of log file %s - %s" % (lineNumber,self.filename,e.args[0]))
return (numBytes,lineNumber)
def read_binary(self, f, ignoreBadlines):
lineNumber = 0
numBytes = 0
for e in self._read_binary(f, ignoreBadlines):
lineNumber += 1
if e is None:
continue
numBytes += e.SIZE
# print(e)
self.process(lineNumber, e)
return (numBytes,lineNumber)
def _read_binary(self, f, ignoreBadlines):
self._formats = {128:BinaryFormat}
data = bytearray(f.read())
offset = 0
while len(data) > offset + ctypes.sizeof(logheader):
h = logheader.from_buffer(data, offset)
if not (h.head1 == 0xa3 and h.head2 == 0x95):
if ignoreBadlines == False:
raise ValueError(h)
else:
if h.head1 == 0xff and h.head2 == 0xff and h.msgid == 0xff:
print("Assuming EOF due to dataflash block tail filled with \\xff... (offset={off})".format(off=offset), file=sys.stderr)
break
offset += 1
continue
if h.msgid in self._formats:
typ = self._formats[h.msgid]
if len(data) <= offset + typ.SIZE:
break
try:
e = typ.from_buffer(data, offset)
except:
print("data:{} offset:{} size:{} sizeof:{} sum:{}".format(len(data),offset,typ.SIZE,ctypes.sizeof(typ),offset+typ.SIZE))
raise
offset += typ.SIZE
else:
raise ValueError(str(h) + "unknown type")
yield e
|
SecHackLabs/WebHackSHL | refs/heads/master | modules/tplmap/tests/test_channel.py | 2 | import unittest
import requests
import os
import sys
import random
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from plugins.engines.mako import Mako
from core.channel import Channel
from core.checks import detect_template_injection
import utils.loggers
import logging
utils.loggers.stream_handler.setLevel(logging.FATAL)
class ChannelTest(unittest.TestCase):
expected_data = {
'language': 'python',
'engine': 'mako',
'evaluate' : 'python' ,
'execute' : True,
'write' : True,
'read' : True,
'trailer': '${%(trailer)s}',
'header': '${%(header)s}',
'render': '${%(code)s}',
'prefix': '',
'suffix': '',
'bind_shell' : True,
'reverse_shell': True
}
def test_post_reflection(self):
template = '%s'
channel = Channel({
'url' : 'http://127.0.0.1:15001/post/mako',
'force_level': [ 0, 0 ],
'data' : 'inj=*&othervar=1',
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_url_reflection(self):
channel = Channel({
'url' : 'http://127.0.0.1:15001/url/mako/AA*AA',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_header_reflection(self):
template = '%s'
channel = Channel({
'url' : 'http://127.0.0.1:15001/header/mako',
'force_level': [ 0, 0 ],
'headers' : [ 'User-Agent: *' ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_put_reflection(self):
template = '%s'
channel = Channel({
'url' : 'http://127.0.0.1:15001/put/mako',
'data' : 'inj=*&othervar=1',
'request' : 'PUT',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_custom_injection_tag(self):
template = '%s'
channel = Channel({
'url' : 'http://127.0.0.1:15001/reflect/mako?tpl=%s&inj=~',
'force_level': [ 0, 0 ],
'injection_tag': '~',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_reflection_multiple_point_tag(self):
template = '%s'
channel = Channel({
'url' : 'http://127.0.0.1:15001/reflect/mako?tpl=%s&asd=1&asd2=*&inj=*&inj2=*&inj3=*',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_reflection_multiple_point_no_tag(self):
channel = Channel({
'url' : 'http://127.0.0.1:15001/reflect/mako?inj=asd&inj2=asd2',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_no_reflection(self):
channel = Channel({
'url' : 'http://127.0.0.1:15001/reflect/mako?inj2=asd2',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'RT'
})
detect_template_injection(channel, [ Mako ])
self.assertEqual(channel.data, {})
def test_reflection_point_startswith(self):
channel = Channel({
'url' : 'http://127.0.0.1:15001/startswith/mako?inj=thismustexists*&startswith=thismustexists',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_reflection_point_dont_startswith(self):
channel = Channel({
'url' : 'http://127.0.0.1:15001/startswith/mako?inj=*&startswith=thismustexists',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
self.assertEqual(channel.data, {})
def test_quotes(self):
channel = Channel({
'url' : 'http://127.0.0.1:15001/reflect/mako?inj=asd',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'RT'
})
obj = detect_template_injection(channel, [ Mako ])
result = obj.execute("""echo 1"2"'3'\\"\\'""")
self.assertEqual(result, """123"'""")
channel = Channel({
'url' : 'http://127.0.0.1:15001/blind/mako?inj=asd',
'force_level': [ 0, 0 ],
'injection_tag': '*',
'technique': 'RT'
})
obj = detect_template_injection(channel, [ Mako ])
self.assertTrue(obj.execute_blind("""echo 1"2"'3'\\"\\'"""))
def test_auth_reflection(self):
channel = Channel({
'url' : 'http://localhost:15001/reflect_cookieauth/mako?inj=asd*',
'force_level': [ 0, 0 ],
'headers' : [ 'Cookie: SID=SECRET' ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
del channel.data['os']
self.assertEqual(channel.data, self.expected_data)
def test_wrong_auth_reflection(self):
channel = Channel({
'url' : 'http://localhost:15001/reflect_cookieauth/mako?inj=asd*',
'force_level': [ 0, 0 ],
'headers' : [ 'Cookie: SID=WRONGSECRET' ],
'injection_tag': '*',
'technique': 'R'
})
detect_template_injection(channel, [ Mako ])
self.assertEqual(channel.data, {}) |
yasoob/youtube-dl-GUI | refs/heads/master | youtube_dl/extractor/vgtv.py | 3 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .xstream import XstreamIE
from ..utils import (
ExtractorError,
float_or_none,
try_get,
)
class VGTVIE(XstreamIE):
IE_DESC = 'VGTV, BTTV, FTV, Aftenposten and Aftonbladet'
_GEO_BYPASS = False
_HOST_TO_APPNAME = {
'vgtv.no': 'vgtv',
'bt.no/tv': 'bttv',
'aftenbladet.no/tv': 'satv',
'fvn.no/fvntv': 'fvntv',
'aftenposten.no/webtv': 'aptv',
'ap.vgtv.no/webtv': 'aptv',
'tv.aftonbladet.se': 'abtv',
# obsolete URL schemas, kept in order to save one HTTP redirect
'tv.aftonbladet.se/abtv': 'abtv',
'www.aftonbladet.se/tv': 'abtv',
}
_APP_NAME_TO_VENDOR = {
'vgtv': 'vgtv',
'bttv': 'bt',
'satv': 'sa',
'fvntv': 'fvn',
'aptv': 'ap',
'abtv': 'ab',
}
_VALID_URL = r'''(?x)
(?:https?://(?:www\.)?
(?P<host>
%s
)
/?
(?:
(?:\#!/)?(?:video|live)/|
embed?.*id=|
a(?:rticles)?/
)|
(?P<appname>
%s
):)
(?P<id>\d+)
''' % ('|'.join(_HOST_TO_APPNAME.keys()), '|'.join(_APP_NAME_TO_VENDOR.keys()))
_TESTS = [
{
# streamType: vod
'url': 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu',
'md5': 'b8be7a234cebb840c0d512c78013e02f',
'info_dict': {
'id': '84196',
'ext': 'mp4',
'title': 'Hevnen er søt: Episode 10 - Abu',
'description': 'md5:e25e4badb5f544b04341e14abdc72234',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 648.000,
'timestamp': 1404626400,
'upload_date': '20140706',
'view_count': int,
},
},
{
# streamType: wasLive
'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen',
'info_dict': {
'id': '100764',
'ext': 'flv',
'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 9103.0,
'timestamp': 1410113864,
'upload_date': '20140907',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'Video is no longer available',
},
{
# streamType: wasLive
'url': 'http://www.vgtv.no/#!/live/113063/direkte-v75-fra-solvalla',
'info_dict': {
'id': '113063',
'ext': 'mp4',
'title': 'V75 fra Solvalla 30.05.15',
'description': 'md5:b3743425765355855f88e096acc93231',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 25966,
'timestamp': 1432975582,
'upload_date': '20150530',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more',
'md5': 'fd828cd29774a729bf4d4425fe192972',
'info_dict': {
'id': '21039',
'ext': 'mp4',
'title': 'TRAILER: «SWEATSHOP» - I can´t take any more',
'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238',
'duration': 66,
'timestamp': 1417002452,
'upload_date': '20141126',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://www.bt.no/tv/#!/video/100250/norling-dette-er-forskjellen-paa-1-divisjon-og-eliteserien',
'only_matching': True,
},
{
'url': 'http://ap.vgtv.no/webtv#!/video/111084/de-nye-bysyklene-lettere-bedre-gir-stoerre-hjul-og-feste-til-mobil',
'only_matching': True,
},
{
# geoblocked
'url': 'http://www.vgtv.no/#!/video/127205/inside-the-mind-of-favela-funk',
'only_matching': True,
},
{
'url': 'https://tv.aftonbladet.se/video/36015/vulkanutbrott-i-rymden-nu-slapper-nasa-bilderna',
'only_matching': True,
},
{
'url': 'http://tv.aftonbladet.se/abtv/articles/36015',
'only_matching': True,
},
{
'url': 'https://www.aftonbladet.se/tv/a/36015',
'only_matching': True,
},
{
'url': 'abtv:140026',
'only_matching': True,
},
{
'url': 'http://www.vgtv.no/video/84196/hevnen-er-soet-episode-10-abu',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
host = mobj.group('host')
appname = self._HOST_TO_APPNAME[host] if host else mobj.group('appname')
vendor = self._APP_NAME_TO_VENDOR[appname]
data = self._download_json(
'http://svp.vg.no/svp/api/v1/%s/assets/%s?appName=%s-website'
% (vendor, video_id, appname),
video_id, 'Downloading media JSON')
if data.get('status') == 'inactive':
raise ExtractorError(
'Video %s is no longer available' % video_id, expected=True)
info = {
'formats': [],
}
if len(video_id) == 5:
if appname == 'bttv':
info = self._extract_video_info('btno', video_id)
streams = data['streamUrls']
stream_type = data.get('streamType')
is_live = stream_type == 'live'
formats = []
hls_url = streams.get('hls')
if hls_url:
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, 'mp4',
entry_protocol='m3u8' if is_live else 'm3u8_native',
m3u8_id='hls', fatal=False))
hds_url = streams.get('hds')
if hds_url:
hdcore_sign = 'hdcore=3.7.0'
f4m_formats = self._extract_f4m_formats(
hds_url + '?%s' % hdcore_sign, video_id, f4m_id='hds', fatal=False)
if f4m_formats:
for entry in f4m_formats:
                    # URLs without the extra param induce a 404 error
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.append(entry)
mp4_urls = streams.get('pseudostreaming') or []
mp4_url = streams.get('mp4')
if mp4_url:
mp4_urls.append(mp4_url)
for mp4_url in mp4_urls:
format_info = {
'url': mp4_url,
}
mobj = re.search(r'(\d+)_(\d+)_(\d+)', mp4_url)
if mobj:
tbr = int(mobj.group(3))
format_info.update({
'width': int(mobj.group(1)),
'height': int(mobj.group(2)),
'tbr': tbr,
'format_id': 'mp4-%s' % tbr,
})
formats.append(format_info)
info['formats'].extend(formats)
if not info['formats']:
properties = try_get(
data, lambda x: x['streamConfiguration']['properties'], list)
if properties and 'geoblocked' in properties:
raise self.raise_geo_restricted(
countries=[host.rpartition('.')[-1].partition('/')[0].upper()])
self._sort_formats(info['formats'])
info.update({
'id': video_id,
'title': self._live_title(data['title']) if is_live else data['title'],
'description': data['description'],
'thumbnail': data['images']['main'] + '?t[]=900x506q80',
'timestamp': data['published'],
'duration': float_or_none(data['duration'], 1000),
'view_count': data['displays'],
'is_live': is_live,
})
return info
class BTArticleIE(InfoExtractor):
IE_NAME = 'bt:article'
IE_DESC = 'Bergens Tidende Articles'
_VALID_URL = r'https?://(?:www\.)?bt\.no/(?:[^/]+/)+(?P<id>[^/]+)-\d+\.html'
_TEST = {
'url': 'http://www.bt.no/nyheter/lokalt/Kjemper-for-internatet-1788214.html',
'md5': '2acbe8ad129b3469d5ae51b1158878df',
'info_dict': {
'id': '23199',
'ext': 'mp4',
'title': 'Alrekstad internat',
'description': 'md5:dc81a9056c874fedb62fc48a300dac58',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 191,
'timestamp': 1289991323,
'upload_date': '20101117',
'view_count': int,
},
}
def _real_extract(self, url):
webpage = self._download_webpage(url, self._match_id(url))
video_id = self._search_regex(
r'<video[^>]+data-id="(\d+)"', webpage, 'video id')
return self.url_result('bttv:%s' % video_id, 'VGTV')
class BTVestlendingenIE(InfoExtractor):
IE_NAME = 'bt:vestlendingen'
IE_DESC = 'Bergens Tidende - Vestlendingen'
_VALID_URL = r'https?://(?:www\.)?bt\.no/spesial/vestlendingen/#!/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.bt.no/spesial/vestlendingen/#!/86588',
'md5': 'd7d17e3337dc80de6d3a540aefbe441b',
'info_dict': {
'id': '86588',
'ext': 'mov',
'title': 'Otto Wollertsen',
'description': 'Vestlendingen Otto Fredrik Wollertsen',
'timestamp': 1430473209,
'upload_date': '20150501',
},
'skip': '404 Error',
}, {
'url': 'http://www.bt.no/spesial/vestlendingen/#!/86255',
'md5': 'a2893f8632e96389f4bdf36aa9463ceb',
'info_dict': {
'id': '86255',
'ext': 'mov',
'title': 'Du må tåle å fryse og være sulten',
'description': 'md5:b8046f4d022d5830ddab04865791d063',
'upload_date': '20150321',
'timestamp': 1426942023,
},
}]
def _real_extract(self, url):
return self.url_result('bttv:%s' % self._match_id(url), 'VGTV')
|
marcocaccin/scikit-learn | refs/heads/master | sklearn/semi_supervised/__init__.py | 436 | """
The :mod:`sklearn.semi_supervised` module implements semi-supervised learning
algorithms. These algorithms utilized small amounts of labeled data and large
amounts of unlabeled data for classification tasks. This module includes Label
Propagation.
"""
from .label_propagation import LabelPropagation, LabelSpreading
__all__ = ['LabelPropagation', 'LabelSpreading']
|
sectubs/2016s-SEP | refs/heads/master | gr-orcatun/docs/doxygen/doxyxml/generated/compoundsuper.py | 348 | #!/usr/bin/env python
#
# Generated Thu Jun 11 18:44:25 2009 by generateDS.py.
#
import sys
import getopt
from string import lower as str_lower
from xml.dom import minidom
from xml.dom import Node
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper:
def format_string(self, input_data, input_name=''):
return input_data
def format_integer(self, input_data, input_name=''):
return '%d' % input_data
def format_float(self, input_data, input_name=''):
return '%f' % input_data
def format_double(self, input_data, input_name=''):
return '%e' % input_data
def format_boolean(self, input_data, input_name=''):
return '%s' % input_data
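#
# Illustrative sketch (an assumption, not generated output): a user-supplied
# generatedssuper.py placed on the import path only needs to provide a
# GeneratedsSuper class with the same format_* hooks as the fallback above,
# e.g. to change how floating point values are rendered:
#
#   # generatedssuper.py
#   class GeneratedsSuper(object):
#       def format_string(self, input_data, input_name=''):
#           return input_data
#       def format_integer(self, input_data, input_name=''):
#           return '%d' % input_data
#       def format_float(self, input_data, input_name=''):
#           return '%.3f' % input_data
#       def format_double(self, input_data, input_name=''):
#           return '%.6e' % input_data
#       def format_boolean(self, input_data, input_name=''):
#           return '%s' % input_data
#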
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
        outfile.write('    ')
def quote_xml(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace,name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class _MemberSpec(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type(self): return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
#
# Data representation classes.
#
class DoxygenType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, version=None, compounddef=None):
self.version = version
self.compounddef = compounddef
def factory(*args_, **kwargs_):
if DoxygenType.subclass:
return DoxygenType.subclass(*args_, **kwargs_)
else:
return DoxygenType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_compounddef(self): return self.compounddef
def set_compounddef(self, compounddef): self.compounddef = compounddef
def get_version(self): return self.version
def set_version(self, version): self.version = version
def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
outfile.write(' version=%s' % (quote_attrib(self.version), ))
def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
if self.compounddef:
self.compounddef.export(outfile, level, namespace_, name_='compounddef')
def hasContent_(self):
if (
self.compounddef is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DoxygenType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.version is not None:
showIndent(outfile, level)
outfile.write('version = "%s",\n' % (self.version,))
def exportLiteralChildren(self, outfile, level, name_):
if self.compounddef:
showIndent(outfile, level)
outfile.write('compounddef=model_.compounddefType(\n')
self.compounddef.exportLiteral(outfile, level, name_='compounddef')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('version'):
self.version = attrs.get('version').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'compounddef':
obj_ = compounddefType.factory()
obj_.build(child_)
self.set_compounddef(obj_)
# end class DoxygenType
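#
# Illustrative sketch (an assumption; the file name is hypothetical): building
# the root type directly from a doxygen XML document with minidom uses only
# the methods defined above:
#
#   from xml.dom import minidom
#   doc = minidom.parse('compound_example.xml')
#   root = doc.documentElement                 # the <doxygen> element
#   obj = DoxygenType.factory()
#   obj.build(root)                            # fills version and compounddef
#   name = obj.get_compounddef().get_compoundname()
#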
class compounddefType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
self.kind = kind
self.prot = prot
self.id = id
self.compoundname = compoundname
self.title = title
if basecompoundref is None:
self.basecompoundref = []
else:
self.basecompoundref = basecompoundref
if derivedcompoundref is None:
self.derivedcompoundref = []
else:
self.derivedcompoundref = derivedcompoundref
if includes is None:
self.includes = []
else:
self.includes = includes
if includedby is None:
self.includedby = []
else:
self.includedby = includedby
self.incdepgraph = incdepgraph
self.invincdepgraph = invincdepgraph
if innerdir is None:
self.innerdir = []
else:
self.innerdir = innerdir
if innerfile is None:
self.innerfile = []
else:
self.innerfile = innerfile
if innerclass is None:
self.innerclass = []
else:
self.innerclass = innerclass
if innernamespace is None:
self.innernamespace = []
else:
self.innernamespace = innernamespace
if innerpage is None:
self.innerpage = []
else:
self.innerpage = innerpage
if innergroup is None:
self.innergroup = []
else:
self.innergroup = innergroup
self.templateparamlist = templateparamlist
if sectiondef is None:
self.sectiondef = []
else:
self.sectiondef = sectiondef
self.briefdescription = briefdescription
self.detaileddescription = detaileddescription
self.inheritancegraph = inheritancegraph
self.collaborationgraph = collaborationgraph
self.programlisting = programlisting
self.location = location
self.listofallmembers = listofallmembers
def factory(*args_, **kwargs_):
if compounddefType.subclass:
return compounddefType.subclass(*args_, **kwargs_)
else:
return compounddefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_compoundname(self): return self.compoundname
def set_compoundname(self, compoundname): self.compoundname = compoundname
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_basecompoundref(self): return self.basecompoundref
def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref
def add_basecompoundref(self, value): self.basecompoundref.append(value)
def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value
def get_derivedcompoundref(self): return self.derivedcompoundref
def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref
def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value)
def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value
def get_includes(self): return self.includes
def set_includes(self, includes): self.includes = includes
def add_includes(self, value): self.includes.append(value)
def insert_includes(self, index, value): self.includes[index] = value
def get_includedby(self): return self.includedby
def set_includedby(self, includedby): self.includedby = includedby
def add_includedby(self, value): self.includedby.append(value)
def insert_includedby(self, index, value): self.includedby[index] = value
def get_incdepgraph(self): return self.incdepgraph
def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph
def get_invincdepgraph(self): return self.invincdepgraph
def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph
def get_innerdir(self): return self.innerdir
def set_innerdir(self, innerdir): self.innerdir = innerdir
def add_innerdir(self, value): self.innerdir.append(value)
def insert_innerdir(self, index, value): self.innerdir[index] = value
def get_innerfile(self): return self.innerfile
def set_innerfile(self, innerfile): self.innerfile = innerfile
def add_innerfile(self, value): self.innerfile.append(value)
def insert_innerfile(self, index, value): self.innerfile[index] = value
def get_innerclass(self): return self.innerclass
def set_innerclass(self, innerclass): self.innerclass = innerclass
def add_innerclass(self, value): self.innerclass.append(value)
def insert_innerclass(self, index, value): self.innerclass[index] = value
def get_innernamespace(self): return self.innernamespace
def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace
def add_innernamespace(self, value): self.innernamespace.append(value)
def insert_innernamespace(self, index, value): self.innernamespace[index] = value
def get_innerpage(self): return self.innerpage
def set_innerpage(self, innerpage): self.innerpage = innerpage
def add_innerpage(self, value): self.innerpage.append(value)
def insert_innerpage(self, index, value): self.innerpage[index] = value
def get_innergroup(self): return self.innergroup
def set_innergroup(self, innergroup): self.innergroup = innergroup
def add_innergroup(self, value): self.innergroup.append(value)
def insert_innergroup(self, index, value): self.innergroup[index] = value
def get_templateparamlist(self): return self.templateparamlist
def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
def get_sectiondef(self): return self.sectiondef
def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef
def add_sectiondef(self, value): self.sectiondef.append(value)
def insert_sectiondef(self, index, value): self.sectiondef[index] = value
def get_briefdescription(self): return self.briefdescription
def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
def get_detaileddescription(self): return self.detaileddescription
def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
def get_inheritancegraph(self): return self.inheritancegraph
def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph
def get_collaborationgraph(self): return self.collaborationgraph
def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph
def get_programlisting(self): return self.programlisting
def set_programlisting(self, programlisting): self.programlisting = programlisting
def get_location(self): return self.location
def set_location(self, location): self.location = location
def get_listofallmembers(self): return self.listofallmembers
def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='compounddefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'):
if self.compoundname is not None:
showIndent(outfile, level)
outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_))
if self.title is not None:
showIndent(outfile, level)
outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_))
for basecompoundref_ in self.basecompoundref:
basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref')
for derivedcompoundref_ in self.derivedcompoundref:
derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref')
for includes_ in self.includes:
includes_.export(outfile, level, namespace_, name_='includes')
for includedby_ in self.includedby:
includedby_.export(outfile, level, namespace_, name_='includedby')
if self.incdepgraph:
self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph')
if self.invincdepgraph:
self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph')
for innerdir_ in self.innerdir:
innerdir_.export(outfile, level, namespace_, name_='innerdir')
for innerfile_ in self.innerfile:
innerfile_.export(outfile, level, namespace_, name_='innerfile')
for innerclass_ in self.innerclass:
innerclass_.export(outfile, level, namespace_, name_='innerclass')
for innernamespace_ in self.innernamespace:
innernamespace_.export(outfile, level, namespace_, name_='innernamespace')
for innerpage_ in self.innerpage:
innerpage_.export(outfile, level, namespace_, name_='innerpage')
for innergroup_ in self.innergroup:
innergroup_.export(outfile, level, namespace_, name_='innergroup')
if self.templateparamlist:
self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
for sectiondef_ in self.sectiondef:
sectiondef_.export(outfile, level, namespace_, name_='sectiondef')
if self.briefdescription:
self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
if self.detaileddescription:
self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
if self.inheritancegraph:
self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph')
if self.collaborationgraph:
self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph')
if self.programlisting:
self.programlisting.export(outfile, level, namespace_, name_='programlisting')
if self.location:
self.location.export(outfile, level, namespace_, name_='location')
if self.listofallmembers:
self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers')
def hasContent_(self):
if (
self.compoundname is not None or
self.title is not None or
self.basecompoundref is not None or
self.derivedcompoundref is not None or
self.includes is not None or
self.includedby is not None or
self.incdepgraph is not None or
self.invincdepgraph is not None or
self.innerdir is not None or
self.innerfile is not None or
self.innerclass is not None or
self.innernamespace is not None or
self.innerpage is not None or
self.innergroup is not None or
self.templateparamlist is not None or
self.sectiondef is not None or
self.briefdescription is not None or
self.detaileddescription is not None or
self.inheritancegraph is not None or
self.collaborationgraph is not None or
self.programlisting is not None or
self.location is not None or
self.listofallmembers is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='compounddefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
if self.prot is not None:
showIndent(outfile, level)
outfile.write('prot = "%s",\n' % (self.prot,))
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding))
if self.title:
showIndent(outfile, level)
outfile.write('title=model_.xsd_string(\n')
self.title.exportLiteral(outfile, level, name_='title')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('basecompoundref=[\n')
level += 1
for basecompoundref in self.basecompoundref:
showIndent(outfile, level)
outfile.write('model_.basecompoundref(\n')
basecompoundref.exportLiteral(outfile, level, name_='basecompoundref')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('derivedcompoundref=[\n')
level += 1
for derivedcompoundref in self.derivedcompoundref:
showIndent(outfile, level)
outfile.write('model_.derivedcompoundref(\n')
derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('includes=[\n')
level += 1
for includes in self.includes:
showIndent(outfile, level)
outfile.write('model_.includes(\n')
includes.exportLiteral(outfile, level, name_='includes')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('includedby=[\n')
level += 1
for includedby in self.includedby:
showIndent(outfile, level)
outfile.write('model_.includedby(\n')
includedby.exportLiteral(outfile, level, name_='includedby')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.incdepgraph:
showIndent(outfile, level)
outfile.write('incdepgraph=model_.graphType(\n')
self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph')
showIndent(outfile, level)
outfile.write('),\n')
if self.invincdepgraph:
showIndent(outfile, level)
outfile.write('invincdepgraph=model_.graphType(\n')
self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('innerdir=[\n')
level += 1
for innerdir in self.innerdir:
showIndent(outfile, level)
outfile.write('model_.innerdir(\n')
innerdir.exportLiteral(outfile, level, name_='innerdir')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('innerfile=[\n')
level += 1
for innerfile in self.innerfile:
showIndent(outfile, level)
outfile.write('model_.innerfile(\n')
innerfile.exportLiteral(outfile, level, name_='innerfile')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('innerclass=[\n')
level += 1
for innerclass in self.innerclass:
showIndent(outfile, level)
outfile.write('model_.innerclass(\n')
innerclass.exportLiteral(outfile, level, name_='innerclass')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('innernamespace=[\n')
level += 1
for innernamespace in self.innernamespace:
showIndent(outfile, level)
outfile.write('model_.innernamespace(\n')
innernamespace.exportLiteral(outfile, level, name_='innernamespace')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('innerpage=[\n')
level += 1
for innerpage in self.innerpage:
showIndent(outfile, level)
outfile.write('model_.innerpage(\n')
innerpage.exportLiteral(outfile, level, name_='innerpage')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('innergroup=[\n')
level += 1
for innergroup in self.innergroup:
showIndent(outfile, level)
outfile.write('model_.innergroup(\n')
innergroup.exportLiteral(outfile, level, name_='innergroup')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.templateparamlist:
showIndent(outfile, level)
outfile.write('templateparamlist=model_.templateparamlistType(\n')
self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('sectiondef=[\n')
level += 1
for sectiondef in self.sectiondef:
showIndent(outfile, level)
outfile.write('model_.sectiondef(\n')
sectiondef.exportLiteral(outfile, level, name_='sectiondef')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.detaileddescription:
showIndent(outfile, level)
outfile.write('detaileddescription=model_.descriptionType(\n')
self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.inheritancegraph:
showIndent(outfile, level)
outfile.write('inheritancegraph=model_.graphType(\n')
self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph')
showIndent(outfile, level)
outfile.write('),\n')
if self.collaborationgraph:
showIndent(outfile, level)
outfile.write('collaborationgraph=model_.graphType(\n')
self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph')
showIndent(outfile, level)
outfile.write('),\n')
if self.programlisting:
showIndent(outfile, level)
outfile.write('programlisting=model_.listingType(\n')
self.programlisting.exportLiteral(outfile, level, name_='programlisting')
showIndent(outfile, level)
outfile.write('),\n')
if self.location:
showIndent(outfile, level)
outfile.write('location=model_.locationType(\n')
self.location.exportLiteral(outfile, level, name_='location')
showIndent(outfile, level)
outfile.write('),\n')
if self.listofallmembers:
showIndent(outfile, level)
outfile.write('listofallmembers=model_.listofallmembersType(\n')
self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'compoundname':
compoundname_ = ''
for text__content_ in child_.childNodes:
compoundname_ += text__content_.nodeValue
self.compoundname = compoundname_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'title':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_title(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'basecompoundref':
obj_ = compoundRefType.factory()
obj_.build(child_)
self.basecompoundref.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'derivedcompoundref':
obj_ = compoundRefType.factory()
obj_.build(child_)
self.derivedcompoundref.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'includes':
obj_ = incType.factory()
obj_.build(child_)
self.includes.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'includedby':
obj_ = incType.factory()
obj_.build(child_)
self.includedby.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'incdepgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_incdepgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'invincdepgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_invincdepgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'innerdir':
obj_ = refType.factory()
obj_.build(child_)
self.innerdir.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'innerfile':
obj_ = refType.factory()
obj_.build(child_)
self.innerfile.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'innerclass':
obj_ = refType.factory()
obj_.build(child_)
self.innerclass.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'innernamespace':
obj_ = refType.factory()
obj_.build(child_)
self.innernamespace.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'innerpage':
obj_ = refType.factory()
obj_.build(child_)
self.innerpage.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'innergroup':
obj_ = refType.factory()
obj_.build(child_)
self.innergroup.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'templateparamlist':
obj_ = templateparamlistType.factory()
obj_.build(child_)
self.set_templateparamlist(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sectiondef':
obj_ = sectiondefType.factory()
obj_.build(child_)
self.sectiondef.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'detaileddescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_detaileddescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'inheritancegraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_inheritancegraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'collaborationgraph':
obj_ = graphType.factory()
obj_.build(child_)
self.set_collaborationgraph(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'programlisting':
obj_ = listingType.factory()
obj_.build(child_)
self.set_programlisting(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'location':
obj_ = locationType.factory()
obj_.build(child_)
self.set_location(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'listofallmembers':
obj_ = listofallmembersType.factory()
obj_.build(child_)
self.set_listofallmembers(obj_)
# end class compounddefType
class listofallmembersType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, member=None):
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if listofallmembersType.subclass:
return listofallmembersType.subclass(*args_, **kwargs_)
else:
return listofallmembersType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'):
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
def hasContent_(self):
if (
self.member is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='listofallmembersType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('member=[\n')
level += 1
for member in self.member:
showIndent(outfile, level)
outfile.write('model_.member(\n')
member.exportLiteral(outfile, level, name_='member')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'member':
obj_ = memberRefType.factory()
obj_.build(child_)
self.member.append(obj_)
# end class listofallmembersType
class memberRefType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None):
self.virt = virt
self.prot = prot
self.refid = refid
self.ambiguityscope = ambiguityscope
self.scope = scope
self.name = name
def factory(*args_, **kwargs_):
if memberRefType.subclass:
return memberRefType.subclass(*args_, **kwargs_)
else:
return memberRefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_scope(self): return self.scope
def set_scope(self, scope): self.scope = scope
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_virt(self): return self.virt
def set_virt(self, virt): self.virt = virt
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def get_ambiguityscope(self): return self.ambiguityscope
def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope
def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='memberRefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'):
if self.virt is not None:
outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.ambiguityscope is not None:
outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'):
if self.scope is not None:
showIndent(outfile, level)
outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
if self.name is not None:
showIndent(outfile, level)
outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
def hasContent_(self):
if (
self.scope is not None or
self.name is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='memberRefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.virt is not None:
showIndent(outfile, level)
outfile.write('virt = "%s",\n' % (self.virt,))
if self.prot is not None:
showIndent(outfile, level)
outfile.write('prot = "%s",\n' % (self.prot,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
if self.ambiguityscope is not None:
showIndent(outfile, level)
outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('virt'):
self.virt = attrs.get('virt').value
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
if attrs.get('ambiguityscope'):
self.ambiguityscope = attrs.get('ambiguityscope').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'scope':
scope_ = ''
for text__content_ in child_.childNodes:
scope_ += text__content_.nodeValue
self.scope = scope_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
# end class memberRefType
class scope(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if scope.subclass:
return scope.subclass(*args_, **kwargs_)
else:
return scope(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='scope')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='scope'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='scope'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='scope'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class scope
class name(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if name.subclass:
return name.subclass(*args_, **kwargs_)
else:
return name(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='name')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='name'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='name'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='name'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class name
class compoundRefType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.virt = virt
self.prot = prot
self.refid = refid
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if compoundRefType.subclass:
return compoundRefType.subclass(*args_, **kwargs_)
else:
return compoundRefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_virt(self): return self.virt
def set_virt(self, virt): self.virt = virt
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='compoundRefType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'):
if self.virt is not None:
outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='compoundRefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.virt is not None:
showIndent(outfile, level)
outfile.write('virt = "%s",\n' % (self.virt,))
if self.prot is not None:
showIndent(outfile, level)
outfile.write('prot = "%s",\n' % (self.prot,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('virt'):
self.virt = attrs.get('virt').value
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class compoundRefType
class reimplementType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if reimplementType.subclass:
return reimplementType.subclass(*args_, **kwargs_)
else:
return reimplementType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='reimplementType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'):
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='reimplementType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class reimplementType
class incType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.local = local
self.refid = refid
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if incType.subclass:
return incType.subclass(*args_, **kwargs_)
else:
return incType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_local(self): return self.local
def set_local(self, local): self.local = local
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='incType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='incType'):
if self.local is not None:
outfile.write(' local=%s' % (quote_attrib(self.local), ))
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='incType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='incType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.local is not None:
showIndent(outfile, level)
outfile.write('local = "%s",\n' % (self.local,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('local'):
self.local = attrs.get('local').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class incType
class refType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
self.prot = prot
self.refid = refid
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if refType.subclass:
return refType.subclass(*args_, **kwargs_)
else:
return refType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='refType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='refType'):
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='refType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='refType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.prot is not None:
showIndent(outfile, level)
outfile.write('prot = "%s",\n' % (self.prot,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class refType
class refTextType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
self.kindref = kindref
self.external = external
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if refTextType.subclass:
return refTextType.subclass(*args_, **kwargs_)
else:
return refTextType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def get_kindref(self): return self.kindref
def set_kindref(self, kindref): self.kindref = kindref
def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='refTextType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'):
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.kindref is not None:
outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
if self.external is not None:
outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
def exportChildren(self, outfile, level, namespace_='', name_='refTextType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='refTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
if self.kindref is not None:
showIndent(outfile, level)
outfile.write('kindref = "%s",\n' % (self.kindref,))
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
if attrs.get('kindref'):
self.kindref = attrs.get('kindref').value
if attrs.get('external'):
self.external = attrs.get('external').value
def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            # plain text is kept both as mixed content and in valueOf_
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class refTextType
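# Minimal usage sketch for refTextType (illustration only; the XML snippet and
# variable names below are made up, not taken from real Doxygen output):
#
#   from xml.dom import minidom
#   node = minidom.parseString(
#       '<ref refid="classFoo" kindref="compound">Foo</ref>').documentElement
#   ref = refTextType.factory()
#   ref.build(node)
#   ref.get_refid()      # -> u'classFoo'
#   ref.getValueOf_()    # -> u'Foo'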
class sectiondefType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, kind=None, header=None, description=None, memberdef=None):
self.kind = kind
self.header = header
self.description = description
if memberdef is None:
self.memberdef = []
else:
self.memberdef = memberdef
def factory(*args_, **kwargs_):
if sectiondefType.subclass:
return sectiondefType.subclass(*args_, **kwargs_)
else:
return sectiondefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_header(self): return self.header
def set_header(self, header): self.header = header
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_memberdef(self): return self.memberdef
def set_memberdef(self, memberdef): self.memberdef = memberdef
def add_memberdef(self, value): self.memberdef.append(value)
def insert_memberdef(self, index, value): self.memberdef[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='sectiondefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'):
if self.header is not None:
showIndent(outfile, level)
outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
if self.description:
self.description.export(outfile, level, namespace_, name_='description')
for memberdef_ in self.memberdef:
memberdef_.export(outfile, level, namespace_, name_='memberdef')
def hasContent_(self):
if (
self.header is not None or
self.description is not None or
self.memberdef is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='sectiondefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding))
if self.description:
showIndent(outfile, level)
outfile.write('description=model_.descriptionType(\n')
self.description.exportLiteral(outfile, level, name_='description')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('memberdef=[\n')
level += 1
for memberdef in self.memberdef:
showIndent(outfile, level)
outfile.write('model_.memberdef(\n')
memberdef.exportLiteral(outfile, level, name_='memberdef')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'header':
header_ = ''
for text__content_ in child_.childNodes:
header_ += text__content_.nodeValue
self.header = header_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'description':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_description(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'memberdef':
obj_ = memberdefType.factory()
obj_.build(child_)
self.memberdef.append(obj_)
# end class sectiondefType
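# Minimal usage sketch for sectiondefType (illustration only; the XML input and
# the use of sys.stdout are assumptions, not part of the generated bindings):
#
#   import sys
#   from xml.dom import minidom
#   node = minidom.parseString(
#       '<sectiondef kind="public-func"><header>API</header></sectiondef>'
#   ).documentElement
#   sect = sectiondefType.factory()
#   sect.build(node)
#   sect.get_kind()          # -> u'public-func'
#   sect.export(sys.stdout, 0, name_='sectiondef')
#
# A real <sectiondef> would also carry <memberdef> children, each parsed into a
# memberdefType (defined next) and appended to sect.get_memberdef().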
class memberdefType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
self.initonly = initonly
self.kind = kind
self.volatile = volatile
self.const = const
self.raisexx = raisexx
self.virt = virt
self.readable = readable
self.prot = prot
self.explicit = explicit
self.new = new
self.final = final
self.writable = writable
self.add = add
self.static = static
self.remove = remove
self.sealed = sealed
self.mutable = mutable
self.gettable = gettable
self.inline = inline
self.settable = settable
self.id = id
self.templateparamlist = templateparamlist
self.type_ = type_
self.definition = definition
self.argsstring = argsstring
self.name = name
self.read = read
self.write = write
self.bitfield = bitfield
if reimplements is None:
self.reimplements = []
else:
self.reimplements = reimplements
if reimplementedby is None:
self.reimplementedby = []
else:
self.reimplementedby = reimplementedby
if param is None:
self.param = []
else:
self.param = param
if enumvalue is None:
self.enumvalue = []
else:
self.enumvalue = enumvalue
self.initializer = initializer
self.exceptions = exceptions
self.briefdescription = briefdescription
self.detaileddescription = detaileddescription
self.inbodydescription = inbodydescription
self.location = location
if references is None:
self.references = []
else:
self.references = references
if referencedby is None:
self.referencedby = []
else:
self.referencedby = referencedby
def factory(*args_, **kwargs_):
if memberdefType.subclass:
return memberdefType.subclass(*args_, **kwargs_)
else:
return memberdefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_templateparamlist(self): return self.templateparamlist
def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_definition(self): return self.definition
def set_definition(self, definition): self.definition = definition
def get_argsstring(self): return self.argsstring
def set_argsstring(self, argsstring): self.argsstring = argsstring
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_read(self): return self.read
def set_read(self, read): self.read = read
def get_write(self): return self.write
def set_write(self, write): self.write = write
def get_bitfield(self): return self.bitfield
def set_bitfield(self, bitfield): self.bitfield = bitfield
def get_reimplements(self): return self.reimplements
def set_reimplements(self, reimplements): self.reimplements = reimplements
def add_reimplements(self, value): self.reimplements.append(value)
def insert_reimplements(self, index, value): self.reimplements[index] = value
def get_reimplementedby(self): return self.reimplementedby
def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby
def add_reimplementedby(self, value): self.reimplementedby.append(value)
def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value
def get_param(self): return self.param
def set_param(self, param): self.param = param
def add_param(self, value): self.param.append(value)
def insert_param(self, index, value): self.param[index] = value
def get_enumvalue(self): return self.enumvalue
def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue
def add_enumvalue(self, value): self.enumvalue.append(value)
def insert_enumvalue(self, index, value): self.enumvalue[index] = value
def get_initializer(self): return self.initializer
def set_initializer(self, initializer): self.initializer = initializer
def get_exceptions(self): return self.exceptions
def set_exceptions(self, exceptions): self.exceptions = exceptions
def get_briefdescription(self): return self.briefdescription
def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
def get_detaileddescription(self): return self.detaileddescription
def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
def get_inbodydescription(self): return self.inbodydescription
def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription
def get_location(self): return self.location
def set_location(self, location): self.location = location
def get_references(self): return self.references
def set_references(self, references): self.references = references
def add_references(self, value): self.references.append(value)
def insert_references(self, index, value): self.references[index] = value
def get_referencedby(self): return self.referencedby
def set_referencedby(self, referencedby): self.referencedby = referencedby
def add_referencedby(self, value): self.referencedby.append(value)
def insert_referencedby(self, index, value): self.referencedby[index] = value
def get_initonly(self): return self.initonly
def set_initonly(self, initonly): self.initonly = initonly
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_volatile(self): return self.volatile
def set_volatile(self, volatile): self.volatile = volatile
def get_const(self): return self.const
def set_const(self, const): self.const = const
def get_raise(self): return self.raisexx
def set_raise(self, raisexx): self.raisexx = raisexx
def get_virt(self): return self.virt
def set_virt(self, virt): self.virt = virt
def get_readable(self): return self.readable
def set_readable(self, readable): self.readable = readable
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_explicit(self): return self.explicit
def set_explicit(self, explicit): self.explicit = explicit
def get_new(self): return self.new
def set_new(self, new): self.new = new
def get_final(self): return self.final
def set_final(self, final): self.final = final
def get_writable(self): return self.writable
def set_writable(self, writable): self.writable = writable
def get_add(self): return self.add
def set_add(self, add): self.add = add
def get_static(self): return self.static
def set_static(self, static): self.static = static
def get_remove(self): return self.remove
def set_remove(self, remove): self.remove = remove
def get_sealed(self): return self.sealed
def set_sealed(self, sealed): self.sealed = sealed
def get_mutable(self): return self.mutable
def set_mutable(self, mutable): self.mutable = mutable
def get_gettable(self): return self.gettable
def set_gettable(self, gettable): self.gettable = gettable
def get_inline(self): return self.inline
def set_inline(self, inline): self.inline = inline
def get_settable(self): return self.settable
def set_settable(self, settable): self.settable = settable
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='memberdefType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'):
if self.initonly is not None:
outfile.write(' initonly=%s' % (quote_attrib(self.initonly), ))
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
if self.volatile is not None:
outfile.write(' volatile=%s' % (quote_attrib(self.volatile), ))
if self.const is not None:
outfile.write(' const=%s' % (quote_attrib(self.const), ))
if self.raisexx is not None:
outfile.write(' raise=%s' % (quote_attrib(self.raisexx), ))
if self.virt is not None:
outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
if self.readable is not None:
outfile.write(' readable=%s' % (quote_attrib(self.readable), ))
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.explicit is not None:
outfile.write(' explicit=%s' % (quote_attrib(self.explicit), ))
if self.new is not None:
outfile.write(' new=%s' % (quote_attrib(self.new), ))
if self.final is not None:
outfile.write(' final=%s' % (quote_attrib(self.final), ))
if self.writable is not None:
outfile.write(' writable=%s' % (quote_attrib(self.writable), ))
if self.add is not None:
outfile.write(' add=%s' % (quote_attrib(self.add), ))
if self.static is not None:
outfile.write(' static=%s' % (quote_attrib(self.static), ))
if self.remove is not None:
outfile.write(' remove=%s' % (quote_attrib(self.remove), ))
if self.sealed is not None:
outfile.write(' sealed=%s' % (quote_attrib(self.sealed), ))
if self.mutable is not None:
outfile.write(' mutable=%s' % (quote_attrib(self.mutable), ))
if self.gettable is not None:
outfile.write(' gettable=%s' % (quote_attrib(self.gettable), ))
if self.inline is not None:
outfile.write(' inline=%s' % (quote_attrib(self.inline), ))
if self.settable is not None:
outfile.write(' settable=%s' % (quote_attrib(self.settable), ))
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'):
if self.templateparamlist:
self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
if self.type_:
self.type_.export(outfile, level, namespace_, name_='type')
if self.definition is not None:
showIndent(outfile, level)
outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_))
if self.argsstring is not None:
showIndent(outfile, level)
outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_))
if self.name is not None:
showIndent(outfile, level)
outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
if self.read is not None:
showIndent(outfile, level)
outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_))
if self.write is not None:
showIndent(outfile, level)
outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_))
if self.bitfield is not None:
showIndent(outfile, level)
outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_))
for reimplements_ in self.reimplements:
reimplements_.export(outfile, level, namespace_, name_='reimplements')
for reimplementedby_ in self.reimplementedby:
reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby')
for param_ in self.param:
param_.export(outfile, level, namespace_, name_='param')
for enumvalue_ in self.enumvalue:
enumvalue_.export(outfile, level, namespace_, name_='enumvalue')
if self.initializer:
self.initializer.export(outfile, level, namespace_, name_='initializer')
if self.exceptions:
self.exceptions.export(outfile, level, namespace_, name_='exceptions')
if self.briefdescription:
self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
if self.detaileddescription:
self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
if self.inbodydescription:
self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription')
if self.location:
self.location.export(outfile, level, namespace_, name_='location', )
for references_ in self.references:
references_.export(outfile, level, namespace_, name_='references')
for referencedby_ in self.referencedby:
referencedby_.export(outfile, level, namespace_, name_='referencedby')
def hasContent_(self):
if (
self.templateparamlist is not None or
self.type_ is not None or
self.definition is not None or
self.argsstring is not None or
self.name is not None or
self.read is not None or
self.write is not None or
self.bitfield is not None or
self.reimplements is not None or
self.reimplementedby is not None or
self.param is not None or
self.enumvalue is not None or
self.initializer is not None or
self.exceptions is not None or
self.briefdescription is not None or
self.detaileddescription is not None or
self.inbodydescription is not None or
self.location is not None or
self.references is not None or
self.referencedby is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='memberdefType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.initonly is not None:
showIndent(outfile, level)
outfile.write('initonly = "%s",\n' % (self.initonly,))
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
if self.volatile is not None:
showIndent(outfile, level)
outfile.write('volatile = "%s",\n' % (self.volatile,))
if self.const is not None:
showIndent(outfile, level)
outfile.write('const = "%s",\n' % (self.const,))
if self.raisexx is not None:
showIndent(outfile, level)
outfile.write('raisexx = "%s",\n' % (self.raisexx,))
if self.virt is not None:
showIndent(outfile, level)
outfile.write('virt = "%s",\n' % (self.virt,))
if self.readable is not None:
showIndent(outfile, level)
outfile.write('readable = "%s",\n' % (self.readable,))
if self.prot is not None:
showIndent(outfile, level)
outfile.write('prot = "%s",\n' % (self.prot,))
if self.explicit is not None:
showIndent(outfile, level)
outfile.write('explicit = "%s",\n' % (self.explicit,))
if self.new is not None:
showIndent(outfile, level)
outfile.write('new = "%s",\n' % (self.new,))
if self.final is not None:
showIndent(outfile, level)
outfile.write('final = "%s",\n' % (self.final,))
if self.writable is not None:
showIndent(outfile, level)
outfile.write('writable = "%s",\n' % (self.writable,))
if self.add is not None:
showIndent(outfile, level)
outfile.write('add = "%s",\n' % (self.add,))
if self.static is not None:
showIndent(outfile, level)
outfile.write('static = "%s",\n' % (self.static,))
if self.remove is not None:
showIndent(outfile, level)
outfile.write('remove = "%s",\n' % (self.remove,))
if self.sealed is not None:
showIndent(outfile, level)
outfile.write('sealed = "%s",\n' % (self.sealed,))
if self.mutable is not None:
showIndent(outfile, level)
outfile.write('mutable = "%s",\n' % (self.mutable,))
if self.gettable is not None:
showIndent(outfile, level)
outfile.write('gettable = "%s",\n' % (self.gettable,))
if self.inline is not None:
showIndent(outfile, level)
outfile.write('inline = "%s",\n' % (self.inline,))
if self.settable is not None:
showIndent(outfile, level)
outfile.write('settable = "%s",\n' % (self.settable,))
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
if self.templateparamlist:
showIndent(outfile, level)
outfile.write('templateparamlist=model_.templateparamlistType(\n')
self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
showIndent(outfile, level)
outfile.write('),\n')
if self.type_:
showIndent(outfile, level)
outfile.write('type_=model_.linkedTextType(\n')
self.type_.exportLiteral(outfile, level, name_='type')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('reimplements=[\n')
level += 1
for reimplements in self.reimplements:
showIndent(outfile, level)
outfile.write('model_.reimplements(\n')
reimplements.exportLiteral(outfile, level, name_='reimplements')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('reimplementedby=[\n')
level += 1
for reimplementedby in self.reimplementedby:
showIndent(outfile, level)
outfile.write('model_.reimplementedby(\n')
reimplementedby.exportLiteral(outfile, level, name_='reimplementedby')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('param=[\n')
level += 1
for param in self.param:
showIndent(outfile, level)
outfile.write('model_.param(\n')
param.exportLiteral(outfile, level, name_='param')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('enumvalue=[\n')
level += 1
for enumvalue in self.enumvalue:
showIndent(outfile, level)
outfile.write('model_.enumvalue(\n')
enumvalue.exportLiteral(outfile, level, name_='enumvalue')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.initializer:
showIndent(outfile, level)
outfile.write('initializer=model_.linkedTextType(\n')
self.initializer.exportLiteral(outfile, level, name_='initializer')
showIndent(outfile, level)
outfile.write('),\n')
if self.exceptions:
showIndent(outfile, level)
outfile.write('exceptions=model_.linkedTextType(\n')
self.exceptions.exportLiteral(outfile, level, name_='exceptions')
showIndent(outfile, level)
outfile.write('),\n')
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.detaileddescription:
showIndent(outfile, level)
outfile.write('detaileddescription=model_.descriptionType(\n')
self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.inbodydescription:
showIndent(outfile, level)
outfile.write('inbodydescription=model_.descriptionType(\n')
self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription')
showIndent(outfile, level)
outfile.write('),\n')
if self.location:
showIndent(outfile, level)
outfile.write('location=model_.locationType(\n')
self.location.exportLiteral(outfile, level, name_='location')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('references=[\n')
level += 1
for references in self.references:
showIndent(outfile, level)
outfile.write('model_.references(\n')
references.exportLiteral(outfile, level, name_='references')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('referencedby=[\n')
level += 1
for referencedby in self.referencedby:
showIndent(outfile, level)
outfile.write('model_.referencedby(\n')
referencedby.exportLiteral(outfile, level, name_='referencedby')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('initonly'):
self.initonly = attrs.get('initonly').value
if attrs.get('kind'):
self.kind = attrs.get('kind').value
if attrs.get('volatile'):
self.volatile = attrs.get('volatile').value
if attrs.get('const'):
self.const = attrs.get('const').value
if attrs.get('raise'):
self.raisexx = attrs.get('raise').value
if attrs.get('virt'):
self.virt = attrs.get('virt').value
if attrs.get('readable'):
self.readable = attrs.get('readable').value
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('explicit'):
self.explicit = attrs.get('explicit').value
if attrs.get('new'):
self.new = attrs.get('new').value
if attrs.get('final'):
self.final = attrs.get('final').value
if attrs.get('writable'):
self.writable = attrs.get('writable').value
if attrs.get('add'):
self.add = attrs.get('add').value
if attrs.get('static'):
self.static = attrs.get('static').value
if attrs.get('remove'):
self.remove = attrs.get('remove').value
if attrs.get('sealed'):
self.sealed = attrs.get('sealed').value
if attrs.get('mutable'):
self.mutable = attrs.get('mutable').value
if attrs.get('gettable'):
self.gettable = attrs.get('gettable').value
if attrs.get('inline'):
self.inline = attrs.get('inline').value
if attrs.get('settable'):
self.settable = attrs.get('settable').value
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'templateparamlist':
obj_ = templateparamlistType.factory()
obj_.build(child_)
self.set_templateparamlist(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'type':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_type(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'definition':
definition_ = ''
for text__content_ in child_.childNodes:
definition_ += text__content_.nodeValue
self.definition = definition_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'argsstring':
argsstring_ = ''
for text__content_ in child_.childNodes:
argsstring_ += text__content_.nodeValue
self.argsstring = argsstring_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
name_ = ''
for text__content_ in child_.childNodes:
name_ += text__content_.nodeValue
self.name = name_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'read':
read_ = ''
for text__content_ in child_.childNodes:
read_ += text__content_.nodeValue
self.read = read_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'write':
write_ = ''
for text__content_ in child_.childNodes:
write_ += text__content_.nodeValue
self.write = write_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'bitfield':
bitfield_ = ''
for text__content_ in child_.childNodes:
bitfield_ += text__content_.nodeValue
self.bitfield = bitfield_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'reimplements':
obj_ = reimplementType.factory()
obj_.build(child_)
self.reimplements.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'reimplementedby':
obj_ = reimplementType.factory()
obj_.build(child_)
self.reimplementedby.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'param':
obj_ = paramType.factory()
obj_.build(child_)
self.param.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'enumvalue':
obj_ = enumvalueType.factory()
obj_.build(child_)
self.enumvalue.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'initializer':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_initializer(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'exceptions':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_exceptions(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'detaileddescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_detaileddescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'inbodydescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_inbodydescription(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'location':
obj_ = locationType.factory()
obj_.build(child_)
self.set_location(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'references':
obj_ = referenceType.factory()
obj_.build(child_)
self.references.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'referencedby':
obj_ = referenceType.factory()
obj_.build(child_)
self.referencedby.append(obj_)
# end class memberdefType
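# Minimal usage sketch for memberdefType (illustration only; the XML fragment
# below is an assumption, not real Doxygen output):
#
#   from xml.dom import minidom
#   node = minidom.parseString(
#       '<memberdef kind="function" id="m1" prot="public" static="no">'
#       '<name>open</name><argsstring>(const char *path)</argsstring>'
#       '</memberdef>').documentElement
#   member = memberdefType.factory()
#   member.build(node)
#   member.get_kind()        # -> u'function'
#   member.get_name()        # -> u'open'
#   member.get_argsstring()  # -> u'(const char *path)'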
class definition(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if definition.subclass:
return definition.subclass(*args_, **kwargs_)
else:
return definition(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='definition')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='definition'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='definition'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='definition'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class definition
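# definition above, and argsstring/read/write/bitfield/declname below, are the
# same thin wrapper: a single valueOf_ string filled in by build() from the
# element's text (using the '![CDATA[...]]' marker for CDATA sections) and
# written back out by export()/exportChildren(). A minimal sketch, assuming a
# hand-built instance rather than one parsed from XML:
#
#   import sys
#   d = definition.factory(valueOf_='int open(const char *path)')
#   d.export(sys.stdout, 0, name_='definition')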
class argsstring(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if argsstring.subclass:
return argsstring.subclass(*args_, **kwargs_)
else:
return argsstring(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='argsstring')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='argsstring'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='argsstring'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class argsstring
class read(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if read.subclass:
return read.subclass(*args_, **kwargs_)
else:
return read(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='read')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='read'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='read'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='read'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class read
class write(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if write.subclass:
return write.subclass(*args_, **kwargs_)
else:
return write(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='write')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='write'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='write'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='write'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class write
class bitfield(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if bitfield.subclass:
return bitfield.subclass(*args_, **kwargs_)
else:
return bitfield(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='bitfield')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='bitfield'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='bitfield'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class bitfield
class descriptionType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
        # assign the named children so hasContent_() and the accessors work
        self.title = title
        self.para = para if para is not None else []
        self.sect1 = sect1 if sect1 is not None else []
        self.internal = internal
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if descriptionType.subclass:
return descriptionType.subclass(*args_, **kwargs_)
else:
return descriptionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect1(self): return self.sect1
def set_sect1(self, sect1): self.sect1 = sect1
def add_sect1(self, value): self.sect1.append(value)
def insert_sect1(self, index, value): self.sect1[index] = value
def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='descriptionType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect1 is not None or
self.internal is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='descriptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
        # All of the mixed content (title/para/sect1/internal plus text nodes)
        # lives in self.content_, so it only needs to be written out once.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sect1':
childobj_ = docSect1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'sect1', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'internal':
childobj_ = docInternalType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class descriptionType
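# Minimal usage sketch for descriptionType (illustration only; the XML snippet
# is an assumption). Child elements and interleaved text all end up as
# MixedContainer entries in self.content_, in document order:
#
#   from xml.dom import minidom
#   node = minidom.parseString(
#       '<briefdescription><para>Opens a file.</para></briefdescription>'
#   ).documentElement
#   desc = descriptionType.factory()
#   desc.build(node)
#   [item.name for item in desc.content_]   # -> ['para']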
class enumvalueType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
        self.prot = prot
        self.id = id
        # also keep the named children so hasContent_() and the accessors work
        self.name = name
        self.initializer = initializer
        self.briefdescription = briefdescription
        self.detaileddescription = detaileddescription
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if enumvalueType.subclass:
return enumvalueType.subclass(*args_, **kwargs_)
else:
return enumvalueType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_initializer(self): return self.initializer
def set_initializer(self, initializer): self.initializer = initializer
def get_briefdescription(self): return self.briefdescription
def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
def get_detaileddescription(self): return self.detaileddescription
def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
def get_prot(self): return self.prot
def set_prot(self, prot): self.prot = prot
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='enumvalueType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'):
if self.prot is not None:
outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.name is not None or
self.initializer is not None or
self.briefdescription is not None or
self.detaileddescription is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='enumvalueType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.prot is not None:
showIndent(outfile, level)
outfile.write('prot = "%s",\n' % (self.prot,))
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
        # The mixed content (name/initializer/briefdescription/detaileddescription
        # plus text nodes) lives in self.content_, so write it out only once.
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('prot'):
self.prot = attrs.get('prot').value
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'name':
value_ = []
for text_ in child_.childNodes:
value_.append(text_.nodeValue)
valuestr_ = ''.join(value_)
obj_ = self.mixedclass_(MixedContainer.CategorySimple,
MixedContainer.TypeString, 'name', valuestr_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'initializer':
childobj_ = linkedTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'initializer', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'briefdescription':
childobj_ = descriptionType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'briefdescription', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'detaileddescription':
childobj_ = descriptionType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'detaileddescription', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class enumvalueType
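# Minimal usage sketch for enumvalueType (illustration only; the XML values are
# made up). Like descriptionType, the children are collected into self.content_
# as MixedContainer items rather than into the named fields:
#
#   from xml.dom import minidom
#   node = minidom.parseString(
#       '<enumvalue id="e1" prot="public"><name>READ_ONLY</name></enumvalue>'
#   ).documentElement
#   ev = enumvalueType.factory()
#   ev.build(node)
#   ev.get_id()                          # -> u'e1'
#   [item.name for item in ev.content_]  # -> ['name']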
class templateparamlistType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, param=None):
if param is None:
self.param = []
else:
self.param = param
def factory(*args_, **kwargs_):
if templateparamlistType.subclass:
return templateparamlistType.subclass(*args_, **kwargs_)
else:
return templateparamlistType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_param(self): return self.param
def set_param(self, param): self.param = param
def add_param(self, value): self.param.append(value)
def insert_param(self, index, value): self.param[index] = value
def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'):
for param_ in self.param:
param_.export(outfile, level, namespace_, name_='param')
def hasContent_(self):
if (
self.param is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='templateparamlistType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('param=[\n')
level += 1
for param in self.param:
showIndent(outfile, level)
outfile.write('model_.param(\n')
param.exportLiteral(outfile, level, name_='param')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'param':
obj_ = paramType.factory()
obj_.build(child_)
self.param.append(obj_)
# end class templateparamlistType
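# Minimal usage sketch for templateparamlistType (illustration only): it is a
# plain container for <param> children, each parsed into a paramType (defined
# below):
#
#   from xml.dom import minidom
#   node = minidom.parseString(
#       '<templateparamlist><param><type>class T</type></param>'
#       '</templateparamlist>').documentElement
#   tpl = templateparamlistType.factory()
#   tpl.build(node)
#   len(tpl.get_param())     # -> 1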
class paramType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None):
self.type_ = type_
self.declname = declname
self.defname = defname
self.array = array
self.defval = defval
self.briefdescription = briefdescription
def factory(*args_, **kwargs_):
if paramType.subclass:
return paramType.subclass(*args_, **kwargs_)
else:
return paramType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_declname(self): return self.declname
def set_declname(self, declname): self.declname = declname
def get_defname(self): return self.defname
def set_defname(self, defname): self.defname = defname
def get_array(self): return self.array
def set_array(self, array): self.array = array
def get_defval(self): return self.defval
def set_defval(self, defval): self.defval = defval
def get_briefdescription(self): return self.briefdescription
def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='paramType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='paramType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='paramType'):
if self.type_:
self.type_.export(outfile, level, namespace_, name_='type')
if self.declname is not None:
showIndent(outfile, level)
outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_))
if self.defname is not None:
showIndent(outfile, level)
outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_))
if self.array is not None:
showIndent(outfile, level)
outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_))
if self.defval:
self.defval.export(outfile, level, namespace_, name_='defval')
if self.briefdescription:
self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
def hasContent_(self):
if (
self.type_ is not None or
self.declname is not None or
self.defname is not None or
self.array is not None or
self.defval is not None or
self.briefdescription is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='paramType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.type_:
            showIndent(outfile, level)
            outfile.write('type_=model_.linkedTextType(\n')
            self.type_.exportLiteral(outfile, level, name_='type')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.declname is not None:
            showIndent(outfile, level)
            outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding))
        if self.defname is not None:
            showIndent(outfile, level)
            outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding))
        if self.array is not None:
            showIndent(outfile, level)
            outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding))
if self.defval:
showIndent(outfile, level)
outfile.write('defval=model_.linkedTextType(\n')
self.defval.exportLiteral(outfile, level, name_='defval')
showIndent(outfile, level)
outfile.write('),\n')
if self.briefdescription:
showIndent(outfile, level)
outfile.write('briefdescription=model_.descriptionType(\n')
self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'type':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_type(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'declname':
declname_ = ''
for text__content_ in child_.childNodes:
declname_ += text__content_.nodeValue
self.declname = declname_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'defname':
defname_ = ''
for text__content_ in child_.childNodes:
defname_ += text__content_.nodeValue
self.defname = defname_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'array':
array_ = ''
for text__content_ in child_.childNodes:
array_ += text__content_.nodeValue
self.array = array_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'defval':
obj_ = linkedTextType.factory()
obj_.build(child_)
self.set_defval(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'briefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_briefdescription(obj_)
# end class paramType
class declname(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if declname.subclass:
return declname.subclass(*args_, **kwargs_)
else:
return declname(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='declname')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='declname'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='declname'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='declname'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class declname
class defname(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if defname.subclass:
return defname.subclass(*args_, **kwargs_)
else:
return defname(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='defname')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='defname'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='defname'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='defname'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class defname
class array(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if array.subclass:
return array.subclass(*args_, **kwargs_)
else:
return array(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='array')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='array'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='array'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='array'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class array
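# linkedTextType holds mixed content: plain text interleaved with <ref>
# elements.  Children are accumulated in order in self.content_ as
# MixedContainer wrappers rather than in dedicated per-child fields.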
class linkedTextType(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, ref=None, mixedclass_=None, content_=None):
        self.ref = ref
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if linkedTextType.subclass:
return linkedTextType.subclass(*args_, **kwargs_)
else:
return linkedTextType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='linkedTextType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.ref is not None or
            self.content_
            ):
            return True
        else:
            return False
def exportLiteral(self, outfile, level, name_='linkedTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class linkedTextType
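# graphType is a thin container for a list of <node> elements; build()
# appends one nodeType instance per child and export() re-emits them in
# document order.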
class graphType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, node=None):
if node is None:
self.node = []
else:
self.node = node
def factory(*args_, **kwargs_):
if graphType.subclass:
return graphType.subclass(*args_, **kwargs_)
else:
return graphType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_node(self): return self.node
def set_node(self, node): self.node = node
def add_node(self, value): self.node.append(value)
def insert_node(self, index, value): self.node[index] = value
def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='graphType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='graphType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='graphType'):
for node_ in self.node:
node_.export(outfile, level, namespace_, name_='node')
def hasContent_(self):
if (
self.node is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='graphType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('node=[\n')
level += 1
for node in self.node:
showIndent(outfile, level)
outfile.write('model_.node(\n')
node.exportLiteral(outfile, level, name_='node')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'node':
obj_ = nodeType.factory()
obj_.build(child_)
self.node.append(obj_)
# end class graphType
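# nodeType models a single graph node: an 'id' attribute, a <label> string,
# an optional <link> and any number of <childnode> edges.  Roughly, for
# illustration only (values are made up, not taken from this file):
#   <node id="n1">
#     <label>SomeClass</label>
#     <link refid="class_some_class"/>
#     <childnode refid="n2" relation="public-inheritance"/>
#   </node>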
class nodeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, id=None, label=None, link=None, childnode=None):
self.id = id
self.label = label
self.link = link
if childnode is None:
self.childnode = []
else:
self.childnode = childnode
def factory(*args_, **kwargs_):
if nodeType.subclass:
return nodeType.subclass(*args_, **kwargs_)
else:
return nodeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_label(self): return self.label
def set_label(self, label): self.label = label
def get_link(self): return self.link
def set_link(self, link): self.link = link
def get_childnode(self): return self.childnode
def set_childnode(self, childnode): self.childnode = childnode
def add_childnode(self, value): self.childnode.append(value)
def insert_childnode(self, index, value): self.childnode[index] = value
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='nodeType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='nodeType'):
if self.label is not None:
showIndent(outfile, level)
outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
if self.link:
self.link.export(outfile, level, namespace_, name_='link')
for childnode_ in self.childnode:
childnode_.export(outfile, level, namespace_, name_='childnode')
def hasContent_(self):
if (
self.label is not None or
self.link is not None or
self.childnode is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='nodeType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.label is not None:
            showIndent(outfile, level)
            outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding))
if self.link:
showIndent(outfile, level)
outfile.write('link=model_.linkType(\n')
self.link.exportLiteral(outfile, level, name_='link')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('childnode=[\n')
level += 1
for childnode in self.childnode:
showIndent(outfile, level)
outfile.write('model_.childnode(\n')
childnode.exportLiteral(outfile, level, name_='childnode')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'label':
label_ = ''
for text__content_ in child_.childNodes:
label_ += text__content_.nodeValue
self.label = label_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'link':
obj_ = linkType.factory()
obj_.build(child_)
self.set_link(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'childnode':
obj_ = childnodeType.factory()
obj_.build(child_)
self.childnode.append(obj_)
# end class nodeType
class label(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if label.subclass:
return label.subclass(*args_, **kwargs_)
else:
return label(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='label')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='label'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='label'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='label'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class label
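# childnodeType represents an edge to another node: 'refid' and 'relation'
# attributes plus zero or more <edgelabel> text children, which are kept as
# plain strings in self.edgelabel.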
class childnodeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, relation=None, refid=None, edgelabel=None):
self.relation = relation
self.refid = refid
if edgelabel is None:
self.edgelabel = []
else:
self.edgelabel = edgelabel
def factory(*args_, **kwargs_):
if childnodeType.subclass:
return childnodeType.subclass(*args_, **kwargs_)
else:
return childnodeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_edgelabel(self): return self.edgelabel
def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel
def add_edgelabel(self, value): self.edgelabel.append(value)
def insert_edgelabel(self, index, value): self.edgelabel[index] = value
def get_relation(self): return self.relation
def set_relation(self, relation): self.relation = relation
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='childnodeType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'):
if self.relation is not None:
outfile.write(' relation=%s' % (quote_attrib(self.relation), ))
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'):
for edgelabel_ in self.edgelabel:
showIndent(outfile, level)
outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))
def hasContent_(self):
if (
self.edgelabel is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='childnodeType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.relation is not None:
showIndent(outfile, level)
outfile.write('relation = "%s",\n' % (self.relation,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('edgelabel=[\n')
level += 1
for edgelabel in self.edgelabel:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('relation'):
self.relation = attrs.get('relation').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'edgelabel':
edgelabel_ = ''
for text__content_ in child_.childNodes:
edgelabel_ += text__content_.nodeValue
self.edgelabel.append(edgelabel_)
# end class childnodeType
class edgelabel(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if edgelabel.subclass:
return edgelabel.subclass(*args_, **kwargs_)
else:
return edgelabel(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='edgelabel')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='edgelabel'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class edgelabel
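# linkType is a leaf element: a 'refid' attribute, an optional 'external'
# attribute, and its text content stored in valueOf_.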
class linkType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, refid=None, external=None, valueOf_=''):
self.refid = refid
self.external = external
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if linkType.subclass:
return linkType.subclass(*args_, **kwargs_)
else:
return linkType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='linkType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='linkType'):
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.external is not None:
outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
def exportChildren(self, outfile, level, namespace_='', name_='linkType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='linkType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
if attrs.get('external'):
self.external = attrs.get('external').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class linkType
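# listingType models a program listing as an ordered sequence of <codeline>
# children, each parsed into a codelineType instance.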
class listingType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, codeline=None):
if codeline is None:
self.codeline = []
else:
self.codeline = codeline
def factory(*args_, **kwargs_):
if listingType.subclass:
return listingType.subclass(*args_, **kwargs_)
else:
return listingType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_codeline(self): return self.codeline
def set_codeline(self, codeline): self.codeline = codeline
def add_codeline(self, value): self.codeline.append(value)
def insert_codeline(self, index, value): self.codeline[index] = value
def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='listingType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='listingType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='listingType'):
for codeline_ in self.codeline:
codeline_.export(outfile, level, namespace_, name_='codeline')
def hasContent_(self):
if (
self.codeline is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='listingType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('codeline=[\n')
level += 1
for codeline in self.codeline:
showIndent(outfile, level)
outfile.write('model_.codeline(\n')
codeline.exportLiteral(outfile, level, name_='codeline')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'codeline':
obj_ = codelineType.factory()
obj_.build(child_)
self.codeline.append(obj_)
# end class listingType
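# codelineType is one line of a listing: 'lineno' (integer), 'refid',
# 'refkind' and 'external' attributes plus a sequence of <highlight> spans.
# Roughly, for illustration only (values are made up):
#   <codeline lineno="42" refid="some_ref" refkind="member" external="no">
#     <highlight class="keyword">def<sp/>run</highlight>
#   </codeline>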
class codelineType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
self.external = external
self.lineno = lineno
self.refkind = refkind
self.refid = refid
if highlight is None:
self.highlight = []
else:
self.highlight = highlight
def factory(*args_, **kwargs_):
if codelineType.subclass:
return codelineType.subclass(*args_, **kwargs_)
else:
return codelineType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_highlight(self): return self.highlight
def set_highlight(self, highlight): self.highlight = highlight
def add_highlight(self, value): self.highlight.append(value)
def insert_highlight(self, index, value): self.highlight[index] = value
def get_external(self): return self.external
def set_external(self, external): self.external = external
def get_lineno(self): return self.lineno
def set_lineno(self, lineno): self.lineno = lineno
def get_refkind(self): return self.refkind
def set_refkind(self, refkind): self.refkind = refkind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='codelineType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'):
if self.external is not None:
outfile.write(' external=%s' % (quote_attrib(self.external), ))
if self.lineno is not None:
outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno'))
if self.refkind is not None:
outfile.write(' refkind=%s' % (quote_attrib(self.refkind), ))
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='codelineType'):
for highlight_ in self.highlight:
highlight_.export(outfile, level, namespace_, name_='highlight')
def hasContent_(self):
if (
self.highlight is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='codelineType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = "%s",\n' % (self.external,))
if self.lineno is not None:
showIndent(outfile, level)
outfile.write('lineno = %s,\n' % (self.lineno,))
if self.refkind is not None:
showIndent(outfile, level)
outfile.write('refkind = "%s",\n' % (self.refkind,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('highlight=[\n')
level += 1
for highlight in self.highlight:
showIndent(outfile, level)
outfile.write('model_.highlight(\n')
highlight.exportLiteral(outfile, level, name_='highlight')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('external'):
self.external = attrs.get('external').value
if attrs.get('lineno'):
try:
self.lineno = int(attrs.get('lineno').value)
except ValueError, exp:
raise ValueError('Bad integer attribute (lineno): %s' % exp)
if attrs.get('refkind'):
self.refkind = attrs.get('refkind').value
if attrs.get('refid'):
self.refid = attrs.get('refid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'highlight':
obj_ = highlightType.factory()
obj_.build(child_)
self.highlight.append(obj_)
# end class codelineType
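# highlightType is a syntax-highlighting span.  The XML attribute 'class' is
# stored as self.classxx to avoid clashing with the Python keyword; the mixed
# content (text, <sp/> spacers and <ref> links) is collected in content_.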
class highlightType(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None):
        self.classxx = classxx
        self.sp = sp
        self.ref = ref
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if highlightType.subclass:
return highlightType.subclass(*args_, **kwargs_)
else:
return highlightType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_sp(self): return self.sp
def set_sp(self, sp): self.sp = sp
def add_sp(self, value): self.sp.append(value)
def insert_sp(self, index, value): self.sp[index] = value
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
def get_class(self): return self.classxx
def set_class(self, classxx): self.classxx = classxx
def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='highlightType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'):
if self.classxx is not None:
outfile.write(' class=%s' % (quote_attrib(self.classxx), ))
def exportChildren(self, outfile, level, namespace_='', name_='highlightType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.sp is not None or
            self.ref is not None or
            self.content_
            ):
            return True
        else:
            return False
def exportLiteral(self, outfile, level, name_='highlightType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.classxx is not None:
showIndent(outfile, level)
outfile.write('classxx = "%s",\n' % (self.classxx,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('class'):
self.classxx = attrs.get('class').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sp':
value_ = []
for text_ in child_.childNodes:
value_.append(text_.nodeValue)
valuestr_ = ''.join(value_)
obj_ = self.mixedclass_(MixedContainer.CategorySimple,
MixedContainer.TypeString, 'sp', valuestr_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class highlightType
class sp(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if sp.subclass:
return sp.subclass(*args_, **kwargs_)
else:
return sp(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='sp')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='sp'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='sp'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='sp'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class sp
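# referenceType records a cross-reference entry: 'refid', 'compoundref',
# 'startline' and 'endline' (integer) attributes plus the referenced name as
# text content, which buildChildren() mirrors into both content_ and valueOf_.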
class referenceType(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
        self.endline = endline
        self.startline = startline
        self.refid = refid
        self.compoundref = compoundref
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if referenceType.subclass:
return referenceType.subclass(*args_, **kwargs_)
else:
return referenceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_endline(self): return self.endline
def set_endline(self, endline): self.endline = endline
def get_startline(self): return self.startline
def set_startline(self, startline): self.startline = startline
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def get_compoundref(self): return self.compoundref
def set_compoundref(self, compoundref): self.compoundref = compoundref
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='referenceType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'):
if self.endline is not None:
outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline'))
if self.startline is not None:
outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline'))
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.compoundref is not None:
outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
def exportChildren(self, outfile, level, namespace_='', name_='referenceType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='referenceType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.endline is not None:
showIndent(outfile, level)
outfile.write('endline = %s,\n' % (self.endline,))
if self.startline is not None:
showIndent(outfile, level)
outfile.write('startline = %s,\n' % (self.startline,))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
if self.compoundref is not None:
showIndent(outfile, level)
outfile.write('compoundref = %s,\n' % (self.compoundref,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('endline'):
try:
self.endline = int(attrs.get('endline').value)
except ValueError, exp:
raise ValueError('Bad integer attribute (endline): %s' % exp)
if attrs.get('startline'):
try:
self.startline = int(attrs.get('startline').value)
except ValueError, exp:
raise ValueError('Bad integer attribute (startline): %s' % exp)
if attrs.get('refid'):
self.refid = attrs.get('refid').value
if attrs.get('compoundref'):
self.compoundref = attrs.get('compoundref').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class referenceType
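# locationType carries source-position attributes only: 'file' and 'line'
# plus 'bodyfile', 'bodystart' and 'bodyend' (the integer attributes are
# validated in buildAttributes()); any text content ends up in valueOf_.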
class locationType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
self.bodystart = bodystart
self.line = line
self.bodyend = bodyend
self.bodyfile = bodyfile
self.file = file
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if locationType.subclass:
return locationType.subclass(*args_, **kwargs_)
else:
return locationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_bodystart(self): return self.bodystart
def set_bodystart(self, bodystart): self.bodystart = bodystart
def get_line(self): return self.line
def set_line(self, line): self.line = line
def get_bodyend(self): return self.bodyend
def set_bodyend(self, bodyend): self.bodyend = bodyend
def get_bodyfile(self): return self.bodyfile
def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile
def get_file(self): return self.file
def set_file(self, file): self.file = file
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='locationType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='locationType'):
if self.bodystart is not None:
outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart'))
if self.line is not None:
outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line'))
if self.bodyend is not None:
outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend'))
if self.bodyfile is not None:
outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
if self.file is not None:
outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), ))
def exportChildren(self, outfile, level, namespace_='', name_='locationType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='locationType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.bodystart is not None:
showIndent(outfile, level)
outfile.write('bodystart = %s,\n' % (self.bodystart,))
if self.line is not None:
showIndent(outfile, level)
outfile.write('line = %s,\n' % (self.line,))
if self.bodyend is not None:
showIndent(outfile, level)
outfile.write('bodyend = %s,\n' % (self.bodyend,))
if self.bodyfile is not None:
showIndent(outfile, level)
outfile.write('bodyfile = %s,\n' % (self.bodyfile,))
if self.file is not None:
showIndent(outfile, level)
outfile.write('file = %s,\n' % (self.file,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('bodystart'):
try:
self.bodystart = int(attrs.get('bodystart').value)
except ValueError, exp:
raise ValueError('Bad integer attribute (bodystart): %s' % exp)
if attrs.get('line'):
try:
self.line = int(attrs.get('line').value)
except ValueError, exp:
raise ValueError('Bad integer attribute (line): %s' % exp)
if attrs.get('bodyend'):
try:
self.bodyend = int(attrs.get('bodyend').value)
except ValueError, exp:
raise ValueError('Bad integer attribute (bodyend): %s' % exp)
if attrs.get('bodyfile'):
self.bodyfile = attrs.get('bodyfile').value
if attrs.get('file'):
self.file = attrs.get('file').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class locationType
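# docSect1Type, docSect2Type and docSect3Type describe nested document
# sections: each carries an 'id' attribute and mixed content made of an
# optional <title>, <para> blocks, deeper subsections (<sect2>/<sect3>/
# <sect4>) and an optional <internal> block, all kept in document order
# inside content_.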
class docSect1Type(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
        self.id = id
        self.title = title
        self.para = para
        self.sect2 = sect2
        self.internal = internal
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docSect1Type.subclass:
return docSect1Type.subclass(*args_, **kwargs_)
else:
return docSect1Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect2(self): return self.sect2
def set_sect2(self, sect2): self.sect2 = sect2
def add_sect2(self, value): self.sect2.append(value)
def insert_sect2(self, index, value): self.sect2[index] = value
def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docSect1Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect2 is not None or
self.internal is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docSect1Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sect2':
childobj_ = docSect2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'sect2', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'internal':
childobj_ = docInternalS1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect1Type
class docSect2Type(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
        self.id = id
        self.title = title
        self.para = para
        self.sect3 = sect3
        self.internal = internal
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docSect2Type.subclass:
return docSect2Type.subclass(*args_, **kwargs_)
else:
return docSect2Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect3(self): return self.sect3
def set_sect3(self, sect3): self.sect3 = sect3
def add_sect3(self, value): self.sect3.append(value)
def insert_sect3(self, index, value): self.sect3[index] = value
def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docSect2Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect3 is not None or
self.internal is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docSect2Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sect3':
childobj_ = docSect3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'sect3', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'internal':
childobj_ = docInternalS2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect2Type
class docSect3Type(GeneratedsSuper):
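    """Mixed-content model for a <sect3> element: an optional id attribute, with
    title, para, sect4 and internal children collected in self.content_."""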
subclass = None
superclass = None
    def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
        self.id = id
        self.title = title
        self.para = para
        self.sect4 = sect4
        self.internal = internal
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docSect3Type.subclass:
return docSect3Type.subclass(*args_, **kwargs_)
else:
return docSect3Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect4(self): return self.sect4
def set_sect4(self, sect4): self.sect4 = sect4
def add_sect4(self, value): self.sect4.append(value)
def insert_sect4(self, index, value): self.sect4[index] = value
def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docSect3Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.sect4 is not None or
self.internal is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docSect3Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sect4':
childobj_ = docSect4Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'sect4', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'internal':
childobj_ = docInternalS3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect3Type
class docSect4Type(GeneratedsSuper):
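    """Mixed-content model for a <sect4> element: an optional id attribute, with
    title, para and internal children collected in self.content_."""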
subclass = None
superclass = None
    def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None):
        self.id = id
        self.title = title
        self.para = para
        self.internal = internal
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docSect4Type.subclass:
return docSect4Type.subclass(*args_, **kwargs_)
else:
return docSect4Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docSect4Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.title is not None or
self.para is not None or
self.internal is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docSect4Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'title':
childobj_ = docTitleType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'title', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'internal':
childobj_ = docInternalS4Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'internal', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docSect4Type
class docInternalType(GeneratedsSuper):
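    """Mixed content of a top-level <internal> block: para and sect1 children
    plus free text, all collected in self.content_."""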
subclass = None
superclass = None
    def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
        # keep the declared members so the accessors and hasContent_() work
        self.para = para
        self.sect1 = sect1
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docInternalType.subclass:
return docInternalType.subclass(*args_, **kwargs_)
else:
return docInternalType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect1(self): return self.sect1
def set_sect1(self, sect1): self.sect1 = sect1
def add_sect1(self, value): self.sect1.append(value)
def insert_sect1(self, index, value): self.sect1[index] = value
def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docInternalType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.para is not None or
self.sect1 is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docInternalType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sect1':
childobj_ = docSect1Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'sect1', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalType
class docInternalS1Type(GeneratedsSuper):
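    """Mixed content of an <internal> block inside a sect1: para and sect2
    children plus free text, collected in self.content_."""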
subclass = None
superclass = None
    def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
        self.para = para
        self.sect2 = sect2
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docInternalS1Type.subclass:
return docInternalS1Type.subclass(*args_, **kwargs_)
else:
return docInternalS1Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect2(self): return self.sect2
def set_sect2(self, sect2): self.sect2 = sect2
def add_sect2(self, value): self.sect2.append(value)
def insert_sect2(self, index, value): self.sect2[index] = value
def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.para is not None or
self.sect2 is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docInternalS1Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sect2':
childobj_ = docSect2Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'sect2', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS1Type
class docInternalS2Type(GeneratedsSuper):
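    """Mixed content of an <internal> block inside a sect2: para and sect3
    children plus free text, collected in self.content_."""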
subclass = None
superclass = None
    def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
        self.para = para
        self.sect3 = sect3
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docInternalS2Type.subclass:
return docInternalS2Type.subclass(*args_, **kwargs_)
else:
return docInternalS2Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect3(self): return self.sect3
def set_sect3(self, sect3): self.sect3 = sect3
def add_sect3(self, value): self.sect3.append(value)
def insert_sect3(self, index, value): self.sect3[index] = value
def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.para is not None or
self.sect3 is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docInternalS2Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sect3':
childobj_ = docSect3Type.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'sect3', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS2Type
class docInternalS3Type(GeneratedsSuper):
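    """Mixed content of an <internal> block inside a sect3: para children and
    nested subsections plus free text, collected in self.content_."""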
subclass = None
superclass = None
    def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
        self.para = para
        self.sect3 = sect3
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docInternalS3Type.subclass:
return docInternalS3Type.subclass(*args_, **kwargs_)
else:
return docInternalS3Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect3(self): return self.sect3
def set_sect3(self, sect3): self.sect3 = sect3
def add_sect3(self, value): self.sect3.append(value)
def insert_sect3(self, index, value): self.sect3[index] = value
def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.para is not None or
self.sect3 is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docInternalS3Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'sect4':
            # this level nests sect4 subsections (cf. docInternalS1Type/sect2 and
            # docInternalS2Type/sect3), so match and label the child as 'sect4'
            childobj_ = docSect4Type.factory()
            childobj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'sect4', childobj_)
            self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS3Type
class docInternalS4Type(GeneratedsSuper):
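    """Mixed content of an <internal> block inside a sect4: para children plus
    free text, collected in self.content_."""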
subclass = None
superclass = None
    def __init__(self, para=None, mixedclass_=None, content_=None):
        self.para = para
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docInternalS4Type.subclass:
return docInternalS4Type.subclass(*args_, **kwargs_)
else:
return docInternalS4Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.para is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docInternalS4Type'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
childobj_ = docParaType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'para', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docInternalS4Type
class docTitleType(GeneratedsSuper):
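    """Title text node: its character data, including CDATA sections, is
    concatenated into self.valueOf_."""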
subclass = None
superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        # keep the initial text value; getValueOf_() and exportChildren() read it back
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docTitleType.subclass:
return docTitleType.subclass(*args_, **kwargs_)
else:
return docTitleType(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docTitleType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docTitleType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docTitleType
class docParaType(GeneratedsSuper):
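    """Paragraph text node: its character data, including CDATA sections, is
    concatenated into self.valueOf_."""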
subclass = None
superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docParaType.subclass:
return docParaType.subclass(*args_, **kwargs_)
else:
return docParaType(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docParaType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docParaType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docParaType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docParaType
class docMarkupType(GeneratedsSuper):
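    """Generic text-only markup node; its character data is collected in
    self.valueOf_."""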
subclass = None
superclass = None
    def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docMarkupType.subclass:
return docMarkupType.subclass(*args_, **kwargs_)
else:
return docMarkupType(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docMarkupType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docMarkupType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docMarkupType
class docURLLink(GeneratedsSuper):
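    """URL link node: a url attribute plus the link text in self.valueOf_."""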
subclass = None
superclass = None
    def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
        self.url = url
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docURLLink.subclass:
return docURLLink.subclass(*args_, **kwargs_)
else:
return docURLLink(*args_, **kwargs_)
factory = staticmethod(factory)
def get_url(self): return self.url
def set_url(self, url): self.url = url
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docURLLink')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'):
if self.url is not None:
outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docURLLink'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.url is not None:
showIndent(outfile, level)
outfile.write('url = %s,\n' % (self.url,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('url'):
self.url = attrs.get('url').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docURLLink
class docAnchorType(GeneratedsSuper):
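    """Anchor node: an id attribute plus any character data in self.valueOf_."""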
subclass = None
superclass = None
    def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
        self.id = id
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docAnchorType.subclass:
return docAnchorType.subclass(*args_, **kwargs_)
else:
return docAnchorType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docAnchorType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docAnchorType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docAnchorType
class docFormulaType(GeneratedsSuper):
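    """Formula node: an id attribute plus the formula source text in self.valueOf_."""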
subclass = None
superclass = None
    def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
        self.id = id
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
def factory(*args_, **kwargs_):
if docFormulaType.subclass:
return docFormulaType.subclass(*args_, **kwargs_)
else:
return docFormulaType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docFormulaType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docFormulaType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docFormulaType
class docIndexEntryType(GeneratedsSuper):
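    """Index entry holding the text of its primaryie and secondaryie children."""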
subclass = None
superclass = None
def __init__(self, primaryie=None, secondaryie=None):
self.primaryie = primaryie
self.secondaryie = secondaryie
def factory(*args_, **kwargs_):
if docIndexEntryType.subclass:
return docIndexEntryType.subclass(*args_, **kwargs_)
else:
return docIndexEntryType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_primaryie(self): return self.primaryie
def set_primaryie(self, primaryie): self.primaryie = primaryie
def get_secondaryie(self): return self.secondaryie
def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie
def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'):
if self.primaryie is not None:
showIndent(outfile, level)
outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_))
if self.secondaryie is not None:
showIndent(outfile, level)
outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_))
def hasContent_(self):
if (
self.primaryie is not None or
self.secondaryie is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docIndexEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'primaryie':
primaryie_ = ''
for text__content_ in child_.childNodes:
primaryie_ += text__content_.nodeValue
self.primaryie = primaryie_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'secondaryie':
secondaryie_ = ''
for text__content_ in child_.childNodes:
secondaryie_ += text__content_.nodeValue
self.secondaryie = secondaryie_
# end class docIndexEntryType
class docListType(GeneratedsSuper):
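    """List container: listitem children are parsed into docListItemType objects."""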
subclass = None
superclass = None
def __init__(self, listitem=None):
if listitem is None:
self.listitem = []
else:
self.listitem = listitem
def factory(*args_, **kwargs_):
if docListType.subclass:
return docListType.subclass(*args_, **kwargs_)
else:
return docListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_listitem(self): return self.listitem
def set_listitem(self, listitem): self.listitem = listitem
def add_listitem(self, value): self.listitem.append(value)
def insert_listitem(self, index, value): self.listitem[index] = value
def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docListType'):
for listitem_ in self.listitem:
listitem_.export(outfile, level, namespace_, name_='listitem')
def hasContent_(self):
if (
self.listitem is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('listitem=[\n')
level += 1
for listitem in self.listitem:
showIndent(outfile, level)
outfile.write('model_.listitem(\n')
listitem.exportLiteral(outfile, level, name_='listitem')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'listitem':
obj_ = docListItemType.factory()
obj_.build(child_)
self.listitem.append(obj_)
# end class docListType
class docListItemType(GeneratedsSuper):
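    """Single list item holding zero or more para children (docParaType)."""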
subclass = None
superclass = None
def __init__(self, para=None):
if para is None:
self.para = []
else:
self.para = para
def factory(*args_, **kwargs_):
if docListItemType.subclass:
return docListItemType.subclass(*args_, **kwargs_)
else:
return docListItemType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docListItemType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
def hasContent_(self):
if (
self.para is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docListItemType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
level += 1
for para in self.para:
showIndent(outfile, level)
outfile.write('model_.para(\n')
para.exportLiteral(outfile, level, name_='para')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
# end class docListItemType
class docSimpleSectType(GeneratedsSuper):
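    """Simple section: a kind attribute, an optional title (docTitleType) and a
    list of para children."""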
subclass = None
superclass = None
def __init__(self, kind=None, title=None, para=None):
self.kind = kind
self.title = title
if para is None:
self.para = []
else:
self.para = para
def factory(*args_, **kwargs_):
if docSimpleSectType.subclass:
return docSimpleSectType.subclass(*args_, **kwargs_)
else:
return docSimpleSectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'):
if self.title:
self.title.export(outfile, level, namespace_, name_='title')
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
def hasContent_(self):
if (
self.title is not None or
self.para is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docSimpleSectType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
def exportLiteralChildren(self, outfile, level, name_):
if self.title:
showIndent(outfile, level)
outfile.write('title=model_.docTitleType(\n')
self.title.exportLiteral(outfile, level, name_='title')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('para=[\n')
level += 1
for para in self.para:
showIndent(outfile, level)
outfile.write('model_.para(\n')
para.exportLiteral(outfile, level, name_='para')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'title':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_title(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
# end class docSimpleSectType
class docVarListEntryType(GeneratedsSuper):
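    """Variable-list entry wrapping a single term child, parsed as docTitleType."""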
subclass = None
superclass = None
def __init__(self, term=None):
self.term = term
def factory(*args_, **kwargs_):
if docVarListEntryType.subclass:
return docVarListEntryType.subclass(*args_, **kwargs_)
else:
return docVarListEntryType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_term(self): return self.term
def set_term(self, term): self.term = term
def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'):
if self.term:
self.term.export(outfile, level, namespace_, name_='term', )
def hasContent_(self):
if (
self.term is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docVarListEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.term:
showIndent(outfile, level)
outfile.write('term=model_.docTitleType(\n')
self.term.exportLiteral(outfile, level, name_='term')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'term':
obj_ = docTitleType.factory()
obj_.build(child_)
self.set_term(obj_)
# end class docVarListEntryType
class docVariableListType(GeneratedsSuper):
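    """Variable-list container; this generated binding only keeps the element's character data in valueOf_."""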
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if docVariableListType.subclass:
return docVariableListType.subclass(*args_, **kwargs_)
else:
return docVariableListType(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docVariableListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docVariableListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docVariableListType
class docRefTextType(GeneratedsSuper):
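    """Mixed-content reference text with 'refid', 'kindref' and 'external' attributes; text is collected in content_ and valueOf_."""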
subclass = None
superclass = None
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
self.refid = refid
self.kindref = kindref
        self.external = external
        self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if docRefTextType.subclass:
return docRefTextType.subclass(*args_, **kwargs_)
else:
return docRefTextType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def get_kindref(self): return self.kindref
def set_kindref(self, kindref): self.kindref = kindref
def get_external(self): return self.external
def set_external(self, external): self.external = external
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docRefTextType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'):
if self.refid is not None:
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
if self.kindref is not None:
outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
if self.external is not None:
outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docRefTextType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid = %s,\n' % (self.refid,))
if self.kindref is not None:
showIndent(outfile, level)
outfile.write('kindref = "%s",\n' % (self.kindref,))
if self.external is not None:
showIndent(outfile, level)
outfile.write('external = %s,\n' % (self.external,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('refid'):
self.refid = attrs.get('refid').value
if attrs.get('kindref'):
self.kindref = attrs.get('kindref').value
if attrs.get('external'):
self.external = attrs.get('external').value
def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            # plain text: record it both as a mixed-content item and in valueOf_
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docRefTextType
class docTableType(GeneratedsSuper):
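    """Table element with integer 'rows'/'cols' attributes, a list of 'row' children and an optional 'caption'."""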
subclass = None
superclass = None
def __init__(self, rows=None, cols=None, row=None, caption=None):
self.rows = rows
self.cols = cols
if row is None:
self.row = []
else:
self.row = row
self.caption = caption
def factory(*args_, **kwargs_):
if docTableType.subclass:
return docTableType.subclass(*args_, **kwargs_)
else:
return docTableType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_row(self): return self.row
def set_row(self, row): self.row = row
def add_row(self, value): self.row.append(value)
def insert_row(self, index, value): self.row[index] = value
def get_caption(self): return self.caption
def set_caption(self, caption): self.caption = caption
def get_rows(self): return self.rows
def set_rows(self, rows): self.rows = rows
def get_cols(self): return self.cols
def set_cols(self, cols): self.cols = cols
def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docTableType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'):
if self.rows is not None:
outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows'))
if self.cols is not None:
outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols'))
def exportChildren(self, outfile, level, namespace_='', name_='docTableType'):
for row_ in self.row:
row_.export(outfile, level, namespace_, name_='row')
if self.caption:
self.caption.export(outfile, level, namespace_, name_='caption')
def hasContent_(self):
if (
self.row is not None or
self.caption is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docTableType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.rows is not None:
showIndent(outfile, level)
outfile.write('rows = %s,\n' % (self.rows,))
if self.cols is not None:
showIndent(outfile, level)
outfile.write('cols = %s,\n' % (self.cols,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('row=[\n')
level += 1
for row in self.row:
showIndent(outfile, level)
outfile.write('model_.row(\n')
row.exportLiteral(outfile, level, name_='row')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.caption:
showIndent(outfile, level)
outfile.write('caption=model_.docCaptionType(\n')
self.caption.exportLiteral(outfile, level, name_='caption')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('rows'):
try:
self.rows = int(attrs.get('rows').value)
            except ValueError as exp:
raise ValueError('Bad integer attribute (rows): %s' % exp)
if attrs.get('cols'):
try:
self.cols = int(attrs.get('cols').value)
            except ValueError as exp:
raise ValueError('Bad integer attribute (cols): %s' % exp)
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'row':
obj_ = docRowType.factory()
obj_.build(child_)
self.row.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'caption':
obj_ = docCaptionType.factory()
obj_.build(child_)
self.set_caption(obj_)
# end class docTableType
class docRowType(GeneratedsSuper):
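    """Table row holding a list of 'entry' children."""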
subclass = None
superclass = None
def __init__(self, entry=None):
if entry is None:
self.entry = []
else:
self.entry = entry
def factory(*args_, **kwargs_):
if docRowType.subclass:
return docRowType.subclass(*args_, **kwargs_)
else:
return docRowType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_entry(self): return self.entry
def set_entry(self, entry): self.entry = entry
def add_entry(self, value): self.entry.append(value)
def insert_entry(self, index, value): self.entry[index] = value
def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docRowType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docRowType'):
for entry_ in self.entry:
entry_.export(outfile, level, namespace_, name_='entry')
def hasContent_(self):
if (
self.entry is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docRowType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('entry=[\n')
level += 1
for entry in self.entry:
showIndent(outfile, level)
outfile.write('model_.entry(\n')
entry.exportLiteral(outfile, level, name_='entry')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'entry':
obj_ = docEntryType.factory()
obj_.build(child_)
self.entry.append(obj_)
# end class docRowType
class docEntryType(GeneratedsSuper):
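    """Table cell with an optional 'thead' attribute and a list of 'para' children."""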
subclass = None
superclass = None
def __init__(self, thead=None, para=None):
self.thead = thead
if para is None:
self.para = []
else:
self.para = para
def factory(*args_, **kwargs_):
if docEntryType.subclass:
return docEntryType.subclass(*args_, **kwargs_)
else:
return docEntryType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_thead(self): return self.thead
def set_thead(self, thead): self.thead = thead
def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docEntryType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'):
if self.thead is not None:
outfile.write(' thead=%s' % (quote_attrib(self.thead), ))
def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
def hasContent_(self):
if (
self.para is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docEntryType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.thead is not None:
showIndent(outfile, level)
outfile.write('thead = "%s",\n' % (self.thead,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
level += 1
for para in self.para:
showIndent(outfile, level)
outfile.write('model_.para(\n')
para.exportLiteral(outfile, level, name_='para')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('thead'):
self.thead = attrs.get('thead').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
# end class docEntryType
class docCaptionType(GeneratedsSuper):
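    """Mixed-content table caption; text is kept both as content_ items and in valueOf_."""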
subclass = None
superclass = None
def __init__(self, valueOf_='', mixedclass_=None, content_=None):
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if docCaptionType.subclass:
return docCaptionType.subclass(*args_, **kwargs_)
else:
return docCaptionType(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docCaptionType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docCaptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            # plain text: record it both as a mixed-content item and in valueOf_
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docCaptionType
class docHeadingType(GeneratedsSuper):
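    """Mixed-content heading with an integer 'level' attribute."""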
subclass = None
superclass = None
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
        self.level = level
        self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if docHeadingType.subclass:
return docHeadingType.subclass(*args_, **kwargs_)
else:
return docHeadingType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_level(self): return self.level
def set_level(self, level): self.level = level
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docHeadingType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'):
if self.level is not None:
outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level'))
def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docHeadingType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.level is not None:
showIndent(outfile, level)
outfile.write('level = %s,\n' % (self.level,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('level'):
try:
self.level = int(attrs.get('level').value)
            except ValueError as exp:
raise ValueError('Bad integer attribute (level): %s' % exp)
def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            # plain text: record it both as a mixed-content item and in valueOf_
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docHeadingType
class docImageType(GeneratedsSuper):
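    """Mixed-content image element with 'width', 'type', 'name' and 'height' attributes."""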
subclass = None
superclass = None
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
self.width = width
self.type_ = type_
self.name = name
        self.height = height
        self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if docImageType.subclass:
return docImageType.subclass(*args_, **kwargs_)
else:
return docImageType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_width(self): return self.width
def set_width(self, width): self.width = width
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_height(self): return self.height
def set_height(self, height): self.height = height
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docImageType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'):
if self.width is not None:
outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), ))
if self.type_ is not None:
outfile.write(' type=%s' % (quote_attrib(self.type_), ))
if self.name is not None:
outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.height is not None:
outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docImageType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docImageType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.width is not None:
showIndent(outfile, level)
outfile.write('width = %s,\n' % (self.width,))
if self.type_ is not None:
showIndent(outfile, level)
outfile.write('type_ = "%s",\n' % (self.type_,))
if self.name is not None:
showIndent(outfile, level)
outfile.write('name = %s,\n' % (self.name,))
if self.height is not None:
showIndent(outfile, level)
outfile.write('height = %s,\n' % (self.height,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('width'):
self.width = attrs.get('width').value
if attrs.get('type'):
self.type_ = attrs.get('type').value
if attrs.get('name'):
self.name = attrs.get('name').value
if attrs.get('height'):
self.height = attrs.get('height').value
def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            # plain text: record it both as a mixed-content item and in valueOf_
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docImageType
class docDotFileType(GeneratedsSuper):
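    """Mixed-content dotfile element identified by its 'name' attribute."""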
subclass = None
superclass = None
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
        self.name = name
        self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if docDotFileType.subclass:
return docDotFileType.subclass(*args_, **kwargs_)
else:
return docDotFileType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docDotFileType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'):
if self.name is not None:
outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docDotFileType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.name is not None:
showIndent(outfile, level)
outfile.write('name = %s,\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('name'):
self.name = attrs.get('name').value
def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            # plain text: record it both as a mixed-content item and in valueOf_
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docDotFileType
class docTocItemType(GeneratedsSuper):
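    """Mixed-content TOC item identified by its 'id' attribute."""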
subclass = None
superclass = None
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
        self.id = id
        self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if docTocItemType.subclass:
return docTocItemType.subclass(*args_, **kwargs_)
else:
return docTocItemType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docTocItemType')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docTocItemType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.TEXT_NODE:
            # plain text: record it both as a mixed-content item and in valueOf_
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content_.append(obj_)
            self.valueOf_ += child_.nodeValue
        elif child_.nodeType == Node.CDATA_SECTION_NODE:
            self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docTocItemType
class docTocListType(GeneratedsSuper):
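    """TOC list holding a list of 'tocitem' children."""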
subclass = None
superclass = None
def __init__(self, tocitem=None):
if tocitem is None:
self.tocitem = []
else:
self.tocitem = tocitem
def factory(*args_, **kwargs_):
if docTocListType.subclass:
return docTocListType.subclass(*args_, **kwargs_)
else:
return docTocListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_tocitem(self): return self.tocitem
def set_tocitem(self, tocitem): self.tocitem = tocitem
def add_tocitem(self, value): self.tocitem.append(value)
def insert_tocitem(self, index, value): self.tocitem[index] = value
def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docTocListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'):
for tocitem_ in self.tocitem:
tocitem_.export(outfile, level, namespace_, name_='tocitem')
def hasContent_(self):
if (
self.tocitem is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docTocListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('tocitem=[\n')
level += 1
for tocitem in self.tocitem:
showIndent(outfile, level)
outfile.write('model_.tocitem(\n')
tocitem.exportLiteral(outfile, level, name_='tocitem')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'tocitem':
obj_ = docTocItemType.factory()
obj_.build(child_)
self.tocitem.append(obj_)
# end class docTocListType
class docLanguageType(GeneratedsSuper):
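    """Language-specific block with a 'langid' attribute and a list of 'para' children."""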
subclass = None
superclass = None
def __init__(self, langid=None, para=None):
self.langid = langid
if para is None:
self.para = []
else:
self.para = para
def factory(*args_, **kwargs_):
if docLanguageType.subclass:
return docLanguageType.subclass(*args_, **kwargs_)
else:
return docLanguageType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_langid(self): return self.langid
def set_langid(self, langid): self.langid = langid
def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docLanguageType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'):
if self.langid is not None:
outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
def hasContent_(self):
if (
self.para is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docLanguageType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.langid is not None:
showIndent(outfile, level)
outfile.write('langid = %s,\n' % (self.langid,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
level += 1
for para in self.para:
showIndent(outfile, level)
outfile.write('model_.para(\n')
para.exportLiteral(outfile, level, name_='para')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('langid'):
self.langid = attrs.get('langid').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
# end class docLanguageType
class docParamListType(GeneratedsSuper):
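    """Parameter list with a 'kind' attribute and a list of 'parameteritem' children."""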
subclass = None
superclass = None
def __init__(self, kind=None, parameteritem=None):
self.kind = kind
if parameteritem is None:
self.parameteritem = []
else:
self.parameteritem = parameteritem
def factory(*args_, **kwargs_):
if docParamListType.subclass:
return docParamListType.subclass(*args_, **kwargs_)
else:
return docParamListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parameteritem(self): return self.parameteritem
def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem
def add_parameteritem(self, value): self.parameteritem.append(value)
def insert_parameteritem(self, index, value): self.parameteritem[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docParamListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'):
if self.kind is not None:
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'):
for parameteritem_ in self.parameteritem:
parameteritem_.export(outfile, level, namespace_, name_='parameteritem')
def hasContent_(self):
if (
self.parameteritem is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docParamListType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.kind is not None:
showIndent(outfile, level)
outfile.write('kind = "%s",\n' % (self.kind,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parameteritem=[\n')
level += 1
for parameteritem in self.parameteritem:
showIndent(outfile, level)
outfile.write('model_.parameteritem(\n')
parameteritem.exportLiteral(outfile, level, name_='parameteritem')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('kind'):
self.kind = attrs.get('kind').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'parameteritem':
obj_ = docParamListItem.factory()
obj_.build(child_)
self.parameteritem.append(obj_)
# end class docParamListType
class docParamListItem(GeneratedsSuper):
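    """Single parameter-list item: a list of 'parameternamelist' children plus one 'parameterdescription'."""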
subclass = None
superclass = None
def __init__(self, parameternamelist=None, parameterdescription=None):
if parameternamelist is None:
self.parameternamelist = []
else:
self.parameternamelist = parameternamelist
self.parameterdescription = parameterdescription
def factory(*args_, **kwargs_):
if docParamListItem.subclass:
return docParamListItem.subclass(*args_, **kwargs_)
else:
return docParamListItem(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parameternamelist(self): return self.parameternamelist
def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist
def add_parameternamelist(self, value): self.parameternamelist.append(value)
def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value
def get_parameterdescription(self): return self.parameterdescription
def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription
def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docParamListItem')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'):
for parameternamelist_ in self.parameternamelist:
parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist')
if self.parameterdescription:
self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', )
def hasContent_(self):
if (
self.parameternamelist is not None or
self.parameterdescription is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docParamListItem'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parameternamelist=[\n')
level += 1
for parameternamelist in self.parameternamelist:
showIndent(outfile, level)
outfile.write('model_.parameternamelist(\n')
parameternamelist.exportLiteral(outfile, level, name_='parameternamelist')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.parameterdescription:
showIndent(outfile, level)
outfile.write('parameterdescription=model_.descriptionType(\n')
self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'parameternamelist':
obj_ = docParamNameList.factory()
obj_.build(child_)
self.parameternamelist.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'parameterdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_parameterdescription(obj_)
# end class docParamListItem
class docParamNameList(GeneratedsSuper):
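    """List of 'parametername' children for one parameter-list item."""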
subclass = None
superclass = None
def __init__(self, parametername=None):
if parametername is None:
self.parametername = []
else:
self.parametername = parametername
def factory(*args_, **kwargs_):
if docParamNameList.subclass:
return docParamNameList.subclass(*args_, **kwargs_)
else:
return docParamNameList(*args_, **kwargs_)
factory = staticmethod(factory)
def get_parametername(self): return self.parametername
def set_parametername(self, parametername): self.parametername = parametername
def add_parametername(self, value): self.parametername.append(value)
def insert_parametername(self, index, value): self.parametername[index] = value
def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docParamNameList')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'):
for parametername_ in self.parametername:
parametername_.export(outfile, level, namespace_, name_='parametername')
def hasContent_(self):
if (
self.parametername is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docParamNameList'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('parametername=[\n')
level += 1
for parametername in self.parametername:
showIndent(outfile, level)
outfile.write('model_.parametername(\n')
parametername.exportLiteral(outfile, level, name_='parametername')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'parametername':
obj_ = docParamName.factory()
obj_.build(child_)
self.parametername.append(obj_)
# end class docParamNameList
class docParamName(GeneratedsSuper):
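    """Mixed-content parameter name with an optional 'direction' attribute; text and 'ref' children are kept in content_."""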
subclass = None
superclass = None
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
        self.direction = direction
        self.ref = ref
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if docParamName.subclass:
return docParamName.subclass(*args_, **kwargs_)
else:
return docParamName(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def get_direction(self): return self.direction
def set_direction(self, direction): self.direction = direction
def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docParamName')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'):
if self.direction is not None:
outfile.write(' direction=%s' % (quote_attrib(self.direction), ))
def exportChildren(self, outfile, level, namespace_='', name_='docParamName'):
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_)
def hasContent_(self):
if (
self.ref is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docParamName'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.direction is not None:
showIndent(outfile, level)
outfile.write('direction = "%s",\n' % (self.direction,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('direction'):
self.direction = attrs.get('direction').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'ref':
childobj_ = docRefTextType.factory()
childobj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'ref', childobj_)
self.content_.append(obj_)
elif child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content_.append(obj_)
# end class docParamName
class docXRefSectType(GeneratedsSuper):
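    """Cross-reference section with an 'id' attribute, a list of 'xreftitle' strings and an 'xrefdescription'."""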
subclass = None
superclass = None
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
self.id = id
if xreftitle is None:
self.xreftitle = []
else:
self.xreftitle = xreftitle
self.xrefdescription = xrefdescription
def factory(*args_, **kwargs_):
if docXRefSectType.subclass:
return docXRefSectType.subclass(*args_, **kwargs_)
else:
return docXRefSectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_xreftitle(self): return self.xreftitle
def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle
def add_xreftitle(self, value): self.xreftitle.append(value)
def insert_xreftitle(self, index, value): self.xreftitle[index] = value
def get_xrefdescription(self): return self.xrefdescription
def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'):
if self.id is not None:
outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'):
for xreftitle_ in self.xreftitle:
showIndent(outfile, level)
outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
if self.xrefdescription:
self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', )
def hasContent_(self):
if (
self.xreftitle is not None or
self.xrefdescription is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docXRefSectType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.id is not None:
showIndent(outfile, level)
outfile.write('id = %s,\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('xreftitle=[\n')
level += 1
for xreftitle in self.xreftitle:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.xrefdescription:
showIndent(outfile, level)
outfile.write('xrefdescription=model_.descriptionType(\n')
self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('id'):
self.id = attrs.get('id').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'xreftitle':
xreftitle_ = ''
for text__content_ in child_.childNodes:
xreftitle_ += text__content_.nodeValue
self.xreftitle.append(xreftitle_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'xrefdescription':
obj_ = descriptionType.factory()
obj_.build(child_)
self.set_xrefdescription(obj_)
# end class docXRefSectType
class docCopyType(GeneratedsSuper):
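    """Copied-documentation block with a 'link' attribute and 'para', 'sect1' and optional 'internal' children."""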
subclass = None
superclass = None
def __init__(self, link=None, para=None, sect1=None, internal=None):
self.link = link
if para is None:
self.para = []
else:
self.para = para
if sect1 is None:
self.sect1 = []
else:
self.sect1 = sect1
self.internal = internal
def factory(*args_, **kwargs_):
if docCopyType.subclass:
return docCopyType.subclass(*args_, **kwargs_)
else:
return docCopyType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_para(self): return self.para
def set_para(self, para): self.para = para
def add_para(self, value): self.para.append(value)
def insert_para(self, index, value): self.para[index] = value
def get_sect1(self): return self.sect1
def set_sect1(self, sect1): self.sect1 = sect1
def add_sect1(self, value): self.sect1.append(value)
def insert_sect1(self, index, value): self.sect1[index] = value
def get_internal(self): return self.internal
def set_internal(self, internal): self.internal = internal
def get_link(self): return self.link
def set_link(self, link): self.link = link
def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docCopyType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'):
if self.link is not None:
outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), ))
def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
for para_ in self.para:
para_.export(outfile, level, namespace_, name_='para')
for sect1_ in self.sect1:
sect1_.export(outfile, level, namespace_, name_='sect1')
if self.internal:
self.internal.export(outfile, level, namespace_, name_='internal')
def hasContent_(self):
if (
self.para is not None or
self.sect1 is not None or
self.internal is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docCopyType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.link is not None:
showIndent(outfile, level)
outfile.write('link = %s,\n' % (self.link,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('para=[\n')
level += 1
for para in self.para:
showIndent(outfile, level)
outfile.write('model_.para(\n')
para.exportLiteral(outfile, level, name_='para')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('sect1=[\n')
level += 1
for sect1 in self.sect1:
showIndent(outfile, level)
outfile.write('model_.sect1(\n')
sect1.exportLiteral(outfile, level, name_='sect1')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.internal:
showIndent(outfile, level)
outfile.write('internal=model_.docInternalType(\n')
self.internal.exportLiteral(outfile, level, name_='internal')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('link'):
self.link = attrs.get('link').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'para':
obj_ = docParaType.factory()
obj_.build(child_)
self.para.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'sect1':
obj_ = docSect1Type.factory()
obj_.build(child_)
self.sect1.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'internal':
obj_ = docInternalType.factory()
obj_.build(child_)
self.set_internal(obj_)
# end class docCopyType
class docCharType(GeneratedsSuper):
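    """Character element carrying a 'char' attribute plus any character data in valueOf_."""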
subclass = None
superclass = None
def __init__(self, char=None, valueOf_=''):
self.char = char
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if docCharType.subclass:
return docCharType.subclass(*args_, **kwargs_)
else:
return docCharType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_char(self): return self.char
def set_char(self, char): self.char = char
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docCharType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'):
if self.char is not None:
outfile.write(' char=%s' % (quote_attrib(self.char), ))
def exportChildren(self, outfile, level, namespace_='', name_='docCharType'):
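        # quote_xml() escapes markup characters, so buildChildren() below stores
        # a CDATA section under the marker '![CDATA[...]]'; here that marker is
        # expanded back to a literal '<![CDATA[...]]>' before being written out.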
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docCharType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
if self.char is not None:
showIndent(outfile, level)
outfile.write('char = "%s",\n' % (self.char,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('char'):
self.char = attrs.get('char').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docCharType
class docEmptyType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if docEmptyType.subclass:
return docEmptyType.subclass(*args_, **kwargs_)
else:
return docEmptyType(*args_, **kwargs_)
factory = staticmethod(factory)
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='docEmptyType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'):
if self.valueOf_.find('![CDATA')>-1:
value=quote_xml('%s' % self.valueOf_)
value=value.replace('![CDATA','<![CDATA')
value=value.replace(']]',']]>')
outfile.write(value)
else:
outfile.write(quote_xml('%s' % self.valueOf_))
def hasContent_(self):
if (
self.valueOf_ is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='docEmptyType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
self.valueOf_ = ''
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.TEXT_NODE:
self.valueOf_ += child_.nodeValue
elif child_.nodeType == Node.CDATA_SECTION_NODE:
self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
# end class docEmptyType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
-s Use the SAX parser, not the minidom parser.
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def parse(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="doxygen",
namespacedef_='')
return rootObj
def parseString(inString):
doc = minidom.parseString(inString)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="doxygen",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.documentElement
rootObj = DoxygenType.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('from compound import *\n\n')
sys.stdout.write('rootObj = doxygen(\n')
rootObj.exportLiteral(sys.stdout, 0, name_="doxygen")
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
main()
#import pdb
#pdb.run('main()')
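# Illustrative sketch (not part of the generated module's normal command-line
# flow; the XML snippet is an arbitrary, assumed instance of this schema):
# the same entry points driven by main() can also be called programmatically.
def _example_parse_from_string():
    # parseString() builds the object tree, echoes it to stdout as XML, and
    # returns the root object for further inspection.
    xml_text = '<?xml version="1.0"?><doxygen version="1.5.1"></doxygen>'
    return parseString(xml_text)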
|
mollstam/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/profilee.py | 399 | """
Input for test_profile.py and test_cprofile.py.
IMPORTANT: This stuff is touchy. If you modify anything above the
test class you'll have to regenerate the stats by running the two
test files.
*ALL* NUMBERS in the expected output are relevant. If you change
the formatting of pstats, please don't just regenerate the expected
output without checking very carefully that not a single number has
changed.
"""
import sys
# In order to have reproducible time, we simulate a timer in the global
# variable 'TICKS', which represents simulated time in milliseconds.
# (We can't use a helper function to increment the timer since it would be
# included in the profile and would appear to consume all the time.)
TICKS = 42000
def timer():
return TICKS
def testfunc():
# 1 call
# 1000 ticks total: 270 ticks local, 730 ticks in subfunctions
global TICKS
TICKS += 99
helper() # 300
helper() # 300
TICKS += 171
factorial(14) # 130
def factorial(n):
# 23 calls total
# 170 ticks total, 150 ticks local
# 3 primitive calls, 130, 20 and 20 ticks total
# including 116, 17, 17 ticks local
global TICKS
if n > 0:
TICKS += n
return mul(n, factorial(n-1))
else:
TICKS += 11
return 1
def mul(a, b):
# 20 calls
# 1 tick, local
global TICKS
TICKS += 1
return a * b
def helper():
# 2 calls
    # 300 ticks total: 20 ticks local, 280 ticks in subfunctions
global TICKS
TICKS += 1
helper1() # 30
TICKS += 2
helper1() # 30
TICKS += 6
helper2() # 50
TICKS += 3
helper2() # 50
TICKS += 2
helper2() # 50
TICKS += 5
helper2_indirect() # 70
TICKS += 1
def helper1():
# 4 calls
# 30 ticks total: 29 ticks local, 1 tick in subfunctions
global TICKS
TICKS += 10
hasattr(C(), "foo") # 1
TICKS += 19
lst = []
lst.append(42) # 0
sys.exc_info() # 0
def helper2_indirect():
helper2() # 50
factorial(3) # 20
def helper2():
# 8 calls
    # 50 ticks total: 39 ticks local, 11 ticks in subfunctions
global TICKS
TICKS += 11
hasattr(C(), "bar") # 1
TICKS += 13
subhelper() # 10
TICKS += 15
def subhelper():
# 8 calls
# 10 ticks total: 8 ticks local, 2 ticks in subfunctions
global TICKS
TICKS += 2
for i in range(2): # 0
try:
C().foo # 1 x 2
except AttributeError:
TICKS += 3 # 3 x 2
class C:
def __getattr__(self, name):
# 28 calls
# 1 tick, local
global TICKS
TICKS += 1
raise AttributeError
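# Illustrative sketch (not exercised by this module itself) of how the
# deterministic TICKS clock plugs into the profilers, in the spirit of what
# test_profile.py / test_cprofile.py do with this input module.
def _example_profile_run():
    import cProfile, pstats
    prof = cProfile.Profile(timer, 0.001)  # simulated clock, 1 tick == 1 ms
    prof.runcall(testfunc)                 # the reproducible 1000-tick workload
    return pstats.Stats(prof)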
|
OpenTransportDataProject/ckanext-ontology | refs/heads/master | setup.py | 1 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='''ckanext-ontology''',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.0.4',
description='''Extension for adding and cataloguing ontologies''',
long_description=long_description,
# The project's main homepage. Updated to refer to project main page
url='https://github.com/OpenTransportDataProject/ckanext-ontology',
# Author details
author='''Kristoffer Larsen''',
author_email='''[email protected]''',
# Choose your license
license='AGPL',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='''CKAN extension ontology''',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=[],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
include_package_data=True,
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points='''
[ckan.plugins]
ontology=ckanext.ontology.plugin:OntologyPlugin
datasetform=ckanext.ontology.plugin:DatasetFormPlugin
[babel.extractors]
ckan = ckan.lib.extract:extract_ckan
''',
# If you are changing from the default layout of your extension, you may
# have to change the message extractors, you can read more about babel
# message extraction at
# http://babel.pocoo.org/docs/messages/#extraction-method-mapping-and-configuration
message_extractors={
'ckanext': [
('**.py', 'python', None),
('**.js', 'javascript', None),
('**/templates/**.html', 'ckan', None),
],
}
)
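# Illustrative notes (assuming the package is installed in the running
# environment): the [ckan.plugins] entry points declared above are what CKAN's
# plugin loader resolves, and they can be listed directly, e.g.
#
#     import pkg_resources
#     for ep in pkg_resources.iter_entry_points('ckan.plugins'):
#         print(ep.name, ep.module_name)
#
# To enable them, the CKAN configuration .ini would carry a line such as
#     ckan.plugins = ... ontology datasetform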
|
Ademan/NumPy-GSoC | refs/heads/master | numpy/distutils/tests/test_misc_util.py | 35 | #!/usr/bin/env python
from numpy.testing import *
from numpy.distutils.misc_util import appendpath, minrelpath, gpaths, rel_path
from os.path import join, sep, dirname
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath(TestCase):
def test_1(self):
assert_equal(appendpath('prefix','name'),join('prefix','name'))
assert_equal(appendpath('/prefix','name'),ajoin('prefix','name'))
assert_equal(appendpath('/prefix','/name'),ajoin('prefix','name'))
assert_equal(appendpath('prefix','/name'),join('prefix','name'))
def test_2(self):
assert_equal(appendpath('prefix/sub','name'),
join('prefix','sub','name'))
assert_equal(appendpath('prefix/sub','sup/name'),
join('prefix','sub','sup','name'))
assert_equal(appendpath('/prefix/sub','/prefix/name'),
ajoin('prefix','sub','name'))
def test_3(self):
assert_equal(appendpath('/prefix/sub','/prefix/sup/name'),
ajoin('prefix','sub','sup','name'))
assert_equal(appendpath('/prefix/sub/sub2','/prefix/sup/sup2/name'),
ajoin('prefix','sub','sub2','sup','sup2','name'))
assert_equal(appendpath('/prefix/sub/sub2','/prefix/sub/sup/name'),
ajoin('prefix','sub','sub2','sup','name'))
class TestMinrelpath(TestCase):
def test_1(self):
n = lambda path: path.replace('/',sep)
assert_equal(minrelpath(n('aa/bb')),n('aa/bb'))
assert_equal(minrelpath('..'),'..')
assert_equal(minrelpath(n('aa/..')),'')
assert_equal(minrelpath(n('aa/../bb')),'bb')
assert_equal(minrelpath(n('aa/bb/..')),'aa')
assert_equal(minrelpath(n('aa/bb/../..')),'')
assert_equal(minrelpath(n('aa/bb/../cc/../dd')),n('aa/dd'))
assert_equal(minrelpath(n('.././..')),n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')),n('dd'))
class TestGpaths(TestCase):
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__),'..'))
ls = gpaths('command/*.py', local_path)
assert join(local_path,'command','build_src.py') in ls,`ls`
f = gpaths('system_info.py', local_path)
assert join(local_path,'system_info.py')==f[0],`f`
if __name__ == "__main__":
run_module_suite()
|
coberger/DIRAC | refs/heads/integration | FrameworkSystem/test/testLogger.py | 7 | # __RCSID__ = "$Id$"
#
# # FIXME: should be rewritten as a real unittest
#
# import DIRAC
# from DIRAC import gLogger
# gLogger.setLevel( 'DEBUG' )
#
# DIRAC.gLogger.initialize('test_gLogger','/testSectionDebug')
#
#
# try:
# for i in None:
# pass
# except:
# pass
#
# testList = [{ 'method' : DIRAC.gLogger.always,
# 'arguments' : ( ( "This is a always message" ), ),
# 'output' : True
# },
# { 'method' : DIRAC.gLogger.info,
# 'arguments' : ( ( "This is a info message" ), ),
# 'output' : True
# },
# { 'method' : DIRAC.gLogger.verbose,
# 'arguments' : ( ( "This is a info message" ), ),
# 'output' : True
# },
# { 'method' : DIRAC.gLogger.debug,
# 'arguments' : ( ( "This is a debug message" ), ),
# 'output' : True
# },
# { 'method' : DIRAC.gLogger.warn,
# 'arguments' : ( ( "This is a warn message" ), ),
# 'output' : True
# },
# { 'method' : DIRAC.gLogger.error,
# 'arguments' : ( ( "This is a error message" ), ),
# 'output' : True
# },
# { 'method' : DIRAC.gLogger.exception,
# 'arguments' : ( ( "This is a exception message" ), ),
# 'output' : True
# },
# { 'method' : DIRAC.gLogger.fatal,
# 'arguments' : ( ( "This is a fatal message" ), ),
# 'output' : True
# },
# ]
# testdict = { 'DIRAC.gLogger' : testList,}
#
#
# DIRAC.Tests.run( testdict, 'DIRAC.Information.Logger' )
|
iesugrace/kazam-custom | refs/heads/master | frontend/main_menu.py | 2 | # -*- coding: utf-8 -*-
#
# main_menu.py
#
# Copyright 2012 David Klasinc <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from gettext import gettext as _
from gi.repository import Gtk, GObject
MENUBAR = """
<ui>
<menubar name='MenuBar'>
<menu action='FileMenu'>
<menuitem action='FilePreferences' />
<menuitem action='FileQuit' />
</menu>
<menu action='HelpMenu'>
<menuitem action='HelpAbout' />
</menu>
</menubar>
</ui>
"""
class MainMenu(GObject.GObject):
__gsignals__ = {
"file-preferences" : (GObject.SIGNAL_RUN_LAST,
None,
(),
),
"file-quit" : (GObject.SIGNAL_RUN_LAST,
None,
(),
),
"help-about" : (GObject.SIGNAL_RUN_LAST,
None,
(),
),
}
def __init__(self):
GObject.GObject.__init__(self)
self.action_group = Gtk.ActionGroup("kazam_actions")
self.action_group.add_actions([
("FileMenu", None, _("File")),
("FileQuit", Gtk.STOCK_QUIT, _("Quit"), None, _("Quit Kazam"),
self.cb_file_quit),
("FilePreferences", Gtk.STOCK_PREFERENCES, _("Preferences"), None, _("Open preferences"),
self.cb_file_preferences),
("HelpMenu", None, _("Help")),
("HelpAbout", None, _("About"), None , _("About Kazam"),
self.cb_help_about)
])
self.uimanager = Gtk.UIManager()
self.uimanager.add_ui_from_string(MENUBAR)
self.uimanager.insert_action_group(self.action_group)
self.menubar = self.uimanager.get_widget("/MenuBar")
def cb_file_quit(self, action):
self.emit("file-quit")
def cb_file_preferences(self, action):
self.emit("file-preferences")
def cb_help_about(self, action):
self.emit("help-about")
|
qenter/vlc-android | refs/heads/master | toolchains/arm/lib/python2.7/sqlite3/test/types.py | 70 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/test/types.py: tests for type conversion and detection
#
# Copyright (C) 2005-2007 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import unittest
import sqlite3 as sqlite
try:
import zlib
except ImportError:
zlib = None
class SqliteTypeTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.cur = self.con.cursor()
self.cur.execute("create table test(i integer, s varchar, f number, b blob)")
def tearDown(self):
self.cur.close()
self.con.close()
def CheckString(self):
self.cur.execute("insert into test(s) values (?)", (u"Österreich",))
self.cur.execute("select s from test")
row = self.cur.fetchone()
self.assertEqual(row[0], u"Österreich")
def CheckSmallInt(self):
self.cur.execute("insert into test(i) values (?)", (42,))
self.cur.execute("select i from test")
row = self.cur.fetchone()
self.assertEqual(row[0], 42)
def CheckLargeInt(self):
num = 2**40
self.cur.execute("insert into test(i) values (?)", (num,))
self.cur.execute("select i from test")
row = self.cur.fetchone()
self.assertEqual(row[0], num)
def CheckFloat(self):
val = 3.14
self.cur.execute("insert into test(f) values (?)", (val,))
self.cur.execute("select f from test")
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def CheckBlob(self):
val = buffer("Guglhupf")
self.cur.execute("insert into test(b) values (?)", (val,))
self.cur.execute("select b from test")
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def CheckUnicodeExecute(self):
self.cur.execute(u"select 'Österreich'")
row = self.cur.fetchone()
self.assertEqual(row[0], u"Österreich")
def CheckNonUtf8_Default(self):
try:
self.cur.execute("select ?", (chr(150),))
self.fail("should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
def CheckNonUtf8_TextFactoryString(self):
orig_text_factory = self.con.text_factory
try:
self.con.text_factory = str
self.cur.execute("select ?", (chr(150),))
finally:
self.con.text_factory = orig_text_factory
def CheckNonUtf8_TextFactoryOptimizedUnicode(self):
orig_text_factory = self.con.text_factory
try:
try:
self.con.text_factory = sqlite.OptimizedUnicode
self.cur.execute("select ?", (chr(150),))
self.fail("should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
finally:
self.con.text_factory = orig_text_factory
class DeclTypesTests(unittest.TestCase):
class Foo:
def __init__(self, _val):
self.val = _val
def __cmp__(self, other):
if not isinstance(other, DeclTypesTests.Foo):
raise ValueError
if self.val == other.val:
return 0
else:
return 1
def __conform__(self, protocol):
if protocol is sqlite.PrepareProtocol:
return self.val
else:
return None
def __str__(self):
return "<%s>" % self.val
def setUp(self):
self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
self.cur = self.con.cursor()
self.cur.execute("create table test(i int, s str, f float, b bool, u unicode, foo foo, bin blob, n1 number, n2 number(5))")
# override float, make them always return the same number
sqlite.converters["FLOAT"] = lambda x: 47.2
# and implement two custom ones
sqlite.converters["BOOL"] = lambda x: bool(int(x))
sqlite.converters["FOO"] = DeclTypesTests.Foo
sqlite.converters["WRONG"] = lambda x: "WRONG"
sqlite.converters["NUMBER"] = float
def tearDown(self):
del sqlite.converters["FLOAT"]
del sqlite.converters["BOOL"]
del sqlite.converters["FOO"]
del sqlite.converters["NUMBER"]
self.cur.close()
self.con.close()
def CheckString(self):
# default
self.cur.execute("insert into test(s) values (?)", ("foo",))
self.cur.execute('select s as "s [WRONG]" from test')
row = self.cur.fetchone()
self.assertEqual(row[0], "foo")
def CheckSmallInt(self):
# default
self.cur.execute("insert into test(i) values (?)", (42,))
self.cur.execute("select i from test")
row = self.cur.fetchone()
self.assertEqual(row[0], 42)
def CheckLargeInt(self):
# default
num = 2**40
self.cur.execute("insert into test(i) values (?)", (num,))
self.cur.execute("select i from test")
row = self.cur.fetchone()
self.assertEqual(row[0], num)
def CheckFloat(self):
# custom
val = 3.14
self.cur.execute("insert into test(f) values (?)", (val,))
self.cur.execute("select f from test")
row = self.cur.fetchone()
self.assertEqual(row[0], 47.2)
def CheckBool(self):
# custom
self.cur.execute("insert into test(b) values (?)", (False,))
self.cur.execute("select b from test")
row = self.cur.fetchone()
self.assertEqual(row[0], False)
self.cur.execute("delete from test")
self.cur.execute("insert into test(b) values (?)", (True,))
self.cur.execute("select b from test")
row = self.cur.fetchone()
self.assertEqual(row[0], True)
def CheckUnicode(self):
# default
val = u"\xd6sterreich"
self.cur.execute("insert into test(u) values (?)", (val,))
self.cur.execute("select u from test")
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def CheckFoo(self):
val = DeclTypesTests.Foo("bla")
self.cur.execute("insert into test(foo) values (?)", (val,))
self.cur.execute("select foo from test")
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def CheckUnsupportedSeq(self):
class Bar: pass
val = Bar()
try:
self.cur.execute("insert into test(f) values (?)", (val,))
self.fail("should have raised an InterfaceError")
except sqlite.InterfaceError:
pass
except:
self.fail("should have raised an InterfaceError")
def CheckUnsupportedDict(self):
class Bar: pass
val = Bar()
try:
self.cur.execute("insert into test(f) values (:val)", {"val": val})
self.fail("should have raised an InterfaceError")
except sqlite.InterfaceError:
pass
except:
self.fail("should have raised an InterfaceError")
def CheckBlob(self):
# default
val = buffer("Guglhupf")
self.cur.execute("insert into test(bin) values (?)", (val,))
self.cur.execute("select bin from test")
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def CheckNumber1(self):
self.cur.execute("insert into test(n1) values (5)")
value = self.cur.execute("select n1 from test").fetchone()[0]
# if the converter is not used, it's an int instead of a float
self.assertEqual(type(value), float)
    def CheckNumber2(self):
        """Checks whether converter names are cut off at '(' characters"""
self.cur.execute("insert into test(n2) values (5)")
value = self.cur.execute("select n2 from test").fetchone()[0]
# if the converter is not used, it's an int instead of a float
self.assertEqual(type(value), float)
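# Illustrative sketch of the adapter/converter round trip that the test classes
# in this file exercise, shown standalone (table and column names are arbitrary).
def _example_decltype_roundtrip():
    con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
    sqlite.register_adapter(bool, lambda v: int(v))             # Python -> SQLite
    sqlite.register_converter("BOOL", lambda s: bool(int(s)))   # SQLite -> Python
    cur = con.cursor()
    cur.execute("create table t(flag bool)")
    cur.execute("insert into t(flag) values (?)", (True,))
    cur.execute("select flag from t")
    return cur.fetchone()[0]   # True again, decoded by the registered converter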
class ColNamesTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES)
self.cur = self.con.cursor()
self.cur.execute("create table test(x foo)")
sqlite.converters["FOO"] = lambda x: "[%s]" % x
sqlite.converters["BAR"] = lambda x: "<%s>" % x
sqlite.converters["EXC"] = lambda x: 5 // 0
sqlite.converters["B1B1"] = lambda x: "MARKER"
def tearDown(self):
del sqlite.converters["FOO"]
del sqlite.converters["BAR"]
del sqlite.converters["EXC"]
del sqlite.converters["B1B1"]
self.cur.close()
self.con.close()
def CheckDeclTypeNotUsed(self):
"""
Assures that the declared type is not used when PARSE_DECLTYPES
is not set.
"""
self.cur.execute("insert into test(x) values (?)", ("xxx",))
self.cur.execute("select x from test")
val = self.cur.fetchone()[0]
self.assertEqual(val, "xxx")
def CheckNone(self):
self.cur.execute("insert into test(x) values (?)", (None,))
self.cur.execute("select x from test")
val = self.cur.fetchone()[0]
self.assertEqual(val, None)
def CheckColName(self):
self.cur.execute("insert into test(x) values (?)", ("xxx",))
self.cur.execute('select x as "x [bar]" from test')
val = self.cur.fetchone()[0]
self.assertEqual(val, "<xxx>")
# Check if the stripping of colnames works. Everything after the first
# whitespace should be stripped.
self.assertEqual(self.cur.description[0][0], "x")
def CheckCaseInConverterName(self):
self.cur.execute("""select 'other' as "x [b1b1]\"""")
val = self.cur.fetchone()[0]
self.assertEqual(val, "MARKER")
def CheckCursorDescriptionNoRow(self):
"""
cursor.description should at least provide the column name(s), even if
no row returned.
"""
self.cur.execute("select * from test where 0 = 1")
self.assertEqual(self.cur.description[0][0], "x")
class ObjectAdaptationTests(unittest.TestCase):
def cast(obj):
return float(obj)
cast = staticmethod(cast)
def setUp(self):
self.con = sqlite.connect(":memory:")
try:
del sqlite.adapters[int]
except:
pass
sqlite.register_adapter(int, ObjectAdaptationTests.cast)
self.cur = self.con.cursor()
def tearDown(self):
del sqlite.adapters[(int, sqlite.PrepareProtocol)]
self.cur.close()
self.con.close()
def CheckCasterIsUsed(self):
self.cur.execute("select ?", (4,))
val = self.cur.fetchone()[0]
self.assertEqual(type(val), float)
@unittest.skipUnless(zlib, "requires zlib")
class BinaryConverterTests(unittest.TestCase):
def convert(s):
return zlib.decompress(s)
convert = staticmethod(convert)
def setUp(self):
self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES)
sqlite.register_converter("bin", BinaryConverterTests.convert)
def tearDown(self):
self.con.close()
def CheckBinaryInputForConverter(self):
testdata = "abcdefg" * 10
result = self.con.execute('select ? as "x [bin]"', (buffer(zlib.compress(testdata)),)).fetchone()[0]
self.assertEqual(testdata, result)
class DateTimeTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
self.cur = self.con.cursor()
self.cur.execute("create table test(d date, ts timestamp)")
def tearDown(self):
self.cur.close()
self.con.close()
def CheckSqliteDate(self):
d = sqlite.Date(2004, 2, 14)
self.cur.execute("insert into test(d) values (?)", (d,))
self.cur.execute("select d from test")
d2 = self.cur.fetchone()[0]
self.assertEqual(d, d2)
def CheckSqliteTimestamp(self):
ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0)
self.cur.execute("insert into test(ts) values (?)", (ts,))
self.cur.execute("select ts from test")
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts, ts2)
def CheckSqlTimestamp(self):
# The date functions are only available in SQLite version 3.1 or later
if sqlite.sqlite_version_info < (3, 1):
return
# SQLite's current_timestamp uses UTC time, while datetime.datetime.now() uses local time.
now = datetime.datetime.now()
self.cur.execute("insert into test(ts) values (current_timestamp)")
self.cur.execute("select ts from test")
ts = self.cur.fetchone()[0]
self.assertEqual(type(ts), datetime.datetime)
self.assertEqual(ts.year, now.year)
def CheckDateTimeSubSeconds(self):
ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0, 500000)
self.cur.execute("insert into test(ts) values (?)", (ts,))
self.cur.execute("select ts from test")
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts, ts2)
def CheckDateTimeSubSecondsFloatingPoint(self):
ts = sqlite.Timestamp(2004, 2, 14, 7, 15, 0, 510241)
self.cur.execute("insert into test(ts) values (?)", (ts,))
self.cur.execute("select ts from test")
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts, ts2)
def suite():
sqlite_type_suite = unittest.makeSuite(SqliteTypeTests, "Check")
decltypes_type_suite = unittest.makeSuite(DeclTypesTests, "Check")
colnames_type_suite = unittest.makeSuite(ColNamesTests, "Check")
adaptation_suite = unittest.makeSuite(ObjectAdaptationTests, "Check")
bin_suite = unittest.makeSuite(BinaryConverterTests, "Check")
date_suite = unittest.makeSuite(DateTimeTests, "Check")
return unittest.TestSuite((sqlite_type_suite, decltypes_type_suite, colnames_type_suite, adaptation_suite, bin_suite, date_suite))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
kirbyfan64/shedskin | refs/heads/master | shedskin/lib/builtin.py | 5 | # Copyright 2005-2011 Mark Dufour and contributors; License Expat (See LICENSE)
class class_:
def __repr__(self):
return self.__name__
class int_:
def __add__(self, b):
return b.__with_int__()
def __sub__(self, b):
return b.__with_int__()
def __mul__(self, b):
return b.__with_int__()
def __div__(self, b):
return b.__with_int__()
def __floordiv__(self, b):
return b.__with_int__()
def __mod__(self, b):
return b.__with_int__()
def __divmod__(self, b):
return (b.__with_int__(),)
def __and__(self, b):
return 1
def __or__(self, b):
return 1
def __xor__(self, b):
return 1
def __rshift__(self, b):
return 1
def __lshift__(self, b):
return 1
def __invert__(self):
return 1
def __neg__(self):
return 1
def __pos__(self):
return 1
def __hash__(self):
return 1
def __abs__(self):
return 1
def __pow__(self, b):
return b
def __copy__(self):
return self
def __deepcopy__(self):
return self
def __with_int__(self):
return 1
def __with_float__(self):
return 1.0
def __with_bool__(self):
return 1
def __repr__(self):
return ''
class bool_:
def __add__(self, b):
return b.__with_int__()
def __sub__(self, b):
return b.__with_int__()
def __mul__(self, b):
return b.__with_int__()
def __div__(self, b):
return b.__with_int__()
def __floordiv__(self, b):
return b.__with_int__()
def __mod__(self, b):
return b.__with_int__()
def __divmod__(self, b):
return (b.__with_int__(),)
def __and__(self, b):
return b.__with_bool__()
def __or__(self, b):
return b.__with_bool__()
def __xor__(self, b):
return b.__with_bool__()
def __rshift__(self, b):
return 1
def __lshift__(self, b):
return 1
def __invert__(self):
return 1
def __neg__(self):
return 1
def __pos__(self):
return 1
def __hash__(self):
return 1
def __abs__(self):
return 1
def __pow__(self, b):
return b
def __copy__(self):
return self
def __deepcopy__(self):
return self
def __with_int__(self):
return 1
def __with_float__(self):
return 1.0
def __with_bool__(self):
return self
def __repr__(self):
return ''
class float_:
def __add__(self, b):
return b.__with_float__()
def __sub__(self, b):
return b.__with_float__()
def __mul__(self, b):
return b.__with_float__()
def __div__(self, b):
return b.__with_float__()
def __floordiv__(self, b):
return b.__with_float__()
def __mod__(self, b):
return b.__with_float__()
def __divmod__(self, b):
return (b.__with_float__(),)
def __pow__(self, b):
return b.__with_float__()
def is_integer(self):
return True
def __neg__(self):
return 1.0
def __pos__(self):
return 1.0
def __abs__(self):
return 1.0
def __hash__(self):
return 1
def __copy__(self):
return self
def __deepcopy__(self):
return self
def __with_int__(self):
return 1.0
def __with_float__(self):
return 1.0
def __repr__(self):
return ''
class none:
def __hash__(self):
return 1
class pyiter:
def __init__(self, i=None):
pass
def __inititer__(self, i):
self.unit = iter(i).next()
def __iter__(self):
return __iter(self.unit)
def __copy__(self): # XXX to base class
return self
def __deepcopy__(self):
return self
def __with_int__(self):
return self
def __contains__(self, x):
x == self.unit
return True
class pyseq(pyiter):
pass
class list(pyseq):
def append(self, u):
self.unit = u
def index(self, u, s=0, e=0):
u == self.unit
return 1
def count(self, u):
u == self.unit
return 1
def remove(self, u):
u == self.unit
def __getitem__(self, i):
return self.unit
def __setitem__(self, i, u):
self.unit = u
def __delitem__(self, i):
pass
def __len__(self):
return 1
def __add__(self, b):
return self
return b
def __mul__(self, b):
return self
def __iadd__(self, b):
self.unit = b.unit
return self
def __imul__(self, n):
return self
def __slice__(self, x, lower, upper, step):
return self
def __delslice__(self, a, b):
pass
def __setslice__(self, x, lower, upper, step, r):
self.unit = r.unit
def __delete__(self, x, a=1, b=1, s=1):
pass
def __repr__(self):
self.unit.__repr__()
return ''
def __str__(self):
return self.__repr__()
def extend(self, other):
self.unit = other.unit
def insert(self, i, u):
self.unit = u
def pop(self, m=0):
return self.unit
def reverse(self):
pass
def sort(self, cmp=0, key=0, reverse=0):
elem = self.unit
cmp(elem, elem)
cmp2(elem, elem)
key(elem)
class tuple(pyseq):
def __len__(self):
return 1
def __repr__(self):
self.unit.__repr__()
return ''
def __str__(self):
return self.__repr__()
def __add__(self, b):
a = self.unit
a = b.unit
return (a,)
def __mul__(self, b):
return self
def __getitem__(self, i):
a = self.unit
return a
def __slice__(self, x, l, u, s):
return self
def __hash__(self):
return 1
class tuple2(pyseq):
def __len__(self):
return 1
def __repr__(self):
self.first.__repr__()
self.second.__repr__()
return ''
def __str__(self):
return self.__repr__()
def __add__(self, b):
a = self.unit
a = b.unit
return (a,)
def __mul__(self, b):
return (self.unit,)
def __getitem__(self, i):
return self.unit
def __getfirst__(self, i):
return self.first
def __getsecond__(self, i):
return self.second
def __hash__(self):
return 1
class str_(pyseq):
def strip(self, chars=''):
return ''
def lstrip(self, chars=''):
return ''
def rstrip(self, chars=''):
return ''
def istitle(self):
return True
def splitlines(self, c=0):
return ['']
def partition(self, sep):
return ('',)
def rpartition(self, sep):
return ('',)
def rsplit(self, sep='', c=-1):
return ['']
def split(self, sep='',c=-1):
return ['']
def join(self, l):
return self
def __getitem__(self, i):
return ''
def __mul__(self, n):
return ''
def __repr__(self):
return ''
def __mod__(self, a=None):
a = a.unit
a.__str__()
a.__repr__()
return ''
def __add__(self,be):
return ''
def __len__(self):
return 1
def upper(self):
return ''
def lower(self):
return ''
def title(self):
return ''
def capitalize(self):
return ''
def find(self, sub, s=0, e=0):
return 1
def rfind(self, sub, s=0, e=0):
return 1
def index(self, sub, s=0, e=0):
return 1
def rindex(self, sub, s=0, e=0):
return 1
def isdigit(self):
return True
def islower(self):
return True
def isupper(self):
return True
def isalpha(self):
return True
def isspace(self):
return True
def isalnum(self):
return True
def zfill(self, width):
return ''
def ljust(self, width, chars=''):
return ''
def rjust(self, width, chars=''):
return ''
def expandtabs(self, width=8):
return ''
def count(self, e, start=0, end=0):
return 1
def startswith(self, e, start=0, end=0):
return True
def endswith(self, e, start=0, end=0):
return True
def replace(self, a, b, c=0):
return ''
def translate(self, table, delchars=''):
return ''
def swapcase(self):
return ''
def center(self, w, fill=''):
return ''
def __slice__(self, x, l, u, s):
return self
def __hash__(self):
return 1
class dict(pyiter):
def __initdict__(self, other):
self.__setunit__(other.unit, other.value)
def __inititer__(self, other):
item = iter(other).next()
self.__setunit__(item[0], item[1])
def __repr__(self):
self.unit.__repr__()
self.value.__repr__()
return ''
def __str__(self):
return self.__repr__()
def __key__(self, k):
k.__hash__()
k.__eq__(k)
def __setunit__(self, k, v):
self.__key__(k)
self.unit = k
self.value = v
def __setitem__(self, u, v):
self.__setunit__(u, v)
def __getitem__(self, k):
self.__key__(k)
return self.value
def __delitem__(self, k):
self.__key__(k)
def setdefault(self, u, v=None):
self.__setunit__(u, v)
return v
def keys(self):
return [self.unit]
def values(self):
return [self.value]
def items(self):
return [(self.unit, self.value)]
def has_key(self, u):
self.__key__(u)
return True
def __len__(self):
return 1
def clear(self):
pass
def copy(self):
return {self.unit: self.value}
def get(self, u, v=None):
self.__key__(u)
return self.value
return v
def pop(self, u):
self.__key__(u)
return self.value
def popitem(self):
return (self.unit, self.value)
def update(self, d):
self.__setunit__(d.unit, d.value)
def updateiter(self, other):
item = iter(other).next()
self.__setunit__(item[0], item[1])
def __delete__(self, k):
self.__key__(k)
def fromkeys(l, b=None):
return {l.unit: b}
fromkeys = staticmethod(fromkeys) # XXX classmethod
def iterkeys(self):
return __iter(self.unit)
def itervalues(self):
return __iter(self.value)
def iteritems(self):
return __iter((self.unit, self.value))
class pyset(pyiter):
def __inititer__(self, i):
self.__setunit__(iter(i).next())
def __setunit__(self, unit):
self.unit = unit
unit.__hash__()
unit.__eq__(unit)
def issubset(self, b):
return True
def issuperset(self, b):
return True
def isdisjoint(self, b):
return True
def intersection(self, *b):
return self
def difference(self, *b):
return self
def symmetric_difference(self, b):
return self
return b
def __sub__(self, b):
return self
def __and__(self, b):
return self
def __or__(self, b):
return self
def __xor__(self, b):
return self
def __ior__(self, b):
self.__setunit__(iter(b).next())
return self
def __iand__(self, b):
self.__setunit__(iter(b).next())
return self
def __ixor__(self, b):
self.__setunit__(iter(b).next())
return self
def __isub__(self, b):
self.__setunit__(iter(b).next())
return self
def union(self, *b):
return self
return set(b)
def copy(self):
return self
def __hash__(self):
return 1
def __len__(self):
return 1
def __repr__(self):
self.unit.__repr__()
return ''
class frozenset(pyset):
pass
class set(pyset):
def add(self, u):
self.__setunit__(u)
def discard(self, u):
pass
def remove(self, u):
pass
def pop(self):
return self.unit
def clear(self):
pass
def update(self, *b):
self.__setunit__(iter(b).next())
def difference_update(self, *b):
self.__setunit__(iter(b).next())
def symmetric_difference_update(self, b):
self.__setunit__(iter(b).next())
def intersection_update(self, *b):
self.__setunit__(iter(b).next())
class complex:
def __init__(self, real=None, imag=None):
real.__float__()
self.real = self.imag = 1.0
def __add__(self, c):
return self
def __sub__(self, c):
return self
def __mul__(self, c):
return self
def __div__(self, c):
return self
def __floordiv__(self, b):
return self
def __mod__(self, b):
return self
def __divmod__(self, b):
return (self,)
def __pos__(self):
return self
def __neg__(self):
return self
def __abs__(self):
return 1.0
def conjugate(self):
return self
def __pow__(self, b):
return self
def __hash__(self):
return 1
def __with_int__(self):
return self
def __with_float__(self):
return self
def __repr__(self):
return ''
complex(1.0, 1.0)
class object: pass
class BaseException:
def __init__(self, msg=None):
self.msg = msg # XXX needed?
self.message = msg
class Exception(BaseException): pass
class StandardError(Exception): pass
class AssertionError(StandardError): pass
class EOFError(StandardError): pass
class FloatingPointError(StandardError): pass
class IndexError(StandardError): pass
class IOError(StandardError): pass
class KeyError(StandardError): pass
class MemoryError(StandardError): pass
class NameError(StandardError): pass
class NotImplementedError(StandardError): pass
class OSError(StandardError): pass
class OverflowError(StandardError): pass
class RuntimeError(StandardError): pass
class SyntaxError(StandardError): pass
class SystemError(StandardError): pass
class TypeError(StandardError): pass
class ValueError(StandardError): pass
class ZeroDivisionError(StandardError): pass
class StopIteration(Exception): pass
class GeneratorExit(BaseException): pass
class KeyboardInterrupt(BaseException): pass
class SystemExit(BaseException): pass
__exception = OSError('') # XXX remove
__exception = IOError('')
__exception.errno = 0
__exception.filename = ''
__exception.strerror = ''
__exception2 = SystemExit('')
__exception2.code = 1
def str(x=None):
x.__str__()
x.__repr__()
return ''
def int(x=None, base=1):
x.__int__()
return 1
def float(x=None):
x.__float__()
return 1.0
def hex(x):
x.__hex__()
return ''
def oct(x):
x.__oct__()
return ''
def bin(x):
x.__index__()
return ''
def isinstance(a, b):
return True
def range(a, b=1, s=1):
return [1]
def raw_input(msg=''):
return ''
class file(pyiter):
def __init__(self, name, flags=None):
self.unit = ''
self.closed = 0
self.name = ''
self.mode = ''
def read(self, size=0):
return ''
def readline(self, n=-1):
return ''
def readlines(self, sizehint=-1):
return ['']
def xreadlines(self):
return iter(self)
def write(self, s):
pass
def writelines(self, it):
pass
def seek(self, i, w=0):
pass
def tell(self):
return 1
def flush(self):
pass
def close(self):
pass
def fileno(self):
return 1
def __repr__(self):
return ''
def isatty(self):
return False
def truncate(self, size=-1):
pass
def next(self):
return ''
def open(name, flags=None):
return file(name, flags)
def ord(c):
return 1
def chr(i):
return 'c'
def round(x, n=0):
return 1.0
def divmod(a, b):
return a.__divmod__(b)
def bool(x):
x.__nonzero__()
x.__len__()
return True
def repr(x):
return x.__repr__()
def hash(x):
return x.__hash__()
def len(w):
return w.__len__()
def pow(a, b, c=1):
return a.__pow__(b)
def abs(x):
return x.__abs__()
def sorted(it, cmp=0, key=0, reverse=0):
elem = iter(it).next()
cmp(elem, elem)
cmp2(elem, elem)
key(elem)
return [elem]
def reversed(l):
return iter(l)
def enumerate(x, start=0):
return __iter((1, iter(x).next()))
class __xrange:
def __init__(self):
self.unit = 1
def __iter__(self):
return __iter(1)
def __len__(self):
return 1
def xrange(a, b=1, s=1):
return __xrange()
def zip(*args):
return [(iter(args).next(),)]
def __zip2(arg1, arg2):
return [(iter(arg1).next(), iter(arg2).next())]
def max(__kw_key=0, *arg): # XXX 0
cmp(arg, arg)
__kw_key(arg)
return arg
def __max1(arg, __kw_key=0):
elem = iter(arg).next()
cmp(elem, elem)
__kw_key(elem)
return elem
def min(__kw_key=0, *arg): # XXX 0
cmp(arg, arg)
__kw_key(arg)
return arg
def __min1(arg, __kw_key=0):
elem = iter(arg).next()
cmp(elem, elem)
__kw_key(elem)
return elem
def sum(l, b):
return sum(l)
return b
def __sum1(l):
elem = iter(l).next()
elem.__add__(elem)
return elem
def cmp(a, b):
a.__cmp__(b)
a.__eq__(b)
a.__lt__(b)
a.__gt__(b)
return 1
def cmp2(a, b):
cmp(a, b)
def __lt(a, b):
a.__lt__(b)
b.__gt__(a)
def __gt(a, b):
a.__gt__(b)
b.__lt__(a)
def __le(a, b):
a.__le__(b)
b.__ge__(a)
def __ge(a, b):
a.__ge__(b)
b.__le__(a)
def any(a):
return True
def all(a):
return True
class __iter(pyiter):
def __init__(self, a):
self.unit = a
def next(self):
return self.unit
def __iter__(self):
return self
def __len__(self): # xrange and such
return 1
def __getitem__(self, i): # modeling shortcut
return self.unit
def iter(a):
return a.__iter__()
def exit(code=0):
pass
def quit(code=0):
pass
def map(func, *iter1):
return [func(*iter(iter1).next())]
def __map3(func, iter1, iter2):
return [func(iter(iter1).next(), iter(iter2).next())]
def __map4(func, iter1, iter2, iter3): # XXX
return [func(iter(iter1).next(), iter(iter2).next(), iter(iter3).next())]
def filter(func, iter1):
elem = iter(iter1).next()
func(elem)
return [elem]
def __filter(func, iter1):
func(iter(iter1).next())
return iter1
def reduce(func, iter1, init=None):
elem = iter(iter1).next()
# elem = init
elem = func(elem, elem)
return elem
def next(iter1, fillvalue=None):
return iter1.next()
return fillvalue
def id(x):
return 1
def __print(__kw_sep=0, __kw_end=0, __kw_file=0, *value):
value.__str__()
|
Salat-Cx65/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/ctypes/test/test_pep3118.py | 50 | import unittest
from ctypes import *
import re, struct, sys
if sys.byteorder == "little":
THIS_ENDIAN = "<"
OTHER_ENDIAN = ">"
else:
THIS_ENDIAN = ">"
OTHER_ENDIAN = "<"
def normalize(format):
# Remove current endian specifier and white space from a format
# string
if format is None:
return ""
format = format.replace(OTHER_ENDIAN, THIS_ENDIAN)
return re.sub(r"\s", "", format)
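# Illustrative sketch of what the test methods below assert: a ctypes object
# exposes the PEP 3118 buffer interface, so memoryview() reports a struct-style
# format string for it (the point type here is local and arbitrary).
def _example_struct_format():
    class _Pt(Structure):
        _fields_ = [("x", c_long), ("y", c_long)]
    return normalize(memoryview(_Pt()).format)   # e.g. 'T{<l:x:<l:y:}'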
class Test(unittest.TestCase):
def test_native_types(self):
for tp, fmt, shape, itemtp in native_types:
ob = tp()
v = memoryview(ob)
try:
self.assertEqual(normalize(v.format), normalize(fmt))
if shape is not None:
self.assertEqual(len(v), shape[0])
else:
self.assertEqual(len(v) * sizeof(itemtp), sizeof(ob))
self.assertEqual(v.itemsize, sizeof(itemtp))
self.assertEqual(v.shape, shape)
# ctypes object always have a non-strided memory block
self.assertEqual(v.strides, None)
# they are always read/write
self.assertFalse(v.readonly)
if v.shape:
n = 1
for dim in v.shape:
n = n * dim
self.assertEqual(n * v.itemsize, len(v.tobytes()))
except:
# so that we can see the failing type
print(tp)
raise
def test_endian_types(self):
for tp, fmt, shape, itemtp in endian_types:
ob = tp()
v = memoryview(ob)
try:
self.assertEqual(v.format, fmt)
if shape is not None:
self.assertEqual(len(v), shape[0])
else:
self.assertEqual(len(v) * sizeof(itemtp), sizeof(ob))
self.assertEqual(v.itemsize, sizeof(itemtp))
self.assertEqual(v.shape, shape)
# ctypes object always have a non-strided memory block
self.assertEqual(v.strides, None)
# they are always read/write
self.assertFalse(v.readonly)
if v.shape:
n = 1
for dim in v.shape:
n = n * dim
self.assertEqual(n, len(v))
except:
# so that we can see the failing type
print(tp)
raise
# define some structure classes
class Point(Structure):
_fields_ = [("x", c_long), ("y", c_long)]
class PackedPoint(Structure):
_pack_ = 2
_fields_ = [("x", c_long), ("y", c_long)]
class Point2(Structure):
pass
Point2._fields_ = [("x", c_long), ("y", c_long)]
class EmptyStruct(Structure):
_fields_ = []
class aUnion(Union):
_fields_ = [("a", c_int)]
class Incomplete(Structure):
pass
class Complete(Structure):
pass
PComplete = POINTER(Complete)
Complete._fields_ = [("a", c_long)]
################################################################
#
# This table contains format strings as they look on little endian
# machines. The test replaces '<' with '>' on big endian machines.
#
native_types = [
# type format shape calc itemsize
## simple types
(c_char, "<c", None, c_char),
(c_byte, "<b", None, c_byte),
(c_ubyte, "<B", None, c_ubyte),
(c_short, "<h", None, c_short),
(c_ushort, "<H", None, c_ushort),
# c_int and c_uint may be aliases to c_long
#(c_int, "<i", None, c_int),
#(c_uint, "<I", None, c_uint),
(c_long, "<l", None, c_long),
(c_ulong, "<L", None, c_ulong),
# c_longlong and c_ulonglong are aliases on 64-bit platforms
#(c_longlong, "<q", None, c_longlong),
#(c_ulonglong, "<Q", None, c_ulonglong),
(c_float, "<f", None, c_float),
(c_double, "<d", None, c_double),
# c_longdouble may be an alias to c_double
(c_bool, "<?", None, c_bool),
(py_object, "<O", None, py_object),
## pointers
(POINTER(c_byte), "&<b", None, POINTER(c_byte)),
(POINTER(POINTER(c_long)), "&&<l", None, POINTER(POINTER(c_long))),
## arrays and pointers
(c_double * 4, "(4)<d", (4,), c_double),
(c_float * 4 * 3 * 2, "(2,3,4)<f", (2,3,4), c_float),
(POINTER(c_short) * 2, "(2)&<h", (2,), POINTER(c_short)),
(POINTER(c_short) * 2 * 3, "(3,2)&<h", (3,2,), POINTER(c_short)),
(POINTER(c_short * 2), "&(2)<h", None, POINTER(c_short)),
## structures and unions
(Point, "T{<l:x:<l:y:}", None, Point),
# packed structures do not implement the pep
(PackedPoint, "B", None, PackedPoint),
(Point2, "T{<l:x:<l:y:}", None, Point2),
(EmptyStruct, "T{}", None, EmptyStruct),
    # the pep doesn't support unions
(aUnion, "B", None, aUnion),
## pointer to incomplete structure
(Incomplete, "B", None, Incomplete),
(POINTER(Incomplete), "&B", None, POINTER(Incomplete)),
# 'Complete' is a structure that starts incomplete, but is completed after the
# pointer type to it has been created.
(Complete, "T{<l:a:}", None, Complete),
# Unfortunately the pointer format string is not fixed...
(POINTER(Complete), "&B", None, POINTER(Complete)),
## other
# function signatures are not implemented
(CFUNCTYPE(None), "X{}", None, CFUNCTYPE(None)),
]
class BEPoint(BigEndianStructure):
_fields_ = [("x", c_long), ("y", c_long)]
class LEPoint(LittleEndianStructure):
_fields_ = [("x", c_long), ("y", c_long)]
################################################################
#
# This table contains format strings as they really look, on both big
# and little endian machines.
#
endian_types = [
(BEPoint, "T{>l:x:>l:y:}", None, BEPoint),
(LEPoint, "T{<l:x:<l:y:}", None, LEPoint),
(POINTER(BEPoint), "&T{>l:x:>l:y:}", None, POINTER(BEPoint)),
(POINTER(LEPoint), "&T{<l:x:<l:y:}", None, POINTER(LEPoint)),
]
if __name__ == "__main__":
unittest.main()
|
andgoldschmidt/iEBE | refs/heads/trento-dev | EBE-Node/EbeCollector/uhg_tester.py | 8 |
from uhg import *
import uhg
use("/home/qiu/Downloads/Pb_0_5_Glb_collected.db")
e("dN/dydpT(0.5)(pion_p_hydro)")
e(" v_2[2]( linspace(0,2,30) )(pion_p_hydro) ")
sqrt(mean(abs(uhg._storedEbeDBReader.get_diff_V_n(particleName="pion_p_hydro", order=2, pTs=linspace(0,2,30)))**2, 0))
|
Quikling/gpdb | refs/heads/master | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/vacuum/__init__.py | 12133432 | |
IndonesiaX/edx-platform | refs/heads/master | lms/djangoapps/certificates/migrations/__init__.py | 12133432 | |
mrquim/repository.mrquim | refs/heads/master | repo/script.toolbox/resources/__init__.py | 12133432 | |
redhat-openstack/sahara | refs/heads/master-patches | sahara/tests/unit/utils/openstack/__init__.py | 12133432 | |
sivel/ansible-modules-extras | refs/heads/devel | network/__init__.py | 12133432 | |
destos/free-audio-books | refs/heads/master | free_audio_books/contrib/__init__.py | 12133432 | |
MXWXZ/DuiMini | refs/heads/master | thirdpart/googletest/googletest/test/gtest_help_test.py | 101 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing and Mocking Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must
skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
|
taschik/ramcloud-load-manager | refs/heads/master | scripts/forking_test_runner.py | 20 | #!/usr/bin/env python
# Copyright (c) 2010 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Runs each unit test in a separate process.
This is useful for finding which tests cause crashes or enter infinite loops.
Pass any arguments to output timing statistics.
"""
import os
import re
import signal
import subprocess
import sys
import time
FAIL_AFTER_SECONDS = 2.0
ignore = \
"""terminate called after throwing an instance of 'std::invalid_argument'
what(): No test named <%s> found in test <All Tests>."""
cppunit_fail_header = """!!!FAILURES!!!
Test Results:
Run: 1 Failures: 1 Errors: 0
1) test: RAMCloud::%s::%s (F) """
signals = dict([(getattr(signal, name), name)
for name in dir(signal) if name.startswith('SIG')])
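# Figure out which build directory holds the test binary: obj.<branch> when
# HEAD is on a named git branch, plain obj/ otherwise.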
p = subprocess.Popen(['git', 'symbolic-ref', '-q', 'HEAD'],
stdout=subprocess.PIPE)
p.wait()
git_branch = re.search('^refs/heads/(.*)$', p.stdout.read())
if git_branch is None:
obj_dir = 'obj'
else:
git_branch = git_branch.group(1)
obj_dir = 'obj.%s' % git_branch
tests = []
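# Build the list of (suite, test) pairs by scanning the C++ sources in src/
# for CPPUNIT_TEST_SUITE(...) and CPPUNIT_TEST(...) macro invocations.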
for name in os.listdir('src/'):
if name.endswith('Test.in.cc') or name.endswith('Test.cc'):
suite = None
for line in open('src/%s' % name):
m = re.match('\s*CPPUNIT_TEST_SUITE\((.*)\);', line)
if m:
suite = m.group(1)
continue
m = re.match('\s*CPPUNIT_TEST\((.*)\);', line)
if m:
test = m.group(1)
tests.append((suite, test))
continue
print 'Running %d tests...' % len(tests)
ok = 0
failed = 0
suite_times = {}
test_times = {}
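# Run each test in its own subprocess and poll it; anything still running
# after FAIL_AFTER_SECONDS is killed and reported as a failure.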
for (suite, test) in tests:
start = time.time()
process = subprocess.Popen(['./%s/test' % obj_dir,
'-t', 'RAMCloud::%s::%s' % (suite, test)],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
rc = None
while True:
rc = process.poll()
now = time.time()
if rc is not None:
break
if now - start > FAIL_AFTER_SECONDS:
print "Killing %s::%s" % (suite, test)
process.kill()
break
if rc != 0:
output = process.stdout.read().strip()
if output == (ignore % test):
print "Ignored: RAMCloud::%s::%s" % (suite, test)
continue
if rc is None:
why = ' by taking too long (over %ss)' % FAIL_AFTER_SECONDS
elif rc == 1:
why = '' # usual CPPUNIT failure
elif rc > 1:
why = ' with return value %d' % rc
elif rc < 0:
why = ' from signal %s' % signals[-rc]
cfh = cppunit_fail_header % (suite, test)
if output.startswith(cfh):
output = output[len(cfh):]
print '%s::%s failed%s%s\n' % (suite, test, why,
':\n%s' % output if output else '')
failed += 1
else:
if suite in suite_times:
suite_times[suite] += now - start
else:
suite_times[suite] = now - start
suite_test = '%s::%s' % (suite, test)
if suite_test in test_times:
test_times[suite_test] += now - start
else:
test_times[suite_test] = now - start
ok += 1
print '%d tests passed, %d failed' % (ok, failed)
def print_timing(title, times, num=None):
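    # Print each name with its accumulated time in milliseconds, sorted from
    # slowest to fastest, optionally limited to the top `num` entries.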
print title
print '=' * len(title)
l = times.items()
l.sort(key=lambda x: x[1], reverse=True)
if num is not None:
l = l[:num]
max_name_length = max([len(name) for name, t in l])
for name, t in l:
print '%s%s' % (name.ljust(max_name_length),
('%0.02fms' % (t * 1000)).rjust(8))
if len(sys.argv) > 1:
print
print 'Total time: %0.02fms' % (sum(suite_times.values()) * 1000)
print
print_timing('Suite Timing', suite_times)
print
print_timing('Test Timing (top 20)', test_times, num=20)
|
aykol/pymatgen | refs/heads/master | pymatgen/io/__init__.py | 21 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package provides the packages and modules to perform IO from various
input file formats and pymatgen objects.
"""
|
aoyono/sicpy | refs/heads/master | Chapter2/exercises/exercise2_76.py | 1 | # -*- coding: utf-8 -*-
"""
https://mitpress.mit.edu/sicp/full-text/book/book-Z-H-16.html#%_thm_2.76
"""
def run_the_magic():
print("""
In case we add new types to a system with generic operations:
1. in explicit design style, we would need to create a new predicate
for each type, a new constructor and modify the generic operators
2. in data-directed design style, we would need to modify an install
procedure
    3. in message-passing design style, we would need to write a new
       constructor with its own dispatch procedure
In case we add new operators:
1. in explicit: modify almost everything
2. in data-directed: modify the install package
    3. in message-passing: modify the dispatch procedure of every constructor
    Message-passing is better when we must often add types.
    Data-directed is better when we must often add operations.
""")
if __name__ == "__main__":
run_the_magic()
|
cloudbrain/cloudbrain | refs/heads/master | setup.py | 2 | from setuptools import setup, find_packages
try:
from pip.req import parse_requirements
except ImportError:
from pip._internal.req import parse_requirements
try:
from pip.download import PipSession
except ImportError:
from pip._internal.download import PipSession
import os
# Get __version__ and set other constants.
# Source: https://stackoverflow.com/a/16084844
with open(os.path.join('src', 'cloudbrain', 'version.py'), 'r') as f:
exec (f.read())
URL = 'https://github.com/cloudbrain/cloudbrain'
DOWNLOAD_URL = '%s/archive/%s.tar.gz' % (URL, __version__)
DESCRIPTION = open('README.rst').read()
# Helper function for requirements parsing by requirement type
def parse_reqs(req_type):
reqs_file = os.path.join('requirements', '%s.txt' % req_type)
install_reqs = parse_requirements(reqs_file, session=PipSession())
reqs = [str(ir.req) for ir in install_reqs]
return reqs
# Get requirements for all types
REQUIREMENT_TYPES = ['core', 'analytics', 'muse']
reqs = {req_type: parse_reqs(req_type) for req_type in REQUIREMENT_TYPES}
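# e.g. reqs == {'core': [...], 'analytics': [...], 'muse': [...]}, each value
# being the list of requirement strings parsed from requirements/<type>.txt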
setup(name='cloudbrain',
version=__version__,
description='Platform for wearable data analysis.',
author='Marion Le Borgne',
author_email='[email protected]',
url=URL,
download_url=DOWNLOAD_URL,
package_dir={'': 'src'},
packages=find_packages('src'),
install_requires=reqs['core'],
long_description=DESCRIPTION,
test_suite='nose.collector',
include_package_data=True,
package_data={
"cloudbrain.core": ["*.json"],
"cloudbrain.schema": ["*.json"]
},
extras_require={
'muse:python_version>="3"': reqs['muse'],
'analytics': reqs['analytics']
},
entry_points={
'console_scripts':
['cloudbrain=cloudbrain.run:main']
}
)
|
stevenvolckaert/plugin.video.vrt.nu | refs/heads/master | resources/lib/__init__.py | 12133432 | |
MechCoder/sympy | refs/heads/master | sympy/concrete/summations.py | 4 | from __future__ import print_function, division
from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
from sympy.core.function import Derivative
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import Dummy, Wild
from sympy.core.add import Add
from sympy.calculus.singularities import is_decreasing
from sympy.concrete.gosper import gosper_sum
from sympy.integrals.integrals import integrate
from sympy.functions.elementary.piecewise import Piecewise
from sympy.polys import apart, PolynomialError
from sympy.solvers import solve
from sympy.series.limits import limit
from sympy.series.order import O
from sympy.core.compatibility import range
from sympy.tensor.indexed import Idx
class Sum(AddWithLimits, ExprWithIntLimits):
r"""Represents unevaluated summation.
``Sum`` represents a finite or infinite series, with the first argument
being the general form of terms in the series, and the second argument
being ``(dummy_variable, start, end)``, with ``dummy_variable`` taking
all integer values from ``start`` through ``end``. In accordance with
long-standing mathematical convention, the end term is included in the
summation.
Finite sums
===========
For finite sums (and sums with symbolic limits assumed to be finite) we
follow the summation convention described by Karr [1], especially
definition 3 of section 1.4. The sum:
.. math::
\sum_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\sum_{m \leq i < n} f(i) = f(m) + f(m+1) + \ldots + f(n-2) + f(n-1)
with the upper limit value `f(n)` excluded. The sum over an empty set is
zero if and only if `m = n`:
.. math::
\sum_{m \leq i < n} f(i) = 0 \quad \mathrm{for} \quad m = n
Finally, for all other sums over empty sets we assume the following
definition:
.. math::
\sum_{m \leq i < n} f(i) = - \sum_{n \leq i < m} f(i) \quad \mathrm{for} \quad m > n
It is important to note that Karr defines all sums with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the summation convention. Indeed we have:
.. math::
\sum_{m \leq i < n} f(i) = \sum_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import i, k, m, n, x
>>> from sympy import Sum, factorial, oo, IndexedBase, Function
>>> Sum(k, (k, 1, m))
Sum(k, (k, 1, m))
>>> Sum(k, (k, 1, m)).doit()
m**2/2 + m/2
>>> Sum(k**2, (k, 1, m))
Sum(k**2, (k, 1, m))
>>> Sum(k**2, (k, 1, m)).doit()
m**3/3 + m**2/2 + m/6
>>> Sum(x**k, (k, 0, oo))
Sum(x**k, (k, 0, oo))
>>> Sum(x**k, (k, 0, oo)).doit()
Piecewise((1/(-x + 1), Abs(x) < 1), (Sum(x**k, (k, 0, oo)), True))
>>> Sum(x**k/factorial(k), (k, 0, oo)).doit()
exp(x)
Here are examples to do summation with symbolic indices. You
    can use either Function or IndexedBase classes:
>>> f = Function('f')
>>> Sum(f(n), (n, 0, 3)).doit()
f(0) + f(1) + f(2) + f(3)
>>> Sum(f(n), (n, 0, oo)).doit()
Sum(f(n), (n, 0, oo))
>>> f = IndexedBase('f')
>>> Sum(f[n]**2, (n, 0, 3)).doit()
f[0]**2 + f[1]**2 + f[2]**2 + f[3]**2
An example showing that the symbolic result of a summation is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those sums by interchanging the limits according to the above rules:
>>> S = Sum(i, (i, 1, n)).doit()
>>> S
n**2/2 + n/2
>>> S.subs(n, -4)
6
>>> Sum(i, (i, 1, -4)).doit()
6
>>> Sum(-i, (i, -3, 0)).doit()
6
An explicit example of the Karr summation convention:
>>> S1 = Sum(i**2, (i, m, m+n-1)).doit()
>>> S1
m**2*n + m*n**2 - m*n + n**3/3 - n**2/2 + n/6
>>> S2 = Sum(i**2, (i, m+n, m-1)).doit()
>>> S2
-m**2*n - m*n**2 + m*n - n**3/3 + n**2/2 - n/6
>>> S1 + S2
0
>>> S3 = Sum(i, (i, m, m-1)).doit()
>>> S3
0
See Also
========
summation
Product, product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] http://en.wikipedia.org/wiki/Summation#Capital-sigma_notation
.. [3] http://en.wikipedia.org/wiki/Empty_sum
"""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
if not hasattr(obj, 'limits'):
return obj
if any(len(l) != 3 or None in l for l in obj.limits):
raise ValueError('Sum requires values for lower and upper bounds.')
return obj
def _eval_is_zero(self):
# a Sum is only zero if its function is zero or if all terms
# cancel out. This only answers whether the summand is zero; if
# not then None is returned since we don't analyze whether all
# terms cancel out.
if self.function.is_zero:
return True
def doit(self, **hints):
if hints.get('deep', True):
f = self.function.doit(**hints)
else:
f = self.function
if self.function.is_Matrix:
return self.expand().doit()
for n, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_integer and (dif < 0) == True:
a, b = b + 1, a - 1
f = -f
if isinstance(i, Idx):
i = i.label
newf = eval_sum(f, (i, a, b))
if newf is None:
if f == self.function:
return self
else:
return self.func(f, *self.limits[n:])
f = newf
if hints.get('deep', True):
# eval_sum could return partially unevaluated
# result with Piecewise. In this case we won't
# doit() recursively.
if not isinstance(f, Piecewise):
return f.doit(**hints)
return f
def _eval_derivative(self, x):
"""
Differentiate wrt x as long as x is not in the free symbols of any of
the upper or lower limits.
Sum(a*b*x, (x, 1, a)) can be differentiated wrt x or b but not `a`
since the value of the sum is discontinuous in `a`. In a case
involving a limit variable, the unevaluated derivative is returned.
"""
# diff already confirmed that x is in the free symbols of self, but we
# don't want to differentiate wrt any free symbol in the upper or lower
# limits
# XXX remove this test for free_symbols when the default _eval_derivative is in
if x not in self.free_symbols:
return S.Zero
# get limits and the function
f, limits = self.function, list(self.limits)
limit = limits.pop(-1)
if limits: # f is the argument to a Sum
f = self.func(f, *limits)
if len(limit) == 3:
_, a, b = limit
if x in a.free_symbols or x in b.free_symbols:
return None
df = Derivative(f, x, evaluate=True)
rv = self.func(df, limit)
if limit[0] not in df.free_symbols:
rv = rv.doit()
return rv
else:
            raise NotImplementedError('Lower and upper bound expected.')
def _eval_difference_delta(self, n, step):
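        # The change in the sum when n (appearing in the upper limit) is
        # advanced by `step` is just the sum over the newly included terms,
        # i.e. Sum(f, (k, upper + 1, new_upper)).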
k, _, upper = self.args[-1]
new_upper = upper.subs(n, n + step)
if len(self.args) == 2:
f = self.args[0]
else:
f = self.func(*self.args[:-1])
return Sum(f, (k, upper + 1, new_upper)).doit()
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import sum_simplify
return sum_simplify(self)
def _eval_summation(self, f, x):
return None
def is_convergent(self):
"""
        Checks the convergence of an infinite series.
        Returns True if the series is convergent and False if it is divergent;
        raises NotImplementedError if the convergence cannot be determined.
        The tests employed include the divergence test, root test, integral
        test, alternating series test, comparison tests and Dirichlet tests.
References
==========
.. [1] https://en.wikipedia.org/wiki/Convergence_tests
Examples
========
>>> from sympy import Interval, factorial, S, Sum, Symbol, oo
>>> n = Symbol('n', integer=True)
>>> Sum(n/(n - 1), (n, 4, 7)).is_convergent()
True
>>> Sum(n/(2*n + 1), (n, 1, oo)).is_convergent()
False
>>> Sum(factorial(n)/5**n, (n, 1, oo)).is_convergent()
False
>>> Sum(1/n**(S(6)/5), (n, 1, oo)).is_convergent()
True
See Also
========
Sum.is_absolute_convergent()
"""
from sympy import Interval, Integral, Limit, log, symbols, Ge, Gt, simplify
p, q = symbols('p q', cls=Wild)
sym = self.limits[0][0]
lower_limit = self.limits[0][1]
upper_limit = self.limits[0][2]
sequence_term = self.function
if len(sequence_term.free_symbols) > 1:
raise NotImplementedError("convergence checking for more that one symbol \
containing series is not handled")
if lower_limit.is_finite and upper_limit.is_finite:
return S.true
        # transform sym -> -sym and swap the limits: the sum then runs from -upper_limit up to oo
if lower_limit is S.NegativeInfinity:
if upper_limit is S.Infinity:
return Sum(sequence_term, (sym, 0, S.Infinity)).is_convergent() and \
Sum(sequence_term, (sym, S.NegativeInfinity, 0)).is_convergent()
sequence_term = simplify(sequence_term.xreplace({sym: -sym}))
lower_limit = -upper_limit
upper_limit = S.Infinity
interval = Interval(lower_limit, upper_limit)
# Piecewise function handle
if sequence_term.is_Piecewise:
for func_cond in sequence_term.args:
if func_cond[1].func is Ge or func_cond[1].func is Gt or func_cond[1] == True:
return Sum(func_cond[0], (sym, lower_limit, upper_limit)).is_convergent()
return S.true
### -------- Divergence test ----------- ###
try:
lim_val = limit(abs(sequence_term), sym, upper_limit)
if lim_val.is_number and lim_val != S.Zero:
return S.false
except NotImplementedError:
pass
order = O(sequence_term, (sym, S.Infinity))
### --------- p-series test (1/n**p) ---------- ###
p1_series_test = order.expr.match(sym**p)
if p1_series_test is not None:
if p1_series_test[p] < -1:
return S.true
if p1_series_test[p] > -1:
return S.false
p2_series_test = order.expr.match((1/sym)**p)
if p2_series_test is not None:
if p2_series_test[p] > 1:
return S.true
if p2_series_test[p] < 1:
return S.false
### ----------- root test ---------------- ###
lim = Limit(abs(sequence_term)**(1/sym), sym, S.Infinity)
lim_evaluated = lim.doit()
if lim_evaluated.is_number:
if lim_evaluated < 1:
return S.true
if lim_evaluated > 1:
return S.false
### ------------- alternating series test ----------- ###
d = symbols('d', cls=Dummy)
dict_val = sequence_term.match((-1)**(sym + p)*q)
        if dict_val is not None and not dict_val[p].has(sym) and \
                is_decreasing(dict_val[q], interval):
return S.true
### ------------- comparison test ------------- ###
# (1/log(n)**p) comparison
log_test = order.expr.match(1/(log(sym)**p))
if log_test is not None:
return S.false
# (1/(n*log(n)**p)) comparison
log_n_test = order.expr.match(1/(sym*(log(sym))**p))
if log_n_test is not None:
if log_n_test[p] > 1:
return S.true
return S.false
# (1/(n*log(n)*log(log(n))*p)) comparison
log_log_n_test = order.expr.match(1/(sym*(log(sym)*log(log(sym))**p)))
if log_log_n_test is not None:
if log_log_n_test[p] > 1:
return S.true
return S.false
# (1/(n**p*log(n))) comparison
n_log_test = order.expr.match(1/(sym**p*log(sym)))
if n_log_test is not None:
if n_log_test[p] > 1:
return S.true
return S.false
### ------------- integral test -------------- ###
if is_decreasing(sequence_term, interval):
integral_val = Integral(sequence_term, (sym, lower_limit, upper_limit))
try:
integral_val_evaluated = integral_val.doit()
if integral_val_evaluated.is_number:
return S(integral_val_evaluated.is_finite)
except NotImplementedError:
pass
### -------------- Dirichlet tests -------------- ###
if order.expr.is_Mul:
a_n, b_n = order.expr.args[0], order.expr.args[1]
m = Dummy('m', integer=True)
def _dirichlet_test(g_n):
try:
ing_val = limit(Sum(g_n, (sym, interval.inf, m)).doit(), m, S.Infinity)
if ing_val.is_finite:
return S.true
except NotImplementedError:
pass
if is_decreasing(a_n, interval):
dirich1 = _dirichlet_test(b_n)
if dirich1 is not None:
return dirich1
if is_decreasing(b_n, interval):
dirich2 = _dirichlet_test(a_n)
if dirich2 is not None:
return dirich2
raise NotImplementedError("The algorithm to find the convergence of %s "
"is not yet implemented" % (sequence_term))
def is_absolute_convergent(self):
"""
Checks for the absolute convergence of an infinite series.
Same as checking convergence of absolute value of sequence_term of
an infinite series.
References
==========
.. [1] https://en.wikipedia.org/wiki/Absolute_convergence
Examples
========
>>> from sympy import Sum, Symbol, sin, oo
>>> n = Symbol('n', integer=True)
>>> Sum((-1)**n, (n, 1, oo)).is_absolute_convergent()
False
>>> Sum((-1)**n/n**2, (n, 1, oo)).is_absolute_convergent()
True
See Also
========
Sum.is_convergent()
"""
return Sum(abs(self.function), self.limits).is_convergent()
def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True):
"""
Return an Euler-Maclaurin approximation of self, where m is the
number of leading terms to sum directly and n is the number of
terms in the tail.
With m = n = 0, this is simply the corresponding integral
plus a first-order endpoint correction.
Returns (s, e) where s is the Euler-Maclaurin approximation
and e is the estimated error (taken to be the magnitude of
the first omitted term in the tail):
>>> from sympy.abc import k, a, b
>>> from sympy import Sum
>>> Sum(1/k, (k, 2, 5)).doit().evalf()
1.28333333333333
>>> s, e = Sum(1/k, (k, 2, 5)).euler_maclaurin()
>>> s
-log(2) + 7/20 + log(5)
>>> from sympy import sstr
>>> print(sstr((s.evalf(), e.evalf()), full_prec=True))
(1.26629073187415, 0.0175000000000000)
The endpoints may be symbolic:
>>> s, e = Sum(1/k, (k, a, b)).euler_maclaurin()
>>> s
-log(a) + log(b) + 1/(2*b) + 1/(2*a)
>>> e
Abs(1/(12*b**2) - 1/(12*a**2))
If the function is a polynomial of degree at most 2n+1, the
Euler-Maclaurin formula becomes exact (and e = 0 is returned):
>>> Sum(k, (k, 2, b)).euler_maclaurin()
(b**2/2 + b/2 - 1, 0)
>>> Sum(k, (k, 2, b)).doit()
b**2/2 + b/2 - 1
With a nonzero eps specified, the summation is ended
as soon as the remainder term is less than the epsilon.
"""
from sympy.functions import bernoulli, factorial
from sympy.integrals import Integral
m = int(m)
n = int(n)
f = self.function
if len(self.limits) != 1:
raise ValueError("More than 1 limit")
i, a, b = self.limits[0]
if (a > b) == True:
if a - b == 1:
return S.Zero,S.Zero
a, b = b + 1, a - 1
f = -f
s = S.Zero
if m:
if b.is_Integer and a.is_Integer:
m = min(m, b - a + 1)
if not eps or f.is_polynomial(i):
for k in range(m):
s += f.subs(i, a + k)
else:
term = f.subs(i, a)
if term:
test = abs(term.evalf(3)) < eps
if test == True:
return s, abs(term)
elif not (test == False):
# a symbolic Relational class, can't go further
return term, S.Zero
s += term
for k in range(1, m):
term = f.subs(i, a + k)
if abs(term.evalf(3)) < eps and term != 0:
return s, abs(term)
s += term
if b - a + 1 == m:
return s, S.Zero
a += m
x = Dummy('x')
I = Integral(f.subs(i, x), (x, a, b))
if eval_integral:
I = I.doit()
s += I
def fpoint(expr):
if b is S.Infinity:
return expr.subs(i, a), 0
return expr.subs(i, a), expr.subs(i, b)
fa, fb = fpoint(f)
iterm = (fa + fb)/2
g = f.diff(i)
for k in range(1, n + 2):
ga, gb = fpoint(g)
term = bernoulli(2*k)/factorial(2*k)*(gb - ga)
if (eps and term and abs(term.evalf(3)) < eps) or (k > n):
break
s += term
g = g.diff(i, 2, simplify=False)
return s + iterm, abs(term)
def reverse_order(self, *indices):
"""
Reverse the order of a limit in a Sum.
Usage
=====
``reverse_order(self, *indices)`` reverses some limits in the expression
``self`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> Sum(x, (x, 0, 3)).reverse_order(x)
Sum(-x, (x, 4, -1))
>>> Sum(x*y, (x, 1, 5), (y, 0, 6)).reverse_order(x, y)
Sum(x*y, (x, 6, 0), (y, 7, -1))
>>> Sum(x, (x, a, b)).reverse_order(x)
Sum(-x, (x, b + 1, a - 1))
>>> Sum(x, (x, a, b)).reverse_order(0)
Sum(-x, (x, b + 1, a - 1))
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x**2, (x, a, b), (x, c, d))
>>> S
Sum(x**2, (x, a, b), (x, c, d))
>>> S0 = S.reverse_order(0)
>>> S0
Sum(-x**2, (x, b + 1, a - 1), (x, c, d))
>>> S1 = S0.reverse_order(1)
>>> S1
Sum(x**2, (x, b + 1, a - 1), (x, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
index, reorder_limit, reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = self.index(indx)
e = 1
limits = []
for i, limit in enumerate(self.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Sum(e * self.function, *limits)
def summation(f, *symbols, **kwargs):
r"""
Compute the summation of f with respect to symbols.
The notation for symbols is similar to the notation used in Integral.
summation(f, (i, a, b)) computes the sum of f with respect to i from a to b,
i.e.,
::
b
____
\ `
summation(f, (i, a, b)) = ) f
/___,
i = a
If it cannot compute the sum, it returns an unevaluated Sum object.
Repeated sums can be computed by introducing additional symbols tuples::
>>> from sympy import summation, oo, symbols, log
>>> i, n, m = symbols('i n m', integer=True)
>>> summation(2*i - 1, (i, 1, n))
n**2
>>> summation(1/2**i, (i, 0, oo))
2
>>> summation(1/log(n)**n, (n, 2, oo))
Sum(log(n)**(-n), (n, 2, oo))
>>> summation(i, (i, 0, n), (n, 0, m))
m**3/6 + m**2/2 + m/3
>>> from sympy.abc import x
>>> from sympy import factorial
>>> summation(x**n/factorial(n), (n, 0, oo))
exp(x)
See Also
========
Sum
Product, product
"""
return Sum(f, *symbols, **kwargs).doit(deep=False)
def telescopic_direct(L, R, n, limits):
"""Returns the direct summation of the terms of a telescopic sum
L is the term with lower index
R is the term with higher index
n difference between the indexes of L and R
For example:
>>> from sympy.concrete.summations import telescopic_direct
>>> from sympy.abc import k, a, b
>>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b))
-1/(b + 2) - 1/(b + 1) + 1/(a + 1) + 1/a
"""
(i, a, b) = limits
s = 0
for m in range(n):
s += L.subs(i, a + m) + R.subs(i, b - m)
return s
def telescopic(L, R, limits):
    '''Tries to perform the summation using the telescopic property.
    Returns None if this is not possible.
'''
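    # For example (roughly), telescopic(1/i, -1/(i + 1), (i, a, b)) matches
    # below with s == 1 and reduces to
    # telescopic_direct(1/i, -1/(i + 1), 1, (i, a, b)) == 1/a - 1/(b + 1).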
(i, a, b) = limits
if L.is_Add or R.is_Add:
return None
# We want to solve(L.subs(i, i + m) + R, m)
# First we try a simple match since this does things that
# solve doesn't do, e.g. solve(f(k+m)-f(k), m) fails
k = Wild("k")
sol = (-R).match(L.subs(i, i + k))
s = None
if sol and k in sol:
s = sol[k]
if not (s.is_Integer and L.subs(i, i + s) == -R):
            # sometimes the match fails, e.g. f(x+2).match(-f(x+k)) -> {k: -2 - 2x}
s = None
# But there are things that match doesn't do that solve
# can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1
if s is None:
m = Dummy('m')
try:
sol = solve(L.subs(i, i + m) + R, m) or []
except NotImplementedError:
return None
sol = [si for si in sol if si.is_Integer and
(L.subs(i, i + si) + R).expand().is_zero]
if len(sol) != 1:
return None
s = sol[0]
if s < 0:
return telescopic_direct(R, L, abs(s), (i, a, b))
elif s > 0:
return telescopic_direct(L, R, s, (i, a, b))
def eval_sum(f, limits):
from sympy.concrete.delta import deltasummation, _has_simple_delta
from sympy.functions import KroneckerDelta
(i, a, b) = limits
if f is S.Zero:
return S.Zero
if i not in f.free_symbols:
return f*(b - a + 1)
if a == b:
return f.subs(i, a)
if isinstance(f, Piecewise):
if not any(i in arg.args[1].free_symbols for arg in f.args):
# Piecewise conditions do not depend on the dummy summation variable,
# therefore we can fold: Sum(Piecewise((e, c), ...), limits)
# --> Piecewise((Sum(e, limits), c), ...)
newargs = []
for arg in f.args:
newexpr = eval_sum(arg.expr, limits)
if newexpr is None:
return None
newargs.append((newexpr, arg.cond))
return f.func(*newargs)
if f.has(KroneckerDelta) and _has_simple_delta(f, limits[0]):
return deltasummation(f, limits)
dif = b - a
definite = dif.is_Integer
# Doing it directly may be faster if there are very few terms.
if definite and (dif < 100):
return eval_sum_direct(f, (i, a, b))
if isinstance(f, Piecewise):
return None
# Try to do it symbolically. Even when the number of terms is known,
# this can save time when b-a is big.
# We should try to transform to partial fractions
value = eval_sum_symbolic(f.expand(), (i, a, b))
if value is not None:
return value
# Do it directly
if definite:
return eval_sum_direct(f, (i, a, b))
def eval_sum_direct(expr, limits):
from sympy.core import Add
(i, a, b) = limits
dif = b - a
return Add(*[expr.subs(i, a + j) for j in range(dif + 1)])
def eval_sum_symbolic(f, limits):
from sympy.functions import harmonic, bernoulli
f_orig = f
(i, a, b) = limits
if not f.has(i):
return f*(b - a + 1)
# Linearity
if f.is_Mul:
L, R = f.as_two_terms()
if not L.has(i):
sR = eval_sum_symbolic(R, (i, a, b))
if sR:
return L*sR
if not R.has(i):
sL = eval_sum_symbolic(L, (i, a, b))
if sL:
return R*sL
try:
f = apart(f, i) # see if it becomes an Add
except PolynomialError:
pass
if f.is_Add:
L, R = f.as_two_terms()
lrsum = telescopic(L, R, (i, a, b))
if lrsum:
return lrsum
lsum = eval_sum_symbolic(L, (i, a, b))
rsum = eval_sum_symbolic(R, (i, a, b))
if None not in (lsum, rsum):
r = lsum + rsum
            if r is not S.NaN:
return r
# Polynomial terms with Faulhaber's formula
n = Wild('n')
result = f.match(i**n)
if result is not None:
n = result[n]
if n.is_Integer:
if n >= 0:
                if (b is S.Infinity and a is not S.NegativeInfinity) or \
                   (a is S.NegativeInfinity and b is not S.Infinity):
return S.Infinity
return ((bernoulli(n + 1, b + 1) - bernoulli(n + 1, a))/(n + 1)).expand()
elif a.is_Integer and a >= 1:
if n == -1:
return harmonic(b) - harmonic(a - 1)
else:
return harmonic(b, abs(n)) - harmonic(a - 1, abs(n))
if not (a.has(S.Infinity, S.NegativeInfinity) or
b.has(S.Infinity, S.NegativeInfinity)):
# Geometric terms
c1 = Wild('c1', exclude=[i])
c2 = Wild('c2', exclude=[i])
c3 = Wild('c3', exclude=[i])
e = f.match(c1**(c2*i + c3))
if e is not None:
p = (c1**c3).subs(e)
q = (c1**c2).subs(e)
r = p*(q**a - q**(b + 1))/(1 - q)
l = p*(b - a + 1)
return Piecewise((l, Eq(q, S.One)), (r, True))
r = gosper_sum(f, (i, a, b))
        if r not in (None, S.NaN):
return r
return eval_sum_hyper(f_orig, (i, a, b))
def _eval_sum_hyper(f, i, a):
""" Returns (res, cond). Sums from a to oo. """
from sympy.functions import hyper
from sympy.simplify import hyperexpand, hypersimp, fraction, simplify
from sympy.polys.polytools import Poly, factor
if a != 0:
return _eval_sum_hyper(f.subs(i, i + a), i, 0)
if f.subs(i, 0) == 0:
if simplify(f.subs(i, Dummy('i', integer=True, positive=True))) == 0:
return S(0), True
return _eval_sum_hyper(f.subs(i, i + 1), i, 0)
hs = hypersimp(f, i)
if hs is None:
return None
numer, denom = fraction(factor(hs))
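    # hs is the consecutive-term ratio f(i+1)/f(i); factor it into linear
    # pieces so the sum can be rewritten as a hypergeometric function below.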
top, topl = numer.as_coeff_mul(i)
bot, botl = denom.as_coeff_mul(i)
ab = [top, bot]
factors = [topl, botl]
params = [[], []]
for k in range(2):
for fac in factors[k]:
mul = 1
if fac.is_Pow:
mul = fac.exp
fac = fac.base
if not mul.is_Integer:
return None
p = Poly(fac, i)
if p.degree() != 1:
return None
m, n = p.all_coeffs()
ab[k] *= m**mul
params[k] += [n/m]*mul
# Add "1" to numerator parameters, to account for implicit n! in
# hypergeometric series.
ap = params[0] + [1]
bq = params[1]
x = ab[0]/ab[1]
h = hyper(ap, bq, x)
return f.subs(i, 0)*hyperexpand(h), h.convergence_statement
def eval_sum_hyper(f, i_a_b):
from sympy.logic.boolalg import And
i, a, b = i_a_b
if (b - a).is_Integer:
# We are never going to do better than doing the sum in the obvious way
return None
old_sum = Sum(f, (i, a, b))
if b != S.Infinity:
if a == S.NegativeInfinity:
res = _eval_sum_hyper(f.subs(i, -i), i, -b)
if res is not None:
return Piecewise(res, (old_sum, True))
else:
res1 = _eval_sum_hyper(f, i, a)
res2 = _eval_sum_hyper(f, i, b + 1)
if res1 is None or res2 is None:
return None
(res1, cond1), (res2, cond2) = res1, res2
cond = And(cond1, cond2)
if cond == False:
return None
return Piecewise((res1 - res2, cond), (old_sum, True))
if a == S.NegativeInfinity:
res1 = _eval_sum_hyper(f.subs(i, -i), i, 1)
res2 = _eval_sum_hyper(f, i, 0)
if res1 is None or res2 is None:
return None
res1, cond1 = res1
res2, cond2 = res2
cond = And(cond1, cond2)
if cond == False:
return None
return Piecewise((res1 + res2, cond), (old_sum, True))
# Now b == oo, a != -oo
res = _eval_sum_hyper(f, i, a)
if res is not None:
r, c = res
if c == False:
if r.is_number:
f = f.subs(i, Dummy('i', integer=True, positive=True) + a)
if f.is_positive or f.is_zero:
return S.Infinity
elif f.is_negative:
return S.NegativeInfinity
return None
return Piecewise(res, (old_sum, True))
|
minrk/nbgrader | refs/heads/master | nbgrader/tests/preprocessors/test_limitoutput.py | 2 | import pytest
from textwrap import dedent
from ...preprocessors import LimitOutput
from .base import BaseTestPreprocessor
from .. import create_code_cell, create_text_cell
@pytest.fixture
def preprocessor():
return LimitOutput()
class TestLimitOutput(BaseTestPreprocessor):
def test_long_output(self):
nb = self._read_nb("files/long-output.ipynb")
cell, = nb.cells
output, = cell.outputs
assert len(output.text.split("\n")) > 1000
pp = LimitOutput()
nb, resources = pp.preprocess(nb, {})
cell, = nb.cells
output, = cell.outputs
assert len(output.text.split("\n")) == 1000
def test_infinite_recursion(self):
nb = self._read_nb("files/infinite-recursion.ipynb")
pp = LimitOutput()
nb, resources = pp.preprocess(nb, {})
cell, = nb.cells
output, = cell.outputs
assert len(output.traceback) == 100
|
jakesyl/fail2ban | refs/heads/master | fail2ban/tests/clientreadertestcase.py | 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2013 Yaroslav Halchenko"
__license__ = "GPL"
import os, glob, shutil, tempfile, unittest, re, logging
from ..client.configreader import ConfigReaderUnshared
from ..client import configparserinc
from ..client.jailreader import JailReader
from ..client.filterreader import FilterReader
from ..client.jailsreader import JailsReader
from ..client.actionreader import ActionReader
from ..client.configurator import Configurator
from .utils import LogCaptureTestCase
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "files")
from .utils import CONFIG_DIR
STOCK = os.path.exists(os.path.join('config','fail2ban.conf'))
IMPERFECT_CONFIG = os.path.join(os.path.dirname(__file__), 'config')
class ConfigReaderTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.d = tempfile.mkdtemp(prefix="f2b-temp")
self.c = ConfigReaderUnshared(basedir=self.d)
def tearDown(self):
"""Call after every test case."""
shutil.rmtree(self.d)
def _write(self, fname, value=None, content=None):
		# if the file lives in a subdirectory (e.g. a .d directory), create it first
if os.path.sep in fname:
d = os.path.dirname(fname)
d_ = os.path.join(self.d, d)
if not os.path.exists(d_):
os.makedirs(d_)
f = open("%s/%s" % (self.d, fname), "w")
if value is not None:
f.write("""
[section]
option = %s
""" % value)
if content is not None:
f.write(content)
f.close()
def _remove(self, fname):
os.unlink("%s/%s" % (self.d, fname))
self.assertTrue(self.c.read('c')) # we still should have some
def _getoption(self, f='c'):
self.assertTrue(self.c.read(f)) # we got some now
return self.c.getOptions('section', [("int", 'option')])['option']
def testInaccessibleFile(self):
f = os.path.join(self.d, "d.conf") # inaccessible file
self._write('d.conf', 0)
self.assertEqual(self._getoption('d'), 0)
os.chmod(f, 0)
		# fragile test, known to fail e.g. under Cygwin where permissions
		# seem not to be enforced, hence the condition
if not os.access(f, os.R_OK):
self.assertFalse(self.c.read('d')) # should not be readable BUT present
else:
# SkipTest introduced only in 2.7 thus can't yet use generally
# raise unittest.SkipTest("Skipping on %s -- access rights are not enforced" % platform)
pass
def testOptionalDotDDir(self):
self.assertFalse(self.c.read('c')) # nothing is there yet
self._write("c.conf", "1")
self.assertEqual(self._getoption(), 1)
self._write("c.conf", "2") # overwrite
self.assertEqual(self._getoption(), 2)
self._write("c.d/98.conf", "998") # add 1st override in .d/
self.assertEqual(self._getoption(), 998)
self._write("c.d/90.conf", "990") # add previously sorted override in .d/
self.assertEqual(self._getoption(), 998) # should stay the same
self._write("c.d/99.conf", "999") # now override in a way without sorting we possibly get a failure
self.assertEqual(self._getoption(), 999)
self._write("c.local", "3") # add override in .local
self.assertEqual(self._getoption(), 3)
self._write("c.d/1.local", "4") # add override in .local
self.assertEqual(self._getoption(), 4)
self._remove("c.d/1.local")
self._remove("c.local")
self.assertEqual(self._getoption(), 999)
self._remove("c.d/99.conf")
self.assertEqual(self._getoption(), 998)
self._remove("c.d/98.conf")
self.assertEqual(self._getoption(), 990)
self._remove("c.d/90.conf")
self.assertEqual(self._getoption(), 2)
def testInterpolations(self):
self.assertFalse(self.c.read('i')) # nothing is there yet
self._write("i.conf", value=None, content="""
[DEFAULT]
b = a
zz = the%(__name__)s
[section]
y = 4%(b)s
e = 5${b}
z = %(__name__)s
[section2]
z = 3%(__name__)s
""")
self.assertTrue(self.c.read('i'))
self.assertEqual(self.c.sections(), ['section', 'section2'])
self.assertEqual(self.c.get('section', 'y'), '4a') # basic interpolation works
self.assertEqual(self.c.get('section', 'e'), '5${b}') # no extended interpolation
self.assertEqual(self.c.get('section', 'z'), 'section') # __name__ works
self.assertEqual(self.c.get('section', 'zz'), 'thesection') # __name__ works even 'delayed'
self.assertEqual(self.c.get('section2', 'z'), '3section2') # and differs per section ;)
def testComments(self):
self.assertFalse(self.c.read('g')) # nothing is there yet
self._write("g.conf", value=None, content="""
[DEFAULT]
# A comment
b = a
c = d ;in line comment
""")
self.assertTrue(self.c.read('g'))
self.assertEqual(self.c.get('DEFAULT', 'b'), 'a')
self.assertEqual(self.c.get('DEFAULT', 'c'), 'd')
class JailReaderTest(LogCaptureTestCase):
def __init__(self, *args, **kwargs):
super(JailReaderTest, self).__init__(*args, **kwargs)
self.__share_cfg = {}
def testIncorrectJail(self):
jail = JailReader('XXXABSENTXXX', basedir=CONFIG_DIR, share_config = self.__share_cfg)
self.assertRaises(ValueError, jail.read)
def testJailActionEmpty(self):
jail = JailReader('emptyaction', basedir=IMPERFECT_CONFIG, share_config = self.__share_cfg)
self.assertTrue(jail.read())
self.assertTrue(jail.getOptions())
self.assertTrue(jail.isEnabled())
self.assertTrue(self._is_logged('No filter set for jail emptyaction'))
self.assertTrue(self._is_logged('No actions were defined for emptyaction'))
def testJailActionFilterMissing(self):
jail = JailReader('missingbitsjail', basedir=IMPERFECT_CONFIG, share_config = self.__share_cfg)
self.assertTrue(jail.read())
self.assertFalse(jail.getOptions())
self.assertTrue(jail.isEnabled())
self.assertTrue(self._is_logged("Found no accessible config files for 'filter.d/catchallthebadies' under %s" % IMPERFECT_CONFIG))
self.assertTrue(self._is_logged('Unable to read the filter'))
def TODOtestJailActionBrokenDef(self):
jail = JailReader('brokenactiondef', basedir=IMPERFECT_CONFIG, share_config = self.__share_cfg)
self.assertTrue(jail.read())
self.assertFalse(jail.getOptions())
self.assertTrue(jail.isEnabled())
self.printLog()
self.assertTrue(self._is_logged('Error in action definition joho[foo'))
self.assertTrue(self._is_logged('Caught exception: While reading action joho[foo we should have got 1 or 2 groups. Got: 0'))
if STOCK:
def testStockSSHJail(self):
jail = JailReader('sshd', basedir=CONFIG_DIR, share_config = self.__share_cfg) # we are running tests from root project dir atm
self.assertTrue(jail.read())
self.assertTrue(jail.getOptions())
self.assertFalse(jail.isEnabled())
self.assertEqual(jail.getName(), 'sshd')
jail.setName('ssh-funky-blocker')
self.assertEqual(jail.getName(), 'ssh-funky-blocker')
def testSplitOption(self):
# Simple example
option = "mail-whois[name=SSH]"
expected = ('mail-whois', {'name': 'SSH'})
result = JailReader.extractOptions(option)
self.assertEqual(expected, result)
self.assertEqual(('mail.who_is', {}), JailReader.extractOptions("mail.who_is"))
self.assertEqual(('mail.who_is', {'a':'cat', 'b':'dog'}), JailReader.extractOptions("mail.who_is[a=cat,b=dog]"))
self.assertEqual(('mail--ho_is', {}), JailReader.extractOptions("mail--ho_is"))
self.assertEqual(('mail--ho_is', {}), JailReader.extractOptions("mail--ho_is['s']"))
#self.printLog()
#self.assertTrue(self._is_logged("Invalid argument ['s'] in ''s''"))
self.assertEqual(('mail', {'a': ','}), JailReader.extractOptions("mail[a=',']"))
#self.assertRaises(ValueError, JailReader.extractOptions ,'mail-how[')
# Empty option
option = "abc[]"
expected = ('abc', {})
result = JailReader.extractOptions(option)
self.assertEqual(expected, result)
# More complex examples
option = 'option[opt01=abc,opt02="123",opt03="with=okay?",opt04="andwith,okay...",opt05="how about spaces",opt06="single\'in\'double",opt07=\'double"in"single\', opt08= leave some space, opt09=one for luck, opt10=, opt11=]'
expected = ('option', {
'opt01': "abc",
'opt02': "123",
'opt03': "with=okay?",
'opt04': "andwith,okay...",
'opt05': "how about spaces",
'opt06': "single'in'double",
'opt07': "double\"in\"single",
'opt08': "leave some space",
'opt09': "one for luck",
'opt10': "",
'opt11': "",
})
result = JailReader.extractOptions(option)
self.assertEqual(expected, result)
def testGlob(self):
d = tempfile.mkdtemp(prefix="f2b-temp")
# Generate few files
# regular file
f1 = os.path.join(d, 'f1')
open(f1, 'w').close()
# dangling link
f2 = os.path.join(d, 'f2')
os.symlink('nonexisting',f2)
# must be only f1
self.assertEqual(JailReader._glob(os.path.join(d, '*')), [f1])
# since f2 is dangling -- empty list
self.assertEqual(JailReader._glob(f2), [])
self.assertTrue(self._is_logged('File %s is a dangling link, thus cannot be monitored' % f2))
self.assertEqual(JailReader._glob(os.path.join(d, 'nonexisting')), [])
os.remove(f1)
os.remove(f2)
os.rmdir(d)
class FilterReaderTest(unittest.TestCase):
def testConvert(self):
output = [['set', 'testcase01', 'addfailregex',
"^\\s*(?:\\S+ )?(?:kernel: \\[\\d+\\.\\d+\\] )?(?:@vserver_\\S+ )"
"?(?:(?:\\[\\d+\\])?:\\s+[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?|"
"[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?(?:\\[\\d+\\])?:)?\\s*(?:"
"error: PAM: )?Authentication failure for .* from <HOST>\\s*$"],
['set', 'testcase01', 'addfailregex',
"^\\s*(?:\\S+ )?(?:kernel: \\[\\d+\\.\\d+\\] )?(?:@vserver_\\S+ )"
"?(?:(?:\\[\\d+\\])?:\\s+[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?|"
"[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?(?:\\[\\d+\\])?:)?\\s*(?:"
"error: PAM: )?User not known to the underlying authentication mo"
"dule for .* from <HOST>\\s*$"],
['set', 'testcase01', 'addfailregex',
"^\\s*(?:\\S+ )?(?:kernel: \\[\\d+\\.\\d+\\] )?(?:@vserver_\\S+ )"
"?(?:(?:\\[\\d+\\])?:\\s+[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?|"
"[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?(?:\\[\\d+\\])?:)?\\s*(?:"
"error: PAM: )?User not known to the\\nunderlying authentication."
"+$<SKIPLINES>^.+ module for .* from <HOST>\\s*$"],
['set', 'testcase01', 'addignoreregex',
"^.+ john from host 192.168.1.1\\s*$"],
['set', 'testcase01', 'addjournalmatch',
"_COMM=sshd", "+", "_SYSTEMD_UNIT=sshd.service", "_UID=0"],
['set', 'testcase01', 'addjournalmatch',
"FIELD= with spaces ", "+", "AFIELD= with + char and spaces"],
['set', 'testcase01', 'datepattern', "%Y %m %d %H:%M:%S"],
			['set', 'testcase01', 'maxlines', "1"], # Last for override test
]
filterReader = FilterReader("testcase01", "testcase01", {})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
#filterReader.getOptions(["failregex", "ignoreregex"])
filterReader.getOptions(None)
# Add sort as configreader uses dictionary and therefore order
# is unreliable
self.assertEqual(sorted(filterReader.convert()), sorted(output))
filterReader = FilterReader(
"testcase01", "testcase01", {'maxlines': "5"})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
#filterReader.getOptions(["failregex", "ignoreregex"])
filterReader.getOptions(None)
output[-1][-1] = "5"
self.assertEqual(sorted(filterReader.convert()), sorted(output))
def testFilterReaderSubstitionDefault(self):
output = [['set', 'jailname', 'addfailregex', '[email protected] fromip=<IP>']]
filterReader = FilterReader('substition', "jailname", {})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
c = filterReader.convert()
self.assertEqual(sorted(c), sorted(output))
def testFilterReaderSubstitionSet(self):
output = [['set', 'jailname', 'addfailregex', '[email protected] fromip=<IP>']]
filterReader = FilterReader('substition', "jailname", {'honeypot': '[email protected]'})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
c = filterReader.convert()
self.assertEqual(sorted(c), sorted(output))
def testFilterReaderSubstitionFail(self):
filterReader = FilterReader('substition', "jailname", {'honeypot': '<sweet>', 'sweet': '<honeypot>'})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
self.assertRaises(ValueError, FilterReader.convert, filterReader)
def testFilterReaderExplicit(self):
# read explicit uses absolute path:
path_ = os.path.abspath(os.path.join(TEST_FILES_DIR, "filter.d"))
filterReader = FilterReader(os.path.join(path_, "testcase01.conf"), "testcase01", {})
self.assertEqual(filterReader.readexplicit(),
[os.path.join(path_, "testcase-common.conf"), os.path.join(path_, "testcase01.conf")]
)
try:
filterReader.getOptions(None)
# from included common
filterReader.get('Definition', '__prefix_line')
# from testcase01
filterReader.get('Definition', 'failregex')
filterReader.get('Definition', 'ignoreregex')
except Exception, e: # pragma: no cover - failed if reachable
self.fail('unexpected options after readexplicit: %s' % (e))
class JailsReaderTestCache(LogCaptureTestCase):
def _readWholeConf(self, basedir, force_enable=False, share_config=None):
		# read the whole configuration like a fail2ban-client would ...
configurator = Configurator(force_enable=force_enable, share_config=share_config)
configurator.setBaseDir(basedir)
configurator.readEarly()
configurator.getEarlyOptions()
configurator.readAll()
# from here we test a cache with all includes / before / after :
self.assertTrue(configurator.getOptions(None))
def _getLoggedReadCount(self, filematch):
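		# Count how many "Reading file(s): ..." lines in the captured log match
		# the given file pattern, i.e. how often that config file was (re)read.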
cnt = 0
for s in self.getLog().rsplit('\n'):
if re.match(r"^\s*Reading files?: .*/"+filematch, s):
cnt += 1
return cnt
def testTestJailConfCache(self):
saved_ll = configparserinc.logLevel
configparserinc.logLevel = logging.DEBUG
basedir = tempfile.mkdtemp("fail2ban_conf")
try:
shutil.rmtree(basedir)
shutil.copytree(CONFIG_DIR, basedir)
shutil.copy(CONFIG_DIR + '/jail.conf', basedir + '/jail.local')
shutil.copy(CONFIG_DIR + '/fail2ban.conf', basedir + '/fail2ban.local')
# common sharing handle for this test:
share_cfg = dict()
			# read the whole configuration like a fail2ban-client would ...
self._readWholeConf(basedir, share_config=share_cfg)
# how many times jail.local was read:
cnt = self._getLoggedReadCount('jail.local')
# if cnt > 1:
# self.printLog()
self.assertTrue(cnt == 1, "Unexpected count by reading of jail files, cnt = %s" % cnt)
			# read the whole configuration like a fail2ban-client would, again ...
			# but this time force-enable all jails, to check that filters and actions are cached as well:
self._readWholeConf(basedir, force_enable=True, share_config=share_cfg)
cnt = self._getLoggedReadCount(r'jail\.local')
# still one (no more reads):
self.assertTrue(cnt == 1, "Unexpected count by second reading of jail files, cnt = %s" % cnt)
# same with filter:
cnt = self._getLoggedReadCount(r'filter\.d/common\.conf')
self.assertTrue(cnt == 1, "Unexpected count by reading of filter files, cnt = %s" % cnt)
# same with action:
cnt = self._getLoggedReadCount(r'action\.d/iptables-common\.conf')
self.assertTrue(cnt == 1, "Unexpected count by reading of action files, cnt = %s" % cnt)
finally:
shutil.rmtree(basedir)
configparserinc.logLevel = saved_ll
class JailsReaderTest(LogCaptureTestCase):
def __init__(self, *args, **kwargs):
super(JailsReaderTest, self).__init__(*args, **kwargs)
self.__share_cfg = {}
def testProvidingBadBasedir(self):
if not os.path.exists('/XXX'):
reader = JailsReader(basedir='/XXX')
self.assertRaises(ValueError, reader.read)
def testReadTestJailConf(self):
jails = JailsReader(basedir=IMPERFECT_CONFIG, share_config=self.__share_cfg)
self.assertTrue(jails.read())
self.assertFalse(jails.getOptions())
self.assertRaises(ValueError, jails.convert)
comm_commands = jails.convert(allow_no_files=True)
self.maxDiff = None
self.assertEqual(sorted(comm_commands),
sorted([['add', 'emptyaction', 'auto'],
['add', 'test-known-interp', 'auto'],
['set', 'test-known-interp', 'addfailregex', 'failure test 1 (filter.d/test.conf) <HOST>'],
['set', 'test-known-interp', 'addfailregex', 'failure test 2 (filter.d/test.local) <HOST>'],
['set', 'test-known-interp', 'addfailregex', 'failure test 3 (jail.local) <HOST>'],
['start', 'test-known-interp'],
['add', 'missinglogfiles', 'auto'],
['set', 'missinglogfiles', 'addfailregex', '<IP>'],
['add', 'brokenaction', 'auto'],
['set', 'brokenaction', 'addfailregex', '<IP>'],
['set', 'brokenaction', 'addaction', 'brokenaction'],
['set',
'brokenaction',
'action',
'brokenaction',
'actionban',
'hit with big stick <ip>'],
['add', 'parse_to_end_of_jail.conf', 'auto'],
['set', 'parse_to_end_of_jail.conf', 'addfailregex', '<IP>'],
['start', 'emptyaction'],
['start', 'missinglogfiles'],
['start', 'brokenaction'],
['start', 'parse_to_end_of_jail.conf'],]))
self.assertTrue(self._is_logged("Errors in jail 'missingbitsjail'. Skipping..."))
self.assertTrue(self._is_logged("No file(s) found for glob /weapons/of/mass/destruction"))
if STOCK:
def testReadStockActionConf(self):
for actionConfig in glob.glob(os.path.join(CONFIG_DIR, 'action.d', '*.conf')):
actionName = os.path.basename(actionConfig).replace('.conf', '')
actionReader = ActionReader(actionName, "TEST", {}, basedir=CONFIG_DIR)
self.assertTrue(actionReader.read())
actionReader.getOptions({}) # populate _opts
if not actionName.endswith('-common'):
self.assertTrue('Definition' in actionReader.sections(),
msg="Action file %r is lacking [Definition] section" % actionConfig)
# all must have some actionban defined
self.assertTrue(actionReader._opts.get('actionban', '').strip(),
msg="Action file %r is lacking actionban" % actionConfig)
self.assertTrue('Init' in actionReader.sections(),
msg="Action file %r is lacking [Init] section" % actionConfig)
def testReadStockJailConf(self):
jails = JailsReader(basedir=CONFIG_DIR, share_config=self.__share_cfg) # we are running tests from root project dir atm
self.assertTrue(jails.read()) # opens fine
self.assertTrue(jails.getOptions()) # reads fine
comm_commands = jails.convert()
# by default None of the jails is enabled and we get no
# commands to communicate to the server
self.assertEqual(comm_commands, [])
# TODO: make sure this is handled well
## We should not "read" some bogus jail
#old_comm_commands = comm_commands[:] # make a copy
#self.assertRaises(ValueError, jails.getOptions, "BOGUS")
#self.printLog()
#self.assertTrue(self._is_logged("No section: 'BOGUS'"))
## and there should be no side-effects
#self.assertEqual(jails.convert(), old_comm_commands)
allFilters = set()
# All jails must have filter and action set
# TODO: evolve into a parametric test
for jail in jails.sections():
if jail == 'INCLUDES':
continue
filterName = jails.get(jail, 'filter')
allFilters.add(filterName)
self.assertTrue(len(filterName))
# moreover we must have a file for it
# and it must be readable as a Filter
filterReader = FilterReader(filterName, jail, {})
filterReader.setBaseDir(CONFIG_DIR)
self.assertTrue(filterReader.read(),"Failed to read filter:" + filterName) # opens fine
filterReader.getOptions({}) # reads fine
# test if filter has failregex set
self.assertTrue(filterReader._opts.get('failregex', '').strip())
actions = jails.get(jail, 'action')
self.assertTrue(len(actions.strip()))
# somewhat duplicating here what is done in JailsReader if
# the jail is enabled
for act in actions.split('\n'):
actName, actOpt = JailReader.extractOptions(act)
self.assertTrue(len(actName))
self.assertTrue(isinstance(actOpt, dict))
if actName == 'iptables-multiport':
self.assertTrue('port' in actOpt)
actionReader = ActionReader(
actName, jail, {}, basedir=CONFIG_DIR)
self.assertTrue(actionReader.read())
actionReader.getOptions({}) # populate _opts
cmds = actionReader.convert()
self.assertTrue(len(cmds))
# all must have some actionban
self.assertTrue(actionReader._opts.get('actionban', '').strip())
# Verify that all filters found under config/ have a jail
def testReadStockJailFilterComplete(self):
jails = JailsReader(basedir=CONFIG_DIR, force_enable=True, share_config=self.__share_cfg)
self.assertTrue(jails.read()) # opens fine
self.assertTrue(jails.getOptions()) # reads fine
# grab all filter names
filters = set(os.path.splitext(os.path.split(a)[1])[0]
for a in glob.glob(os.path.join('config', 'filter.d', '*.conf'))
if not a.endswith('common.conf'))
filters_jail = set(jail.options['filter'] for jail in jails.jails)
self.maxDiff = None
self.assertTrue(filters.issubset(filters_jail),
"More filters exists than are referenced in stock jail.conf %r" % filters.difference(filters_jail))
self.assertTrue(filters_jail.issubset(filters),
"Stock jail.conf references non-existent filters %r" % filters_jail.difference(filters))
def testReadStockJailConfForceEnabled(self):
		# more of a smoke test to make sure that there are no obvious surprises
		# on users' systems when enabling the shipped jails
jails = JailsReader(basedir=CONFIG_DIR, force_enable=True, share_config=self.__share_cfg) # we are running tests from root project dir atm
self.assertTrue(jails.read()) # opens fine
self.assertTrue(jails.getOptions()) # reads fine
comm_commands = jails.convert(allow_no_files=True)
# by default we have lots of jails ;)
self.assertTrue(len(comm_commands))
# and we know even some of them by heart
for j in ['sshd', 'recidive']:
# by default we have 'auto' backend ATM
self.assertTrue(['add', j, 'auto'] in comm_commands)
# and warn on useDNS
self.assertTrue(['set', j, 'usedns', 'warn'] in comm_commands)
self.assertTrue(['start', j] in comm_commands)
# last commands should be the 'start' commands
self.assertEqual(comm_commands[-1][0], 'start')
for j in jails._JailsReader__jails:
actions = j._JailReader__actions
jail_name = j.getName()
# make sure that all of the jails have actions assigned,
# otherwise it makes little to no sense
self.assertTrue(len(actions),
msg="No actions found for jail %s" % jail_name)
# Test for presence of blocktype (in relation to gh-232)
for action in actions:
commands = action.convert()
action_name = action.getName()
if '<blocktype>' in str(commands):
# Verify that it is among cInfo
self.assertTrue('blocktype' in action._initOpts)
# Verify that we have a call to set it up
blocktype_present = False
target_command = ['set', jail_name, 'action', action_name, 'blocktype']
for command in commands:
if (len(command) > 5 and
command[:5] == target_command):
blocktype_present = True
continue
self.assertTrue(
blocktype_present,
msg="Found no %s command among %s"
% (target_command, str(commands)) )
def testStockConfigurator(self):
configurator = Configurator()
configurator.setBaseDir(CONFIG_DIR)
self.assertEqual(configurator.getBaseDir(), CONFIG_DIR)
configurator.readEarly()
opts = configurator.getEarlyOptions()
# our current default settings
self.assertEqual(opts['socket'], '/var/run/fail2ban/fail2ban.sock')
self.assertEqual(opts['pidfile'], '/var/run/fail2ban/fail2ban.pid')
configurator.getOptions()
configurator.convertToProtocol()
commands = configurator.getConfigStream()
# and there is logging information left to be passed into the
# server
self.assertEqual(sorted(commands),
[['set', 'dbfile',
'/var/lib/fail2ban/fail2ban.sqlite3'],
['set', 'dbpurgeage', 86400],
['set', 'loglevel', "INFO"],
['set', 'logtarget', '/var/log/fail2ban.log']])
# and if we force change configurator's fail2ban's baseDir
# there should be an error message (test visually ;) --
# otherwise just a code smoke test)
configurator._Configurator__jails.setBaseDir('/tmp')
self.assertEqual(configurator._Configurator__jails.getBaseDir(), '/tmp')
self.assertEqual(configurator.getBaseDir(), CONFIG_DIR)
def testMultipleSameAction(self):
basedir = tempfile.mkdtemp("fail2ban_conf")
os.mkdir(os.path.join(basedir, "filter.d"))
os.mkdir(os.path.join(basedir, "action.d"))
open(os.path.join(basedir, "action.d", "testaction1.conf"), 'w').close()
open(os.path.join(basedir, "filter.d", "testfilter1.conf"), 'w').close()
jailfd = open(os.path.join(basedir, "jail.conf"), 'w')
jailfd.write("""
[testjail1]
enabled = true
action = testaction1[actname=test1]
testaction1[actname=test2]
testaction.py
testaction.py[actname=test3]
filter = testfilter1
""")
jailfd.close()
jails = JailsReader(basedir=basedir, share_config=self.__share_cfg)
self.assertTrue(jails.read())
self.assertTrue(jails.getOptions())
comm_commands = jails.convert(allow_no_files=True)
add_actions = [comm[3:] for comm in comm_commands
if comm[:3] == ['set', 'testjail1', 'addaction']]
self.assertEqual(len(set(action[0] for action in add_actions)), 4)
# Python actions should not be passed `actname`
self.assertEqual(add_actions[-1][-1], "{}")
shutil.rmtree(basedir)
|
edcomstock/werkzeug | refs/heads/master | werkzeug/formparser.py | 162 | # -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
from io import BytesIO
from tempfile import TemporaryFile
from itertools import chain, repeat, tee
from functools import update_wrapper
from werkzeug._compat import to_native, text_type
from werkzeug.urls import url_decode_stream
from werkzeug.wsgi import make_line_iter, \
get_input_stream, get_content_length
from werkzeug.datastructures import Headers, FileStorage, MultiDict
from werkzeug.http import parse_options_header
#: an iterator that yields empty strings
_empty_string_iter = repeat('')
#: a regular expression for multipart boundaries
_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')
#: HTTP transfer encodings for multipart messages that we support and that
#: are also available in Python.
_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
def default_stream_factory(total_content_length, content_type, filename,
                           content_length=None):
"""The stream factory that is used per default."""
if total_content_length > 1024 * 500:
return TemporaryFile('wb+')
return BytesIO()
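# For illustration: a request reporting a 2 MB content length gets its file
# parts spooled to a TemporaryFile, while parts of a small request (say 10 KB
# total) stay in an in-memory BytesIO buffer (the threshold above is 500 KB).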
def parse_form_data(environ, stream_factory=None, charset='utf-8',
errors='replace', max_form_memory_size=None,
max_content_length=None, cls=None,
silent=True):
"""Parse the form data in the environ and return it as tuple in the form
``(stream, form, files)``. You should only call this method if the
transport method is `POST`, `PUT`, or `PATCH`.
If the mimetype of the data transmitted is `multipart/form-data` the
files multidict will be filled with `FileStorage` objects. If the
mimetype is unknown the input stream is wrapped and returned as first
argument, else the stream is empty.
This is a shortcut for the common usage of :class:`FormDataParser`.
Have a look at :ref:`dealing-with-request-data` for more details.
.. versionadded:: 0.5
The `max_form_memory_size`, `max_content_length` and
`cls` parameters were added.
.. versionadded:: 0.5.1
The optional `silent` flag was added.
:param environ: the WSGI environment to be used for parsing.
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
:return: A tuple in the form ``(stream, form, files)``.
"""
return FormDataParser(stream_factory, charset, errors,
max_form_memory_size, max_content_length,
cls, silent).parse_from_environ(environ)
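# A minimal usage sketch for parse_form_data(), e.g. inside a WSGI application
# (illustrative only; `environ` is assumed to be the WSGI dict handed in by the
# server, and the field names are made up):
#
#     stream, form, files = parse_form_data(environ)
#     username = form.get('username')
#     upload = files.get('avatar')          # a FileStorage instance, if present
#     if upload is not None:
#         upload.save('/tmp/avatar.png')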
def exhaust_stream(f):
    """Helper decorator for methods that exhaust the stream on return."""
def wrapper(self, stream, *args, **kwargs):
try:
return f(self, stream, *args, **kwargs)
finally:
exhaust = getattr(stream, 'exhaust', None)
if exhaust is not None:
exhaust()
else:
while 1:
chunk = stream.read(1024 * 64)
if not chunk:
break
return update_wrapper(wrapper, f)
class FormDataParser(object):
"""This class implements parsing of form data for Werkzeug. By itself
it can parse multipart and url encoded form data. It can be subclassed
and extended but for most mimetypes it is a better idea to use the
untouched stream and expose it as separate attributes on a request
object.
.. versionadded:: 0.8
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
"""
def __init__(self, stream_factory=None, charset='utf-8',
errors='replace', max_form_memory_size=None,
max_content_length=None, cls=None,
silent=True):
if stream_factory is None:
stream_factory = default_stream_factory
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
self.max_content_length = max_content_length
if cls is None:
cls = MultiDict
self.cls = cls
self.silent = silent
def get_parse_func(self, mimetype, options):
return self.parse_functions.get(mimetype)
def parse_from_environ(self, environ):
"""Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``.
"""
content_type = environ.get('CONTENT_TYPE', '')
content_length = get_content_length(environ)
mimetype, options = parse_options_header(content_type)
return self.parse(get_input_stream(environ), mimetype,
content_length, options)
def parse(self, stream, mimetype, content_length, options=None):
"""Parses the information from the given stream, mimetype,
content length and mimetype parameters.
:param stream: an input stream
:param mimetype: the mimetype of the data
:param content_length: the content length of the incoming data
:param options: optional mimetype parameters (used for
the multipart boundary for instance)
:return: A tuple in the form ``(stream, form, files)``.
"""
if self.max_content_length is not None and \
content_length is not None and \
content_length > self.max_content_length:
raise exceptions.RequestEntityTooLarge()
if options is None:
options = {}
parse_func = self.get_parse_func(mimetype, options)
if parse_func is not None:
try:
return parse_func(self, stream, mimetype,
content_length, options)
except ValueError:
if not self.silent:
raise
return stream, self.cls(), self.cls()
@exhaust_stream
def _parse_multipart(self, stream, mimetype, content_length, options):
parser = MultiPartParser(self.stream_factory, self.charset, self.errors,
max_form_memory_size=self.max_form_memory_size,
cls=self.cls)
boundary = options.get('boundary')
if boundary is None:
raise ValueError('Missing boundary')
if isinstance(boundary, text_type):
boundary = boundary.encode('ascii')
form, files = parser.parse(stream, boundary, content_length)
return stream, form, files
@exhaust_stream
def _parse_urlencoded(self, stream, mimetype, content_length, options):
if self.max_form_memory_size is not None and \
content_length is not None and \
content_length > self.max_form_memory_size:
raise exceptions.RequestEntityTooLarge()
form = url_decode_stream(stream, self.charset,
errors=self.errors, cls=self.cls)
return stream, form, self.cls()
#: mapping of mimetypes to parsing functions
parse_functions = {
'multipart/form-data': _parse_multipart,
'application/x-www-form-urlencoded': _parse_urlencoded,
'application/x-url-encoded': _parse_urlencoded
}
def is_valid_multipart_boundary(boundary):
"""Checks if the string given is a valid multipart boundary."""
return _multipart_boundary_re.match(boundary) is not None
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ['\r\n', b'\r\n']:
return line[:-2], True
elif line[-1:] in ['\r', '\n', b'\r', b'\n']:
return line[:-1], True
return line, False
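# For illustration: _line_parse('foo\r\n') == ('foo', True) while
# _line_parse('foo') == ('foo', False); bytes input behaves the same way.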
def parse_multipart_headers(iterable):
"""Parses multipart headers from an iterable that yields lines (including
the trailing newline symbol). The iterable has to be newline terminated.
The iterable will stop at the line where the headers ended so it can be
further consumed.
:param iterable: iterable of strings that are newline terminated
"""
result = []
for line in iterable:
line = to_native(line)
line, line_terminated = _line_parse(line)
if not line_terminated:
raise ValueError('unexpected end of line in multipart header')
if not line:
break
elif line[0] in ' \t' and result:
key, value = result[-1]
result[-1] = (key, value + '\n ' + line[1:])
else:
parts = line.split(':', 1)
if len(parts) == 2:
result.append((parts[0].strip(), parts[1].strip()))
    # we link the list to the headers, no need to create a copy, the
    # list was not shared anyway.
return Headers(result)
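# For illustration, a part header block such as
#
#     Content-Disposition: form-data; name="field"\r\n
#     Content-Type: text/plain\r\n
#     \r\n
#
# yields Headers([('Content-Disposition', 'form-data; name="field"'),
#                 ('Content-Type', 'text/plain')]) and leaves the iterator
# positioned at the first body line.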
_begin_form = 'begin_form'
_begin_file = 'begin_file'
_cont = 'cont'
_end = 'end'
class MultiPartParser(object):
def __init__(self, stream_factory=None, charset='utf-8', errors='replace',
max_form_memory_size=None, cls=None, buffer_size=64 * 1024):
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
if stream_factory is None:
stream_factory = default_stream_factory
if cls is None:
cls = MultiDict
self.cls = cls
# make sure the buffer size is divisible by four so that we can base64
# decode chunk by chunk
assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
# also the buffer size has to be at least 1024 bytes long or long headers
# will freak out the system
assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
self.buffer_size = buffer_size
def _fix_ie_filename(self, filename):
"""Internet Explorer 6 transmits the full file name if a file is
uploaded. This function strips the full path if it thinks the
filename is Windows-like absolute.
"""
if filename[1:3] == ':\\' or filename[:2] == '\\\\':
return filename.split('\\')[-1]
return filename
def _find_terminator(self, iterator):
"""The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
"""
for line in iterator:
if not line:
break
line = line.strip()
if line:
return line
return b''
def fail(self, message):
raise ValueError(message)
def get_part_encoding(self, headers):
transfer_encoding = headers.get('content-transfer-encoding')
if transfer_encoding is not None and \
transfer_encoding in _supported_multipart_encodings:
return transfer_encoding
def get_part_charset(self, headers):
# Figure out input charset for current part
content_type = headers.get('content-type')
if content_type:
mimetype, ct_params = parse_options_header(content_type)
return ct_params.get('charset', self.charset)
return self.charset
def start_file_streaming(self, filename, headers, total_content_length):
if isinstance(filename, bytes):
filename = filename.decode(self.charset, self.errors)
filename = self._fix_ie_filename(filename)
content_type = headers.get('content-type')
try:
content_length = int(headers['content-length'])
except (KeyError, ValueError):
content_length = 0
container = self.stream_factory(total_content_length, content_type,
filename, content_length)
return filename, container
def in_memory_threshold_reached(self, bytes):
raise exceptions.RequestEntityTooLarge()
def validate_boundary(self, boundary):
if not boundary:
self.fail('Missing boundary')
if not is_valid_multipart_boundary(boundary):
self.fail('Invalid boundary: %s' % boundary)
if len(boundary) > self.buffer_size: # pragma: no cover
# this should never happen because we check for a minimum size
# of 1024 and boundaries may not be longer than 200. The only
# situation when this happens is for non debug builds where
# the assert is skipped.
self.fail('Boundary longer than buffer size')
def parse_lines(self, file, boundary, content_length):
"""Generate parts of
``('begin_form', (headers, name))``
``('begin_file', (headers, name, filename))``
``('cont', bytestring)``
``('end', None)``
Always obeys the grammar
parts = ( begin_form cont* end |
begin_file cont* end )*
"""
next_part = b'--' + boundary
last_part = next_part + b'--'
iterator = chain(make_line_iter(file, limit=content_length,
buffer_size=self.buffer_size),
_empty_string_iter)
terminator = self._find_terminator(iterator)
if terminator == last_part:
return
elif terminator != next_part:
self.fail('Expected boundary at start of multipart data')
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get('content-disposition')
if disposition is None:
self.fail('Missing Content-Disposition header')
disposition, extra = parse_options_header(disposition)
transfer_encoding = self.get_part_encoding(headers)
name = extra.get('name')
filename = extra.get('filename')
            # if no filename is given this part is a plain form field and we
            # stream its value into memory. A list is used as a temporary
            # container.
if filename is None:
yield _begin_form, (headers, name)
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
yield _begin_file, (headers, name, filename)
buf = b''
for line in iterator:
if not line:
self.fail('unexpected end of stream')
if line[:2] == b'--':
terminator = line.rstrip()
if terminator in (next_part, last_part):
break
if transfer_encoding is not None:
if transfer_encoding == 'base64':
transfer_encoding = 'base64_codec'
try:
line = codecs.decode(line, transfer_encoding)
except Exception:
self.fail('could not decode transfer encoded chunk')
# we have something in the buffer from the last iteration.
# this is usually a newline delimiter.
if buf:
yield _cont, buf
buf = b''
                # If the line ends with windows CRLF we write everything except
                # the last two bytes. In all other cases however we write
                # everything except the last byte. If it was a newline, that's
                # fine, otherwise it does not matter because we will write it
                # in the next iteration. This ensures we do not write the
                # final newline into the stream. That way we do not have to
                # truncate the stream. However, we do have to make sure that
                # if something other than a newline is in there we write it
                # out.
if line[-2:] == b'\r\n':
buf = b'\r\n'
cutoff = -2
else:
buf = line[-1:]
cutoff = -1
yield _cont, line[:cutoff]
else: # pragma: no cover
raise ValueError('unexpected end of part')
            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop off
            # certain values.
if buf not in (b'', b'\r', b'\n', b'\r\n'):
yield _cont, buf
yield _end, None
def parse_parts(self, file, boundary, content_length):
"""Generate ``('file', (name, val))`` and
``('form', (name, val))`` parts.
"""
in_memory = 0
for ellt, ell in self.parse_lines(file, boundary, content_length):
if ellt == _begin_file:
headers, name, filename = ell
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length)
_write = container.write
elif ellt == _begin_form:
headers, name = ell
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
elif ellt == _cont:
_write(ell)
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(ell)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
elif ellt == _end:
if is_file:
container.seek(0)
yield ('file',
(name, FileStorage(container, filename, name,
headers=headers)))
else:
part_charset = self.get_part_charset(headers)
yield ('form',
(name, b''.join(container).decode(
part_charset, self.errors)))
def parse(self, file, boundary, content_length):
formstream, filestream = tee(
self.parse_parts(file, boundary, content_length), 2)
form = (p[1] for p in formstream if p[0] == 'form')
files = (p[1] for p in filestream if p[0] == 'file')
return self.cls(form), self.cls(files)
from werkzeug import exceptions
|
ORGAN-IZE/fiftythree-client | refs/heads/master | setup.py | 1 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re
with open('fiftythree/__init__.py', 'r') as init_file:
version = re.search(
'^__version__ = [\'"]([^\'"]+)[\'"]',
init_file.read(),
re.MULTILINE,
).group(1)
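# For example, a line such as __version__ = '1.2.3' in fiftythree/__init__.py
# makes the search above yield version == '1.2.3'.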
try:
long_description = open("README.rst").read()
except IOError:
long_description = ''
setup(
name='fiftythree-client',
version=version,
description='A python client for the 53 API provided by ORGANIZE.org',
license='MIT',
author='Dana Spiegel',
author_email='[email protected]',
url='https://github.com/ORGAN-IZE/fiftythree-client',
packages=find_packages(),
long_description=long_description,
install_requires=[
'requests>=2.4',
],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
]
)
|
arnaud-morvan/QGIS | refs/heads/master | python/plugins/processing/core/defaultproviders.py | 32 | # -*- coding: utf-8 -*-
"""
***************************************************************************
defaultproviders.py
---------------------
Date : May 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'May 2016'
__copyright__ = '(C) 2016, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def loadDefaultProviders():
# this is here just to "trigger" the above imports so providers are loaded
# and can be found by the Processing.initialize() method
pass
|
alon/servo | refs/heads/master | tests/wpt/css-tests/tools/pytest/testing/code/test_source.py | 171 | # flake8: noqa
# disable flake check on this file because some constructs are strange
# or redundant on purpose and can't be disabled on a line-by-line basis
import sys
import _pytest._code
import py
import pytest
from _pytest._code import Source
from _pytest._code.source import _ast
if _ast is not None:
astonly = pytest.mark.nothing
else:
astonly = pytest.mark.xfail("True", reason="only works with AST-compile")
failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")
def test_source_str_function():
x = Source("3")
assert str(x) == "3"
x = Source(" 3")
assert str(x) == "3"
x = Source("""
3
""", rstrip=False)
assert str(x) == "\n3\n "
x = Source("""
3
""", rstrip=True)
assert str(x) == "\n3"
def test_unicode():
try:
unicode
except NameError:
return
x = Source(unicode("4"))
assert str(x) == "4"
co = _pytest._code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
val = eval(co)
assert isinstance(val, unicode)
def test_source_from_function():
source = _pytest._code.Source(test_source_str_function)
assert str(source).startswith('def test_source_str_function():')
def test_source_from_method():
class TestClass:
def test_method(self):
pass
source = _pytest._code.Source(TestClass().test_method)
assert source.lines == ["def test_method(self):",
" pass"]
def test_source_from_lines():
lines = ["a \n", "b\n", "c"]
source = _pytest._code.Source(lines)
assert source.lines == ['a ', 'b', 'c']
def test_source_from_inner_function():
def f():
pass
source = _pytest._code.Source(f, deindent=False)
assert str(source).startswith(' def f():')
source = _pytest._code.Source(f)
assert str(source).startswith('def f():')
def test_source_putaround_simple():
source = Source("raise ValueError")
source = source.putaround(
"try:", """\
except ValueError:
x = 42
else:
x = 23""")
assert str(source)=="""\
try:
raise ValueError
except ValueError:
x = 42
else:
x = 23"""
def test_source_putaround():
source = Source()
source = source.putaround("""
if 1:
x=1
""")
assert str(source).strip() == "if 1:\n x=1"
def test_source_strips():
source = Source("")
assert source == Source()
assert str(source) == ''
assert source.strip() == source
def test_source_strip_multiline():
source = Source()
source.lines = ["", " hello", " "]
source2 = source.strip()
assert source2.lines == [" hello"]
def test_syntaxerror_rerepresentation():
ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz')
assert ex.value.lineno == 1
assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython?
assert ex.value.text.strip(), 'x x'
def test_isparseable():
assert Source("hello").isparseable()
assert Source("if 1:\n pass").isparseable()
assert Source(" \nif 1:\n pass").isparseable()
assert not Source("if 1:\n").isparseable()
assert not Source(" \nif 1:\npass").isparseable()
assert not Source(chr(0)).isparseable()
class TestAccesses:
source = Source("""\
def f(x):
pass
def g(x):
pass
""")
def test_getrange(self):
x = self.source[0:2]
assert x.isparseable()
assert len(x.lines) == 2
assert str(x) == "def f(x):\n pass"
def test_getline(self):
x = self.source[0]
assert x == "def f(x):"
def test_len(self):
assert len(self.source) == 4
def test_iter(self):
l = [x for x in self.source]
assert len(l) == 4
class TestSourceParsingAndCompiling:
source = Source("""\
def f(x):
assert (x ==
3 +
4)
""").strip()
def test_compile(self):
co = _pytest._code.compile("x=3")
d = {}
exec (co, d)
assert d['x'] == 3
def test_compile_and_getsource_simple(self):
co = _pytest._code.compile("x=3")
exec (co)
source = _pytest._code.Source(co)
assert str(source) == "x=3"
def test_compile_and_getsource_through_same_function(self):
def gensource(source):
return _pytest._code.compile(source)
co1 = gensource("""
def f():
raise KeyError()
""")
co2 = gensource("""
def f():
raise ValueError()
""")
source1 = py.std.inspect.getsource(co1)
assert 'KeyError' in source1
source2 = py.std.inspect.getsource(co2)
assert 'ValueError' in source2
def test_getstatement(self):
#print str(self.source)
ass = str(self.source[1:])
for i in range(1, 4):
#print "trying start in line %r" % self.source[i]
s = self.source.getstatement(i)
#x = s.deindent()
assert str(s) == ass
def test_getstatementrange_triple_quoted(self):
#print str(self.source)
source = Source("""hello('''
''')""")
s = source.getstatement(0)
assert s == str(source)
s = source.getstatement(1)
assert s == str(source)
@astonly
def test_getstatementrange_within_constructs(self):
source = Source("""\
try:
try:
raise ValueError
except SomeThing:
pass
finally:
42
""")
assert len(source) == 7
# check all lineno's that could occur in a traceback
#assert source.getstatementrange(0) == (0, 7)
#assert source.getstatementrange(1) == (1, 5)
assert source.getstatementrange(2) == (2, 3)
assert source.getstatementrange(3) == (3, 4)
assert source.getstatementrange(4) == (4, 5)
#assert source.getstatementrange(5) == (0, 7)
assert source.getstatementrange(6) == (6, 7)
def test_getstatementrange_bug(self):
source = Source("""\
try:
x = (
y +
z)
except:
pass
""")
assert len(source) == 6
assert source.getstatementrange(2) == (1, 4)
def test_getstatementrange_bug2(self):
source = Source("""\
assert (
33
==
[
X(3,
b=1, c=2
),
]
)
""")
assert len(source) == 9
assert source.getstatementrange(5) == (0, 9)
def test_getstatementrange_ast_issue58(self):
source = Source("""\
def test_some():
for a in [a for a in
CAUSE_ERROR]: pass
x = 3
""")
assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_getstatementrange_out_of_bounds_py3(self):
source = Source("if xxx:\n from .collections import something")
r = source.getstatementrange(1)
assert r == (1,2)
def test_getstatementrange_with_syntaxerror_issue7(self):
source = Source(":")
pytest.raises(SyntaxError, lambda: source.getstatementrange(0))
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_compile_to_ast(self):
import ast
source = Source("x = 4")
mod = source.compile(flag=ast.PyCF_ONLY_AST)
assert isinstance(mod, ast.Module)
compile(mod, "<filename>", "exec")
def test_compile_and_getsource(self):
co = self.source.compile()
py.builtin.exec_(co, globals())
f(7)
excinfo = pytest.raises(AssertionError, "f(6)")
frame = excinfo.traceback[-1].frame
stmt = frame.code.fullsource.getstatement(frame.lineno)
#print "block", str(block)
assert str(stmt).strip().startswith('assert')
def test_compilefuncs_and_path_sanity(self):
def check(comp, name):
co = comp(self.source, name)
if not name:
expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
else:
expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
fn = co.co_filename
assert fn.endswith(expected)
mycode = _pytest._code.Code(self.test_compilefuncs_and_path_sanity)
mylineno = mycode.firstlineno
mypath = mycode.path
for comp in _pytest._code.compile, _pytest._code.Source.compile:
for name in '', None, 'my':
yield check, comp, name
def test_offsetless_synerr(self):
pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval')
def test_getstartingblock_singleline():
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
x = A('x', 'y')
l = [i for i in x.source.lines if i.strip()]
assert len(l) == 1
def test_getstartingblock_multiline():
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
x = A('x',
'y' \
,
'z')
l = [i for i in x.source.lines if i.strip()]
assert len(l) == 4
def test_getline_finally():
def c(): pass
excinfo = pytest.raises(TypeError, """
teardown = None
try:
c(1)
finally:
if teardown:
teardown()
""")
source = excinfo.traceback[-1].statement
assert str(source).strip() == 'c(1)'
def test_getfuncsource_dynamic():
source = """
def f():
raise ValueError
def g(): pass
"""
co = _pytest._code.compile(source)
py.builtin.exec_(co, globals())
assert str(_pytest._code.Source(f)).strip() == 'def f():\n raise ValueError'
assert str(_pytest._code.Source(g)).strip() == 'def g(): pass'
def test_getfuncsource_with_multine_string():
def f():
c = '''while True:
pass
'''
assert str(_pytest._code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
def test_deindent():
from _pytest._code.source import deindent as deindent
assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
def f():
c = '''while True:
pass
'''
import inspect
lines = deindent(inspect.getsource(f).splitlines())
assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
source = """
def f():
def g():
pass
"""
lines = deindent(source.splitlines())
assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
@pytest.mark.xfail("sys.version_info[:3] < (2,7,0) or "
"((3,0) <= sys.version_info[:2] < (3,2))")
def test_source_of_class_at_eof_without_newline(tmpdir):
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
source = _pytest._code.Source('''
class A(object):
def method(self):
x = 1
''')
path = tmpdir.join("a.py")
path.write(source)
s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A)
assert str(source).strip() == str(s2).strip()
if True:
def x():
pass
def test_getsource_fallback():
from _pytest._code.source import getsource
expected = """def x():
pass"""
src = getsource(x)
assert src == expected
def test_idem_compile_and_getsource():
from _pytest._code.source import getsource
expected = "def x(): pass"
co = _pytest._code.compile(expected)
src = getsource(co)
assert src == expected
def test_findsource_fallback():
from _pytest._code.source import findsource
src, lineno = findsource(x)
assert 'test_findsource_simple' in str(src)
assert src[lineno] == ' def x():'
def test_findsource():
from _pytest._code.source import findsource
co = _pytest._code.compile("""if 1:
def x():
pass
""")
src, lineno = findsource(co)
assert 'if 1:' in str(src)
d = {}
eval(co, d)
src, lineno = findsource(d['x'])
assert 'if 1:' in str(src)
assert src[lineno] == " def x():"
def test_getfslineno():
from _pytest._code import getfslineno
def f(x):
pass
fspath, lineno = getfslineno(f)
assert fspath.basename == "test_source.py"
assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource
class A(object):
pass
fspath, lineno = getfslineno(A)
_, A_lineno = py.std.inspect.findsource(A)
assert fspath.basename == "test_source.py"
assert lineno == A_lineno
assert getfslineno(3) == ("", -1)
class B:
pass
B.__name__ = "B2"
assert getfslineno(B)[1] == -1
def test_code_of_object_instance_with_call():
class A:
pass
pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
class WithCall:
def __call__(self):
pass
code = _pytest._code.Code(WithCall())
assert 'pass' in str(code.source())
class Hello(object):
def __call__(self):
pass
pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))
def getstatement(lineno, source):
from _pytest._code.source import getstatementrange_ast
source = _pytest._code.Source(source, deindent=False)
ast, start, end = getstatementrange_ast(lineno, source)
return source[start:end]
def test_oneline():
source = getstatement(0, "raise ValueError")
assert str(source) == "raise ValueError"
def test_comment_and_no_newline_at_end():
from _pytest._code.source import getstatementrange_ast
source = Source(['def test_basic_complex():',
' assert 1 == 2',
'# vim: filetype=pyopencl:fdm=marker'])
ast, start, end = getstatementrange_ast(1, source)
assert end == 2
def test_oneline_and_comment():
source = getstatement(0, "raise ValueError\n#hello")
assert str(source) == "raise ValueError"
@pytest.mark.xfail(hasattr(sys, "pypy_version_info"),
reason='does not work on pypy')
def test_comments():
source = '''def test():
"comment 1"
x = 1
# comment 2
# comment 3
assert False
"""
comment 4
"""
'''
for line in range(2,6):
assert str(getstatement(line, source)) == ' x = 1'
for line in range(6,10):
assert str(getstatement(line, source)) == ' assert False'
assert str(getstatement(10, source)) == '"""'
def test_comment_in_statement():
source = '''test(foo=1,
# comment 1
bar=2)
'''
for line in range(1,3):
assert str(getstatement(line, source)) == \
'test(foo=1,\n # comment 1\n bar=2)'
def test_single_line_else():
source = getstatement(1, "if False: 2\nelse: 3")
assert str(source) == "else: 3"
def test_single_line_finally():
source = getstatement(1, "try: 1\nfinally: 3")
assert str(source) == "finally: 3"
def test_issue55():
source = ('def round_trip(dinp):\n assert 1 == dinp\n'
'def test_rt():\n round_trip("""\n""")\n')
s = getstatement(3, source)
assert str(s) == ' round_trip("""\n""")'
def XXXtest_multiline():
source = getstatement(0, """\
raise ValueError(
23
)
x = 3
""")
assert str(source) == "raise ValueError(\n 23\n)"
class TestTry:
pytestmark = astonly
source = """\
try:
raise ValueError
except Something:
raise IndexError(1)
else:
raise KeyError()
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_except_line(self):
source = getstatement(2, self.source)
assert str(source) == "except Something:"
def test_except_body(self):
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
def test_else(self):
source = getstatement(5, self.source)
assert str(source) == " raise KeyError()"
class TestTryFinally:
source = """\
try:
raise ValueError
finally:
raise IndexError(1)
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_finally(self):
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
class TestIf:
pytestmark = astonly
source = """\
if 1:
y = 3
elif False:
y = 5
else:
y = 7
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " y = 3"
def test_elif_clause(self):
source = getstatement(2, self.source)
assert str(source) == "elif False:"
def test_elif(self):
source = getstatement(3, self.source)
assert str(source) == " y = 5"
def test_else(self):
source = getstatement(5, self.source)
assert str(source) == " y = 7"
def test_semicolon():
s = """\
hello ; pytest.skip()
"""
source = getstatement(0, s)
assert str(source) == s.strip()
def test_def_online():
s = """\
def func(): raise ValueError(42)
def something():
pass
"""
source = getstatement(0, s)
assert str(source) == "def func(): raise ValueError(42)"
def XXX_test_expression_multiline():
source = """\
something
'''
'''"""
result = getstatement(1, source)
assert str(result) == "'''\n'''"
|
eenchev/idea-note-taking-app | refs/heads/master | env/lib/python2.7/site-packages/pip/_vendor/progress/counter.py | 510 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
message = ''
hide_cursor = True
def update(self):
self.write(str(self.index))
class Countdown(WriteMixin, Progress):
hide_cursor = True
def update(self):
self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
phases = (u' ', u'▁', u'▂', u'▃', u'▄', u'▅', u'▆', u'▇', u'█')
hide_cursor = True
def update(self):
nphases = len(self.phases)
i = min(nphases - 1, int(self.progress * nphases))
self.write(self.phases[i])
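# Worked example: with the 9 phases above, progress == 0.5 gives
# i = min(8, int(0.5 * 9)) == 4, so the half-height block phases[4] is drawn.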
class Pie(Stack):
phases = (u'○', u'◔', u'◑', u'◕', u'●')
|
JTCunning/sentry | refs/heads/master | src/sentry/migrations/0019_auto__del_field_projectmember_api_key__add_field_projectmember_public_.py | 36 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ProjectMember.api_key'
db.delete_column('sentry_projectmember', 'api_key')
# Adding field 'ProjectMember.public_key'
db.add_column('sentry_projectmember', 'public_key', self.gf('django.db.models.fields.CharField')(max_length=32, unique=True, null=True), keep_default=False)
# Adding field 'ProjectMember.secret_key'
db.add_column('sentry_projectmember', 'secret_key', self.gf('django.db.models.fields.CharField')(max_length=32, unique=True, null=True), keep_default=False)
def backwards(self, orm):
# Adding field 'ProjectMember.api_key'
db.add_column('sentry_projectmember', 'api_key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=32, null=True), keep_default=False)
# Deleting field 'ProjectMember.public_key'
db.delete_column('sentry_projectmember', 'public_key')
# Deleting field 'ProjectMember.secret_key'
db.delete_column('sentry_projectmember', 'secret_key')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('project', 'logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'project_set'", 'to': "orm['sentry.User']"})
}
}
complete_apps = ['sentry']
|
dessHub/bc-14-online-store-application | refs/heads/master | flask/lib/python2.7/site-packages/wtforms/ext/django/fields.py | 175 | """
Useful form fields for use with the Django ORM.
"""
from __future__ import unicode_literals
import datetime
import operator
try:
from django.conf import settings
from django.utils import timezone
has_timezone = True
except ImportError:
has_timezone = False
from wtforms import fields, widgets
from wtforms.compat import string_types
from wtforms.validators import ValidationError
__all__ = (
'ModelSelectField', 'QuerySetSelectField', 'DateTimeField'
)
class QuerySetSelectField(fields.SelectFieldBase):
"""
Given a QuerySet either at initialization or inside a view, will display a
select drop-down field of choices. The `data` property actually will
store/keep an ORM model instance, not the ID. Submitting a choice which is
not in the queryset will result in a validation error.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` or `__unicode__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for the blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, queryset=None, get_label=None, allow_blank=False, blank_text='', **kwargs):
super(QuerySetSelectField, self).__init__(label, validators, **kwargs)
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if queryset is not None:
self.queryset = queryset.all() # Make sure the queryset is fresh
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
def _get_data(self):
if self._formdata is not None:
for obj in self.queryset:
if obj.pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield ('__None', self.blank_text, self.data is None)
for obj in self.queryset:
yield (obj.pk, self.get_label(obj), obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = int(valuelist[0])
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.queryset:
if self.data == obj:
break
else:
raise ValidationError(self.gettext('Not a valid choice'))
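# A minimal usage sketch (illustrative only; `Group` is a hypothetical Django
# model and the queryset would normally be assigned per request):
#
#     from wtforms import Form
#
#     class GroupForm(Form):
#         group = QuerySetSelectField(get_label='name', allow_blank=True)
#
#     def edit_group(request):
#         form = GroupForm(request.POST)
#         form.group.queryset = Group.objects.filter(active=True)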
class ModelSelectField(QuerySetSelectField):
"""
Like a QuerySetSelectField, except takes a model class instead of a
queryset and lists everything in it.
"""
def __init__(self, label=None, validators=None, model=None, **kwargs):
super(ModelSelectField, self).__init__(label, validators, queryset=model._default_manager.all(), **kwargs)
class DateTimeField(fields.DateTimeField):
"""
Adds support for Django's timezone utilities.
Requires Django >= 1.5
"""
def __init__(self, *args, **kwargs):
if not has_timezone:
raise ImportError('DateTimeField requires Django >= 1.5')
super(DateTimeField, self).__init__(*args, **kwargs)
def process_formdata(self, valuelist):
super(DateTimeField, self).process_formdata(valuelist)
date = self.data
if settings.USE_TZ and date is not None and timezone.is_naive(date):
current_timezone = timezone.get_current_timezone()
self.data = timezone.make_aware(date, current_timezone)
def _value(self):
date = self.data
if settings.USE_TZ and isinstance(date, datetime.datetime) and timezone.is_aware(date):
self.data = timezone.localtime(date)
return super(DateTimeField, self)._value()
|
viewportvr/daysinvr | refs/heads/master | backend/migrations/versions/29c431c803bc_.py | 1 | """empty message
Revision ID: 29c431c803bc
Revises: 938d2342e478
Create Date: 2019-07-17 16:31:08.485954
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '29c431c803bc'
down_revision = '938d2342e478'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'classroom', ['slug'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'classroom', type_='unique')
# ### end Alembic commands ###
|
valkjsaaa/sl4a | refs/heads/master | python/src/Mac/Tools/fixapplepython23.py | 29 | #!/usr/bin/python
"""fixapplepython23 - Fix Apple-installed Python 2.3 (on Mac OS X 10.3)
Python 2.3 (and 2.3.X for X<5) have the problem that building an extension
for a framework installation may accidentally pick up the framework
of a newer Python, in stead of the one that was used to build the extension.
This script modifies the Makefile (in .../lib/python2.3/config) to use
the newer method of linking extensions with "-undefined dynamic_lookup"
which fixes this problem.
The script will first check all prerequisites, and return a zero exit
status also when nothing needs to be fixed.
"""
import sys
import os
import gestalt
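# Typical invocation (a sketch; the Makefile is only writable by root, so the
# fix itself needs sudo, while -n performs a dry run that only reports whether
# a fix would be applied):
#
#     sudo python fixapplepython23.py
#     python fixapplepython23.py -n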
MAKEFILE='/System/Library/Frameworks/Python.framework/Versions/2.3/lib/python2.3/config/Makefile'
CHANGES=((
'LDSHARED=\t$(CC) $(LDFLAGS) -bundle -framework $(PYTHONFRAMEWORK)\n',
'LDSHARED=\t$(CC) $(LDFLAGS) -bundle -undefined dynamic_lookup\n'
),(
'BLDSHARED=\t$(CC) $(LDFLAGS) -bundle -framework $(PYTHONFRAMEWORK)\n',
'BLDSHARED=\t$(CC) $(LDFLAGS) -bundle -undefined dynamic_lookup\n'
),(
'CC=\t\tgcc\n',
'CC=\t\t/System/Library/Frameworks/Python.framework/Versions/2.3/lib/python2.3/config/PantherPythonFix/run-gcc\n'
),(
'CXX=\t\tc++\n',
'CXX=\t\t/System/Library/Frameworks/Python.framework/Versions/2.3/lib/python2.3/config/PantherPythonFix/run-g++\n'
))
GCC_SCRIPT='/System/Library/Frameworks/Python.framework/Versions/2.3/lib/python2.3/config/PantherPythonFix/run-gcc'
GXX_SCRIPT='/System/Library/Frameworks/Python.framework/Versions/2.3/lib/python2.3/config/PantherPythonFix/run-g++'
SCRIPT="""#!/bin/sh
export MACOSX_DEPLOYMENT_TARGET=10.3
exec %s "${@}"
"""
def findline(lines, start):
"""return line starting with given string or -1"""
for i in range(len(lines)):
if lines[i][:len(start)] == start:
return i
return -1
def fix(makefile, do_apply):
"""Fix the Makefile, if required."""
fixed = False
lines = open(makefile).readlines()
for old, new in CHANGES:
i = findline(lines, new)
if i >= 0:
# Already fixed
continue
i = findline(lines, old)
if i < 0:
print 'fixapplepython23: Python installation not fixed (appears broken)'
print 'fixapplepython23: missing line:', old
return 2
lines[i] = new
fixed = True
if fixed:
if do_apply:
print 'fixapplepython23: Fix to Apple-installed Python 2.3 applied'
os.rename(makefile, makefile + '~')
open(makefile, 'w').writelines(lines)
return 0
else:
print 'fixapplepython23: Fix to Apple-installed Python 2.3 should be applied'
return 1
else:
print 'fixapplepython23: No fix needed, appears to have been applied before'
return 0
def makescript(filename, compiler):
"""Create a wrapper script for a compiler"""
dirname = os.path.split(filename)[0]
if not os.access(dirname, os.X_OK):
os.mkdir(dirname, 0755)
fp = open(filename, 'w')
fp.write(SCRIPT % compiler)
fp.close()
os.chmod(filename, 0755)
print 'fixapplepython23: Created', filename
def main():
# Check for -n option
if len(sys.argv) > 1 and sys.argv[1] == '-n':
do_apply = False
else:
do_apply = True
# First check OS version
if sys.byteorder == 'little':
# All intel macs are fine
        print "fixapplepython23: no fix is needed on MacOSX on Intel"
sys.exit(0)
if gestalt.gestalt('sysv') < 0x1030:
print 'fixapplepython23: no fix needed on MacOSX < 10.3'
sys.exit(0)
if gestalt.gestalt('sysv') >= 0x1040:
print 'fixapplepython23: no fix needed on MacOSX >= 10.4'
sys.exit(0)
# Test that a framework Python is indeed installed
if not os.path.exists(MAKEFILE):
print 'fixapplepython23: Python framework does not appear to be installed (?), nothing fixed'
sys.exit(0)
# Check that we can actually write the file
if do_apply and not os.access(MAKEFILE, os.W_OK):
print 'fixapplepython23: No write permission, please run with "sudo"'
sys.exit(2)
# Create the shell scripts
if do_apply:
if not os.access(GCC_SCRIPT, os.X_OK):
makescript(GCC_SCRIPT, "gcc")
if not os.access(GXX_SCRIPT, os.X_OK):
makescript(GXX_SCRIPT, "g++")
# Finally fix the makefile
rv = fix(MAKEFILE, do_apply)
#sys.exit(rv)
sys.exit(0)
if __name__ == '__main__':
main()
|
noellak/ace-iphone | refs/heads/master | tools/python/linphone-daemon.py | 32 | import argparse
import logging
import sys
import threading
import time
import linphone
class Response:
Ok = 0
Error = 1
def __init__(self, status, msg = ''):
self.status = status
if status == Response.Ok:
self.body = msg
self.reason = None
else:
self.body = None
self.reason = msg
def __str__(self):
status_str = ["Ok", "Error"][self.status]
body = ''
if self.reason:
body += "Reason: {reason}\n".format(reason=self.reason)
if self.body:
body += '\n' + self.body + '\n'
return \
"""Status: {status}
{body}""".format(status=status_str, body=body)
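# For illustration, str(Response(Response.Ok, "Id: 1")) renders as
#
#     Status: Ok
#
#     Id: 1
#
# while str(Response(Response.Error, "Missing parameter.")) renders as
#
#     Status: Error
#     Reason: Missing parameter.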
class RegisterStatusResponse(Response):
def __init__(self):
Response.__init__(self, Response.Ok)
def append(self, id, proxy_cfg):
self.body += \
"""Id: {id}
State: {state}
""".format(id=id, state=str(linphone.RegistrationState.string(proxy_cfg.state)))
class CommandExample:
def __init__(self, command, output):
self.command = command
self.output = output
def __str__(self):
return \
"""> {command}
{output}""".format(command=self.command, output=self.output)
class Command:
def __init__(self, name, proto):
self.name = name
self.proto = proto
self.examples = []
def exec_command(self, app, args):
pass
def add_example(self, example):
self.examples.append(example)
def help(self):
body = \
"""{proto}
Description:
{description}
""".format(proto=self.proto, description=self.__doc__)
idx = 0
for example in self.examples:
idx += 1
body += \
"""
Example {idx}:
{example}
""".format(idx=idx, example=str(example))
return body
class CallCommand(Command):
"""Place a call."""
def __init__(self):
Command.__init__(self, "call", "call <sip-address>")
self.add_example(CommandExample(
"call [email protected]",
"Status: Ok\n\nId: 1"
))
self.add_example(CommandExample(
"call [email protected]",
"Status: Error\nReason: Call creation failed."
))
def exec_command(self, app, args):
if len(args) >= 1:
call = app.core.invite(args[0])
if call is None:
app.send_response(Response(Response.Error, "Call creation failed."))
else:
id = app.update_call_id(call)
app.send_response(Response(Response.Ok, "Id: " + str(id)))
else:
app.send_response(Response(Response.Error, "Missing parameter."))
class CallPauseCommand(Command):
"""Pause a call (pause current if no id is specified)."""
def __init__(self):
Command.__init__(self, "call-pause", "call-pause [call-id]")
self.add_example(CommandExample(
"call-pause 1",
"Status: Ok\n\nCall was paused"
))
self.add_example(CommandExample(
"call-pause 2",
"Status: Error\nReason: No call with such id."
))
self.add_example(CommandExample(
"call-pause",
"Status: Error\nReason: No current call available."
))
def exec_command(self, app, args):
current = False
if len(args) >= 1:
call = app.find_call(args[0])
if call is None:
app.send_response(Response(Response.Error, "No call with such id."))
return
else:
current = True
call = app.core.current_call
if call is None:
app.send_response(Response(Response.Error, "No current call available."))
return
if app.core.pause_call(call) == 0:
msg = "Call was paused."
if current:
msg = "Current call was paused."
app.send_response(Response(Response.Ok, msg))
else:
app.send_response(Response(Response.Error, "Error pausing call."))
class CallResumeCommand(Command):
"""Resume a call (resume current if no id is specified)."""
def __init__(self):
Command.__init__(self, "call-resume", "call-resume [call-id]")
self.add_example(CommandExample(
"call-resume 1",
"Status: Ok\n\nCall was resumed"
))
self.add_example(CommandExample(
"call-resume 2",
"Status: Error\nReason: No call with such id."
))
self.add_example(CommandExample(
"call-resume",
"Status: Error\nReason: No current call available."
))
def exec_command(self, app, args):
current = False
if len(args) >= 1:
call = app.find_call(args[0])
if call is None:
app.send_response(Response(Response.Error, "No call with such id."))
return
else:
current = True
call = app.core.current_call
if call is None:
app.send_response(Response(Response.Error, "No current call available."))
return
if app.core.resume_call(call) == 0:
msg = "Call was resumed."
if current:
msg = "Current call was resumed."
app.send_response(Response(Response.Ok, msg))
else:
app.send_response(Response(Response.Error, "Error resuming call."))
class CallStatusCommand(Command):
"""Return status of the specified call or of the current call if no id is given."""
def __init__(self):
Command.__init__(self, "call-status", "call-status [call-id]")
self.add_example(CommandExample(
"call-status 1",
"Status: Ok\n\nState: LinphoneCallStreamsRunning\nFrom: <sip:[email protected]>\nDirection: out\nDuration: 6"
))
self.add_example(CommandExample(
"call-status 2",
"Status: Error\nReason: No call with such id."
))
self.add_example(CommandExample(
"call-status",
"Status: Error\nReason: No current call available."
))
def exec_command(self, app, args):
if len(args) >= 1:
call = app.find_call(args[0])
if call is None:
app.send_response(Response(Response.Error, "No call with such id."))
return
else:
call = app.core.current_call
if call is None:
app.send_response(Response(Response.Error, "No current call available."))
return
state = call.state
body = "State: {state}".format(state=linphone.CallState.string(state))
if state == linphone.CallState.CallOutgoingInit \
or state == linphone.CallState.CallOutgoingProgress \
or state == linphone.CallState.CallOutgoingRinging \
or state == linphone.CallState.CallPaused \
or state == linphone.CallState.CallStreamsRunning \
or state == linphone.CallState.CallConnected \
or state == linphone.CallState.CallIncomingReceived:
body += "\nFrom: {address}".format(address=call.remote_address.as_string())
if state == linphone.CallState.CallStreamsRunning \
or state == linphone.CallState.CallConnected:
direction_str = 'in'
if call.dir == linphone.CallDir.CallOutgoing:
direction_str = 'out'
body += "\nDirection: {direction}\nDuration: {duration}".format(direction=direction_str, duration=call.duration)
app.send_response(Response(Response.Ok, body))
class HelpCommand(Command):
"""Show <command> help notice, if command is unspecified or inexistent show all commands."""
def __init__(self):
Command.__init__(self, "help", "help <command>")
def exec_command(self, app, args):
body = ''
if args:
command = [item for item in app.commands if item.name == args[0]]
if command:
body = command[0].help()
else:
app.send_response(Response(Response.Error, "Unknown command '{command}'.".format(command=args[0])))
return
else:
for command in app.commands:
body += command.proto + '\n'
app.send_response(Response(Response.Ok, body))
class QuitCommand(Command):
"""Quit the application."""
def __init__(self):
Command.__init__(self, "quit", "quit")
def exec_command(self, app, args):
app.quit()
app.send_response(Response(Response.Ok))
class RegisterCommand(Command):
"""Register the daemon to a SIP proxy. If one of the parameters <password>, <userid> and <realm> is not needed, send the string "NULL"."""
def __init__(self):
Command.__init__(self, "register", "register <identity> <proxy-address> [password] [userid] [realm] [contact-parameters]")
self.add_example(CommandExample(
"register sip:[email protected] sip.linphone.org password bob linphone.org",
"Status: Ok\n\nId: 1"
))
def exec_command(self, app, args):
if len(args) >= 2:
password = None
userid = None
realm = None
contact_parameters = None
identity = args[0]
proxy = args[1]
if len(args) > 2 and args[2] != "NULL":
password = args[2]
if len(args) > 3 and args[3] != "NULL":
userid = args[3]
if len(args) > 4 and args[4] != "NULL":
realm = args[4]
if len(args) > 5 and args[5] != "NULL":
contact_parameters = args[5]
proxy_cfg = app.core.create_proxy_config()
if password is not None:
addr = linphone.Address.new(identity)
if addr is not None:
info = linphone.AuthInfo.new(addr.username, userid, password, None, realm, None)
app.core.add_auth_info(info)
print(info)
proxy_cfg.identity = identity
proxy_cfg.server_addr = proxy
proxy_cfg.register_enabled = True
proxy_cfg.contact_parameters = contact_parameters
app.core.add_proxy_config(proxy_cfg)
id = app.update_proxy_id(proxy_cfg)
app.send_response(Response(Response.Ok, "Id: " + str(id)))
else:
app.send_response(Response(Response.Error, "Missing/Incorrect parameter(s)."))
class RegisterStatusCommand(Command):
"""Return status of a registration or of all registrations."""
def __init__(self):
Command.__init__(self, "register-status", "register-status <register_id|ALL>")
self.add_example(CommandExample(
"register-status 1",
"Status: Ok\n\nId: 1\nState: LinphoneRegistrationOk"
))
self.add_example(CommandExample(
"register-status ALL",
"Status: Ok\n\nId: 1\nState: LinphoneRegistrationOk\n\nId: 2\nState: LinphoneRegistrationFailed"
))
self.add_example(CommandExample(
"register-status 3",
"Status: Error\nReason: No register with such id."
))
def exec_command(self, app, args):
if len(args) == 0:
app.send_response(Response(Response.Error, "Missing parameter."))
else:
id = args[0]
if id == "ALL":
response = RegisterStatusResponse()
for id in app.proxy_ids_map:
response.append(id, app.proxy_ids_map[id])
app.send_response(response)
else:
proxy_cfg = app.find_proxy(id)
if proxy_cfg is None:
app.send_response(Response(Response.Error, "No register with such id."))
else:
                    response = RegisterStatusResponse()
                    response.append(id, proxy_cfg)
                    app.send_response(response)
class TerminateCommand(Command):
"""Terminate the specified call or the current call if no id is given."""
def __init__(self):
Command.__init__(self, "terminate", "terminate [call id]")
self.add_example(CommandExample(
"terminate 2",
"Status: Error\nReason: No call with such id."
))
self.add_example(CommandExample(
"terminate 1",
"Status: Ok\n"
))
self.add_example(CommandExample(
"terminate",
"Status: Ok\n"
))
self.add_example(CommandExample(
"terminate",
"Status: Error\nReason: No active call."
))
def exec_command(self, app, args):
if len(args) >= 1:
call = app.find_call(args[0])
if call is None:
app.send_response(Response(Response.Error, "No call with such id."))
return
else:
call = app.core.current_call
if call is None:
app.send_response(Response(Response.Error, "No active call."))
return
app.core.terminate_call(call)
app.send_response(Response(Response.Ok))
class Daemon:
def __init__(self):
self.quitting = False
self._next_proxy_id = 1
self.proxy_ids_map = {}
self._next_call_id = 1
self.call_ids_map = {}
self.command_mutex = threading.Lock()
self.command_executed_event = threading.Event()
self.command_to_execute = None
self.commands = [
CallCommand(),
CallPauseCommand(),
CallResumeCommand(),
CallStatusCommand(),
HelpCommand(),
QuitCommand(),
RegisterCommand(),
RegisterStatusCommand(),
TerminateCommand()
]
def global_state_changed(self, core, state, message):
logging.warning("[PYTHON] global_state_changed: " + str(state) + ", " + message)
if state == linphone.GlobalState.GlobalOn:
logging.warning("[PYTHON] core version: " + str(core.version))
def registration_state_changed(self, core, proxy_cfg, state, message):
logging.warning("[PYTHON] registration_state_changed: " + str(state) + ", " + message)
def call_state_changed(self, core, call, state, message):
logging.warning("[PYTHON] call_state_changed: " + str(state) + ", " + message)
def send_response(self, response):
print(response)
def exec_command(self, command_line):
splitted_command_line = command_line.split()
name = splitted_command_line[0]
args = splitted_command_line[1:]
command = [item for item in self.commands if item.name == name]
if command:
command[0].exec_command(self, args)
else:
self.send_response(Response(Response.Error, "Unknown command."))
def interact(self):
command_line = raw_input('> ').strip()
if command_line != '':
self.command_mutex.acquire()
self.command_to_execute = command_line
self.command_mutex.release()
self.command_executed_event.wait()
self.command_executed_event.clear()
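    # interact() runs on the reader thread started in run(): it parks the typed
    # command in self.command_to_execute under command_mutex, then blocks on
    # command_executed_event until the main loop has executed it, so every
    # linphone call is made from the single thread that drives core.iterate().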
def run(self, args):
def command_read(daemon):
while not daemon.quitting:
daemon.interact()
callbacks = {
'global_state_changed':self.global_state_changed,
'registration_state_changed':self.registration_state_changed,
'call_state_changed':self.call_state_changed
}
# Create a linphone core and iterate every 20 ms
self.core = linphone.Core.new(callbacks, args.config, args.factory_config)
t = threading.Thread(target=command_read, kwargs={'daemon':self})
t.start()
while not self.quitting:
self.command_mutex.acquire()
command_line = self.command_to_execute
if command_line is not None:
self.exec_command(command_line)
self.command_to_execute = None
self.command_executed_event.set()
self.command_mutex.release()
self.core.iterate()
time.sleep(0.02)
t.join()
def quit(self):
self.quitting = True
def update_proxy_id(self, proxy):
id = self._next_proxy_id
self.proxy_ids_map[str(id)] = proxy
self._next_proxy_id += 1
return id
def find_proxy(self, id):
if self.proxy_ids_map.has_key(id):
return self.proxy_ids_map[id]
return None
def update_call_id(self, call):
id = self._next_call_id
self.call_ids_map[str(id)] = call
self._next_call_id += 1
return id
def find_call(self, id):
if self.call_ids_map.has_key(id):
return self.call_ids_map[id]
return None
def setup_log_colors():
logging.addLevelName(logging.DEBUG, "\033[1;37m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logging.addLevelName(logging.INFO, "\033[1;36m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.WARNING, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.ERROR, "\033[1;41m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
def setup_log(log, trace):
if log is None:
setup_log_colors()
format = "%(asctime)s.%(msecs)03d %(levelname)s: %(message)s"
datefmt = "%H:%M:%S"
if trace:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(filename=log, level=level, format=format, datefmt=datefmt)
# Define the linphone module log handler
def log_handler(level, msg):
method = getattr(logging, level)
if not msg.strip().startswith('[PYLINPHONE]'):
msg = '[CORE] ' + msg
method(msg)
def main(argv = None):
if argv is None:
argv = sys.argv
argparser = argparse.ArgumentParser(description="Linphone console interface in Python.")
argparser.add_argument('--config', default=None, help="Path to the linphonerc configuration file to use.")
argparser.add_argument('--factory_config', default=None, help="Path to the linphonerc factory configuration file to use.")
argparser.add_argument('--log', default=None, help="Path to the file used for logging (default is the standard output).")
argparser.add_argument('--trace', action='store_true', help="Output linphone Python module tracing logs (for debug purposes).")
args = argparser.parse_args()
setup_log(args.log, args.trace)
linphone.set_log_handler(log_handler)
d = Daemon()
d.run(args)
if __name__ == "__main__":
sys.exit(main())
|
CydarLtd/ansible | refs/heads/devel | lib/ansible/modules/remote_management/wakeonlan.py | 60 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: wakeonlan
version_added: '2.2'
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for.
required: true
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet.
default: 7
author: "Dag Wieers (@dagwieers)"
todo:
- Add arping support to check whether the system is up (before and after)
- Enable check-mode support (when we have arping support)
- Does not have SecureOn password support
notes:
- This module sends a magic packet, without knowing whether it worked
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first) when turned off
'''
EXAMPLES = '''
- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
wakeonlan:
mac: '00:00:5E:00:53:66'
broadcast: 192.0.2.23
delegate_to: localhost
- wakeonlan:
mac: 00:00:5E:00:53:66
port: 9
delegate_to: localhost
'''
RETURN='''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import socket
import struct
def wakeonlan(module, mac, broadcast, port):
""" Send a magic Wake-on-LAN packet. """
mac_orig = mac
# Remove possible separator from MAC address
if len(mac) == 12 + 5:
mac = mac.replace(mac[2], '')
# If we don't end up with 12 hexadecimal characters, fail
if len(mac) != 12:
module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
# Test if it converts to an integer, otherwise fail
try:
int(mac, 16)
except ValueError:
module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
# Create payload for magic packet
data = ''
padding = ''.join(['FFFFFFFFFFFF', mac * 20])
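    # A standard magic packet is 6 bytes of 0xFF followed by the target MAC
    # repeated 16 times; repeating it 20 times as above (126 bytes in total) is
    # harmless, since receivers simply scan the payload for the sync + 16 copies.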
for i in range(0, len(padding), 2):
data = ''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
# Broadcast payload to network
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
try:
sock.sendto(data, (broadcast, port))
except socket.error:
e = get_exception()
sock.close()
module.fail_json(msg=str(e))
sock.close()
def main():
module = AnsibleModule(
argument_spec = dict(
mac = dict(type='str', required=True),
broadcast = dict(type='str', default='255.255.255.255'),
port = dict(type='int', default=7),
),
supports_check_mode = True,
)
mac = module.params['mac']
broadcast = module.params['broadcast']
port = module.params['port']
if not module.check_mode:
wakeonlan(module, mac, broadcast, port)
module.exit_json(changed=True)
if __name__ == '__main__':
main()
|
pgmillon/ansible | refs/heads/devel | lib/ansible/module_utils/common/process.py | 73 | # Copyright (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.common.file import is_executable
def get_bin_path(arg, required=False, opt_dirs=None):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true it produces an Exception
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
opt_dirs = [] if opt_dirs is None else opt_dirs
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
raise ValueError('Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
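# Illustrative usage (actual results depend on the host's PATH):
#
# get_bin_path('ss') -> e.g. '/usr/sbin/ss' (the sbin dirs above are searched too)
# get_bin_path('no-such-tool') -> None
# get_bin_path('no-such-tool', required=True) -> raises ValueError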
|
xhqu1981/pymatgen | refs/heads/master | pymatgen/analysis/tests/test_ewald.py | 3 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
import warnings
from pymatgen.analysis.ewald import EwaldSummation, EwaldMinimizer
from pymatgen.io.vasp.inputs import Poscar
import numpy as np
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class EwaldSummationTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
original_s = p.structure
s = original_s.copy()
s.add_oxidation_state_by_element({"Li": 1, "Fe": 2,
"P": 5, "O": -2})
ham = EwaldSummation(s, compute_forces=True)
self.assertAlmostEqual(ham.real_space_energy, -502.23549897772602, 4)
self.assertAlmostEqual(ham.reciprocal_space_energy, 6.1541071599534654, 4)
self.assertAlmostEqual(ham.point_energy, -620.22598358035918, 4)
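        # Sanity check on the decomposition: the three contributions above sum
        # to the total energy asserted below,
        # -502.235 + 6.154 - 620.226 ~= -1116.307.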
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertAlmostEqual(ham.total_energy, -1116.30737539811, 2)
self.assertAlmostEqual(ham.forces[0, 0], -1.98818620e-01, 4)
self.assertAlmostEqual(sum(sum(abs(ham.forces))), 915.925354346, 4,
"Forces incorrect")
self.assertAlmostEqual(sum(sum(ham.real_space_energy_matrix)),
ham.real_space_energy, 4)
self.assertAlmostEqual(sum(sum(ham.reciprocal_space_energy_matrix)),
ham.reciprocal_space_energy, 4)
self.assertAlmostEqual(sum(ham.point_energy_matrix), ham.point_energy,
4)
self.assertAlmostEqual(sum(sum(ham.total_energy_matrix)),
ham.total_energy, 2)
# note that forces are not individually tested, but should work fine.
self.assertRaises(ValueError, EwaldSummation, original_s)
# try sites with charge.
charges = []
for site in original_s:
if site.specie.symbol == "Li":
charges.append(1)
elif site.specie.symbol == "Fe":
charges.append(2)
elif site.specie.symbol == "P":
charges.append(5)
else:
charges.append(-2)
original_s.add_site_property('charge', charges)
ham2 = EwaldSummation(original_s)
self.assertAlmostEqual(ham2.real_space_energy, -502.23549897772602, 4)
class EwaldMinimizerTest(unittest.TestCase):
def test_init(self):
matrix = np.array([[-3., 3., 4., -0., 3., 3., 1., 14., 9., -4.],
[1., -3., -3., 12., -4., -1., 5., 11., 1., 12.],
[14., 7., 13., 15., 13., 5., -5., 10., 14., -2.],
[9., 13., 4., 1., 3., -4., 7., 0., 6., -4.],
[4., -4., 6., 1., 12., -4., -2., 13., 0., 6.],
[13., 7., -4., 12., -2., 9., 8., -5., 3., 1.],
[8., 1., 10., -4., -2., 4., 13., 12., -3., 13.],
[2., 11., 8., 1., -1., 5., -3., 4., 5., 0.],
[-0., 14., 4., 3., -1., -5., 7., -1., -1., 3.],
[2., -2., 10., 1., 6., -5., -3., 12., 0., 13.]])
m_list = [[.9, 4, [1, 2, 3, 4, 8], 'a'], [-1, 2, [5, 6, 7], 'b']]
e_min = EwaldMinimizer(matrix, m_list, 50)
self.assertEqual(len(e_min.output_lists), 15,
"Wrong number of permutations returned")
self.assertAlmostEqual(e_min.minimized_sum, 111.63, 3,
"Returned wrong minimum value")
self.assertEqual(len(e_min.best_m_list), 6,
"Returned wrong number of permutations")
if __name__ == "__main__":
unittest.main()
|
seem-sky/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/unittest/test/test_assertions.py | 82 | import datetime
import warnings
import weakref
import unittest
from itertools import product
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
def test_AmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertAlmostEqual(1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def test_assertRaises_frames_survival(self):
# Issue #9815: assertRaises should avoid keeping local variables
# in a traceback alive.
class A:
pass
wr = None
class Foo(unittest.TestCase):
def foo(self):
nonlocal wr
a = A()
wr = weakref.ref(a)
try:
raise IOError
except IOError:
raise ValueError
def test_functional(self):
self.assertRaises(ValueError, self.foo)
def test_with(self):
with self.assertRaises(ValueError):
self.foo()
Foo("test_functional").run()
self.assertIsNone(wr())
Foo("test_with").run()
self.assertIsNone(wr())
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn("'kot'", e.args[0])
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
"""
Check that methodName(*args) raises the correct error messages.
errors should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
['^None not found in \[\]$', "^oops$",
'^None not found in \[\]$',
'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
['^None unexpectedly found in \[None\]$', "^oops$",
'^None unexpectedly found in \[None\]$',
'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
"\+ \{'key': 'value'\}$",
"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
def assertMessagesCM(self, methodName, args, func, errors):
"""
Check that the correct error messages are raised while executing:
with method(*args):
func()
*errors* should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
p = product((self.testableFalse, self.testableTrue),
({}, {"msg": "oops"}))
for (cls, kwargs), err in zip(p, errors):
method = getattr(cls, methodName)
with self.assertRaisesRegex(cls.failureException, err):
with method(*args, **kwargs) as cm:
func()
def testAssertRaises(self):
self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
def testAssertRaisesRegex(self):
# test error not raised
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
# test error raised but with wrong message
def raise_wrong_message():
raise TypeError('foo')
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
def testAssertWarns(self):
self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
def testAssertWarnsRegex(self):
# test error not raised
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
# test warning raised but with wrong message
def raise_wrong_message():
warnings.warn('foo')
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
if __name__ == "__main__":
unittest.main()
|
goliveirab/odoo | refs/heads/8.0 | openerp/addons/test_convert/tests/test_convert.py | 382 | import collections
import unittest2
from lxml import etree as ET
from lxml.builder import E
from openerp.tests import common
from openerp.tools.convert import _eval_xml
Field = E.field
Value = E.value
class TestEvalXML(common.TransactionCase):
def eval_xml(self, node, obj=None, idref=None):
return _eval_xml(obj, node, pool=None, cr=self.cr, uid=self.uid,
idref=idref, context=None)
def test_char(self):
self.assertEqual(
self.eval_xml(Field("foo")),
"foo")
self.assertEqual(
self.eval_xml(Field("None")),
"None")
def test_int(self):
self.assertIsNone(
self.eval_xml(Field("None", type='int')),
"what the fuck?")
self.assertEqual(
self.eval_xml(Field(" 42 ", type="int")),
42)
with self.assertRaises(ValueError):
self.eval_xml(Field("4.82", type="int"))
with self.assertRaises(ValueError):
self.eval_xml(Field("Whelp", type="int"))
def test_float(self):
self.assertEqual(
self.eval_xml(Field("4.78", type="float")),
4.78)
with self.assertRaises(ValueError):
self.eval_xml(Field("None", type="float"))
with self.assertRaises(ValueError):
self.eval_xml(Field("Foo", type="float"))
def test_list(self):
self.assertEqual(
self.eval_xml(Field(type="list")),
[])
self.assertEqual(
self.eval_xml(Field(
Value("foo"),
Value("5", type="int"),
Value("4.76", type="float"),
Value("None", type="int"),
type="list"
)),
["foo", 5, 4.76, None])
def test_file(self):
Obj = collections.namedtuple('Obj', 'module')
obj = Obj('test_convert')
self.assertEqual(
self.eval_xml(Field('test_file.txt', type='file'), obj),
'test_convert,test_file.txt')
with self.assertRaises(IOError):
self.eval_xml(Field('test_nofile.txt', type='file'), obj)
@unittest2.skip("not tested")
def test_xml(self):
pass
@unittest2.skip("not tested")
def test_html(self):
pass
|
maestrano/openerp | refs/heads/mno-master | openerp/addons/point_of_sale/report/pos_receipt.py | 53 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from openerp import pooler
def titlize(journal_name):
words = journal_name.split()
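    # Pop words off the end until the literal word 'journal' has been removed,
    # keeping only the words before it (this assumes the journal name actually
    # contains a lowercase 'journal'; otherwise pop() would exhaust the list).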
while words.pop() != 'journal':
continue
return ' '.join(words)
class order(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(order, self).__init__(cr, uid, name, context=context)
user = pooler.get_pool(cr.dbname).get('res.users').browse(cr, uid, uid, context=context)
partner = user.company_id.partner_id
self.localcontext.update({
'time': time,
'disc': self.discount,
'net': self.netamount,
'get_journal_amt': self._get_journal_amt,
'address': partner or False,
'titlize': titlize
})
def netamount(self, order_line_id):
sql = 'select (qty*price_unit) as net_price from pos_order_line where id = %s'
self.cr.execute(sql, (order_line_id,))
res = self.cr.fetchone()
return res[0]
def discount(self, order_id):
sql = 'select discount, price_unit, qty from pos_order_line where order_id = %s '
self.cr.execute(sql, (order_id,))
res = self.cr.fetchall()
dsum = 0
for line in res:
if line[0] != 0:
dsum = dsum +(line[2] * (line[0]*line[1]/100))
return dsum
def _get_journal_amt(self, order_id):
data={}
sql = """ select aj.name,absl.amount as amt from account_bank_statement as abs
LEFT JOIN account_bank_statement_line as absl ON abs.id = absl.statement_id
LEFT JOIN account_journal as aj ON aj.id = abs.journal_id
WHERE absl.pos_statement_id =%d"""%(order_id)
self.cr.execute(sql)
data = self.cr.dictfetchall()
return data
report_sxw.report_sxw('report.pos.receipt', 'pos.order', 'addons/point_of_sale/report/pos_receipt.rml', parser=order, header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ruuk/script.module.youtube.dl | refs/heads/master | lib/youtube_dl/extractor/wsj.py | 30 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class WSJIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://video-api\.wsj\.com/api-video/player/iframe\.html\?.*?\bguid=|
https?://(?:www\.)?(?:wsj|barrons)\.com/video/(?:[^/]+/)+|
wsj:
)
(?P<id>[a-fA-F0-9-]{36})
'''
IE_DESC = 'Wall Street Journal'
_TESTS = [{
'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'md5': 'e230a5bb249075e40793b655a54a02e4',
'info_dict': {
'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'ext': 'mp4',
'upload_date': '20150202',
'uploader_id': 'jdesai',
'creator': 'jdesai',
'categories': list, # a long list
'duration': 90,
'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
},
}, {
'url': 'http://www.wsj.com/video/can-alphabet-build-a-smarter-city/359DDAA8-9AC1-489C-82E6-0429C1E430E0.html',
'only_matching': True,
}, {
'url': 'http://www.barrons.com/video/capitalism-deserves-more-respect-from-millennials/F301217E-6F46-43AE-B8D2-B7180D642EE9.html',
'only_matching': True,
}, {
'url': 'https://www.wsj.com/video/series/a-brief-history-of/the-modern-cell-carrier-how-we-got-here/980E2187-401D-48A1-B82B-1486CEE06CB9',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_json(
'http://video-api.wsj.com/api-video/find_all_videos.asp', video_id,
query={
'type': 'guid',
'count': 1,
'query': video_id,
'fields': ','.join((
'type', 'hls', 'videoMP4List', 'thumbnailList', 'author',
'description', 'name', 'duration', 'videoURL', 'titletag',
'formattedCreationDate', 'keywords', 'editor')),
})['items'][0]
title = info.get('name', info.get('titletag'))
formats = []
f4m_url = info.get('videoURL')
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False))
m3u8_url = info.get('hls')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
info['hls'], video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
for v in info.get('videoMP4List', []):
mp4_url = v.get('url')
if not mp4_url:
continue
tbr = int_or_none(v.get('bitrate'))
formats.append({
'url': mp4_url,
'format_id': 'http' + ('-%d' % tbr if tbr else ''),
'tbr': tbr,
'width': int_or_none(v.get('width')),
'height': int_or_none(v.get('height')),
'fps': float_or_none(v.get('fps')),
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
# Thumbnails are conveniently in the correct format already
'thumbnails': info.get('thumbnailList'),
'creator': info.get('author'),
'uploader_id': info.get('editor'),
'duration': int_or_none(info.get('duration')),
'upload_date': unified_strdate(info.get(
'formattedCreationDate'), day_first=False),
'title': title,
'categories': info.get('keywords'),
}
class WSJArticleIE(InfoExtractor):
_VALID_URL = r'(?i)https?://(?:www\.)?wsj\.com/articles/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://www.wsj.com/articles/dont-like-china-no-pandas-for-you-1490366939?',
'info_dict': {
'id': '4B13FA62-1D8C-45DB-8EA1-4105CB20B362',
'ext': 'mp4',
'upload_date': '20170221',
'uploader_id': 'ralcaraz',
'title': 'Bao Bao the Panda Leaves for China',
}
}
def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)
video_id = self._search_regex(
r'data-src=["\']([a-fA-F0-9-]{36})', webpage, 'video id')
return self.url_result('wsj:%s' % video_id, WSJIE.ie_key(), video_id)
|
pfmoore/sandbox | refs/heads/master | basicproject/{{cookiecutter.repo_name}}/tests/test_{{cookiecutter.repo_name}}.py | 1 | import {{cookiecutter.repo_name}}
def test_{{cookiecutter.repo_name}}():
"""A dummy test"""
assert True
|
saurabh6790/omni-libs | refs/heads/master | core/doctype/customize_form/customize_form.py | 34 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Customize Form is a Single DocType used to mask the Property Setter,
thus providing a better UI from the user's perspective.
"""
import webnotes
from webnotes.utils import cstr
class DocType:
def __init__(self, doc, doclist=[]):
self.doc, self.doclist = doc, doclist
self.doctype_properties = [
'search_fields',
'default_print_format',
'read_only_onload',
'allow_print',
'allow_email',
'allow_copy',
'allow_attach',
'max_attachments'
]
self.docfield_properties = [
'idx',
'label',
'fieldtype',
'fieldname',
'options',
'permlevel',
'width',
'print_width',
'reqd',
'in_filter',
'in_list_view',
'hidden',
'print_hide',
'report_hide',
'allow_on_submit',
'depends_on',
'description',
'default',
'name'
]
self.property_restrictions = {
'fieldtype': [['Currency', 'Float'], ['Small Text', 'Data'], ['Text', 'Text Editor', 'Code']],
}
self.forbidden_properties = ['idx']
def get(self):
"""
Gets DocFields applied with Property Setter customizations via Customize Form Field
"""
self.clear()
if self.doc.doc_type:
from webnotes.model.doc import addchild
for d in self.get_ref_doclist():
if d.doctype=='DocField':
new = addchild(self.doc, 'fields', 'Customize Form Field',
self.doclist)
self.set(
{
'list': self.docfield_properties,
'doc' : d,
'doc_to_set': new
}
)
elif d.doctype=='DocType':
self.set({ 'list': self.doctype_properties, 'doc': d })
def get_ref_doclist(self):
"""
* Gets doclist of type self.doc.doc_type
* Applies property setter properties on the doclist
* returns the modified doclist
"""
from webnotes.model.doctype import get
ref_doclist = get(self.doc.doc_type)
ref_doclist = webnotes.doclist([ref_doclist[0]]
+ ref_doclist.get({"parent": self.doc.doc_type}))
return ref_doclist
def clear(self):
"""
Clear fields in the doc
"""
# Clear table before adding new doctype's fields
self.doclist = self.doc.clear_table(self.doclist, 'fields')
self.set({ 'list': self.doctype_properties, 'value': None })
def set(self, args):
"""
Set a list of attributes of a doc to a value
or to attribute values of a doc passed
args can contain:
* list --> list of attributes to set
* doc_to_set --> defaults to self.doc
* value --> to set all attributes to one value eg. None
* doc --> copy attributes from doc to doc_to_set
"""
if not 'doc_to_set' in args:
args['doc_to_set'] = self.doc
if 'list' in args:
if 'value' in args:
for f in args['list']:
args['doc_to_set'].fields[f] = None
elif 'doc' in args:
for f in args['list']:
args['doc_to_set'].fields[f] = args['doc'].fields.get(f)
else:
webnotes.msgprint("Please specify args['list'] to set", raise_exception=1)
def post(self):
"""
Save diff between Customize Form Bean and DocType Bean as property setter entries
"""
if self.doc.doc_type:
from webnotes.model import doc
from core.doctype.doctype.doctype import validate_fields_for_doctype
this_doclist = webnotes.doclist([self.doc] + self.doclist)
ref_doclist = self.get_ref_doclist()
dt_doclist = doc.get('DocType', self.doc.doc_type)
# get a list of property setter docs
diff_list = self.diff(this_doclist, ref_doclist, dt_doclist)
self.set_properties(diff_list)
validate_fields_for_doctype(self.doc.doc_type)
webnotes.clear_cache(doctype=self.doc.doc_type)
webnotes.msgprint("Updated")
def diff(self, new_dl, ref_dl, dt_dl):
"""
Get difference between new_dl doclist and ref_dl doclist
then check how it differs from dt_dl i.e. default doclist
"""
import re
self.defaults = self.get_defaults()
diff_list = []
for new_d in new_dl:
for ref_d in ref_dl:
if ref_d.doctype == 'DocField' and new_d.name == ref_d.name:
for prop in self.docfield_properties:
# do not set forbidden properties like idx
if prop in self.forbidden_properties: continue
# check if its custom field:
if ref_d.get("__custom_field"):
# update custom field
if self.has_property_changed(ref_d, new_d, prop):
# using set_value not bean because validations are called
# in the end anyways
webnotes.conn.set_value("Custom Field", ref_d.name, prop, new_d.get(prop))
else:
d = self.prepare_to_set(prop, new_d, ref_d, dt_dl)
if d: diff_list.append(d)
break
elif ref_d.doctype == 'DocType' and new_d.doctype == 'Customize Form':
for prop in self.doctype_properties:
d = self.prepare_to_set(prop, new_d, ref_d, dt_dl)
if d: diff_list.append(d)
break
return diff_list
def get_defaults(self):
"""
Get fieldtype and default value for properties of a field
"""
df_defaults = webnotes.conn.sql("""
SELECT fieldname, fieldtype, `default`, label
FROM `tabDocField`
WHERE parent='DocField' or parent='DocType'""", as_dict=1)
defaults = {}
for d in df_defaults:
defaults[d['fieldname']] = d
defaults['idx'] = {'fieldname' : 'idx', 'fieldtype' : 'Int', 'default' : 1, 'label' : 'idx'}
defaults['previous_field'] = {'fieldname' : 'previous_field', 'fieldtype' : 'Data', 'default' : None, 'label' : 'Previous Field'}
return defaults
def has_property_changed(self, ref_d, new_d, prop):
return new_d.fields.get(prop) != ref_d.fields.get(prop) \
and not \
( \
new_d.fields.get(prop) in [None, 0] \
and ref_d.fields.get(prop) in [None, 0] \
) and not \
( \
new_d.fields.get(prop) in [None, ''] \
and ref_d.fields.get(prop) in [None, ''] \
)
def prepare_to_set(self, prop, new_d, ref_d, dt_doclist, delete=0):
"""
Prepares docs of property setter
sets delete property if it is required to be deleted
"""
# Check if property has changed compared to when it was loaded
if self.has_property_changed(ref_d, new_d, prop):
#webnotes.msgprint("new: " + str(new_d.fields[prop]) + " | old: " + str(ref_d.fields[prop]))
# Check if the new property is same as that in original doctype
# If yes, we need to delete the property setter entry
for dt_d in dt_doclist:
if dt_d.name == ref_d.name \
and (new_d.fields.get(prop) == dt_d.fields.get(prop) \
or \
( \
new_d.fields.get(prop) in [None, 0] \
and dt_d.fields.get(prop) in [None, 0] \
) or \
( \
new_d.fields.get(prop) in [None, ''] \
and dt_d.fields.get(prop) in [None, ''] \
)):
delete = 1
break
value = new_d.fields.get(prop)
if prop in self.property_restrictions:
allow_change = False
for restrict_list in self.property_restrictions.get(prop):
if value in restrict_list and \
ref_d.fields.get(prop) in restrict_list:
allow_change = True
break
if not allow_change:
webnotes.msgprint("""\
You cannot change '%s' of '%s' from '%s' to '%s'.
%s can only be changed among %s.
<i>Ignoring this change and saving.</i>""" % \
(self.defaults.get(prop, {}).get("label") or prop,
new_d.fields.get("label") or new_d.fields.get("idx"),
ref_d.fields.get(prop), value,
self.defaults.get(prop, {}).get("label") or prop,
" -or- ".join([", ".join(r) for r in \
self.property_restrictions.get(prop)])), raise_exception=True)
return None
# If the above conditions are fulfilled,
# create a property setter doc, but dont save it yet.
from webnotes.model.doc import Document
d = Document('Property Setter')
d.doctype_or_field = ref_d.doctype=='DocField' and 'DocField' or 'DocType'
d.doc_type = self.doc.doc_type
d.field_name = ref_d.fieldname
d.property = prop
d.value = value
d.property_type = self.defaults[prop]['fieldtype']
#d.default_value = self.defaults[prop]['default']
if delete: d.delete = 1
if d.select_item:
d.select_item = self.remove_forbidden(d.select_item)
# return the property setter doc
return d
else: return None
def set_properties(self, ps_doclist):
"""
* Delete a property setter entry
+ if it already exists
+ if marked for deletion
* Save the property setter doc in the list
"""
for d in ps_doclist:
# Delete existing property setter entry
if not d.fields.get("field_name"):
webnotes.conn.sql("""
DELETE FROM `tabProperty Setter`
WHERE doc_type = %(doc_type)s
AND property = %(property)s""", d.fields)
else:
webnotes.conn.sql("""
DELETE FROM `tabProperty Setter`
WHERE doc_type = %(doc_type)s
AND field_name = %(field_name)s
AND property = %(property)s""", d.fields)
# Save the property setter doc if not marked for deletion i.e. delete=0
if not d.delete:
d.insert()
def delete(self):
"""
Deletes all property setter entries for the selected doctype
and resets it to standard
"""
if self.doc.doc_type:
webnotes.conn.sql("""
DELETE FROM `tabProperty Setter`
WHERE doc_type = %s""", self.doc.doc_type)
webnotes.clear_cache(doctype=self.doc.doc_type)
self.get()
def remove_forbidden(self, string):
"""
Replace forbidden characters with a space
"""
forbidden = ['%', "'", '"', '#', '*', '?', '`']
for f in forbidden:
            string = string.replace(f, ' ')
        return string
|
Stavitsky/neutron | refs/heads/master | neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py | 25 | # Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import constants as const
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import ucsm_db
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import ucsm_network_driver
from oslo_log import log as logging
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.i18n import _LE, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
LOG = logging.getLogger(__name__)
class CiscoUcsmMechanismDriver(api.MechanismDriver):
"""ML2 Mechanism Driver for Cisco UCS Manager."""
def initialize(self):
self.vif_type = portbindings.VIF_TYPE_802_QBH
self.vif_details = {portbindings.CAP_PORT_FILTER: False}
self.driver = ucsm_network_driver.CiscoUcsmDriver()
self.ucsm_db = ucsm_db.UcsmDbModel()
def _get_vlanid(self, context):
"""Returns vlan_id associated with a bound VLAN segment."""
segment = context.bottom_bound_segment
if segment and self.check_segment(segment):
return segment.get(api.SEGMENTATION_ID)
def update_port_precommit(self, context):
"""Adds port profile and vlan information to the DB.
Assign a port profile to this port. To do that:
1. Get the vlan_id associated with the bound segment
2. Check if a port profile already exists for this vlan_id
3. If yes, associate that port profile with this port.
4. If no, create a new port profile with this vlan_id and
associate with this port
"""
LOG.debug("Inside update_port_precommit")
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
profile = context.current.get(portbindings.PROFILE, {})
if not self.driver.check_vnic_type_and_vendor_info(vnic_type,
profile):
LOG.debug("update_port_precommit encountered a non-SR-IOV port")
return
# If this is an Intel SR-IOV vnic, then no need to create port
# profile on the UCS manager. So no need to update the DB.
if not self.driver.is_vmfex_port(profile):
LOG.debug("update_port_precommit has nothing to do for this "
"sr-iov port")
return
vlan_id = self._get_vlanid(context)
if not vlan_id:
LOG.warn(_LW("update_port_precommit: vlan_id is None."))
return
p_profile_name = self.make_profile_name(vlan_id)
LOG.debug("update_port_precommit: Profile: %s, VLAN_id: %d",
p_profile_name, vlan_id)
# Create a new port profile entry in the db
self.ucsm_db.add_port_profile(p_profile_name, vlan_id)
def update_port_postcommit(self, context):
"""Creates a port profile on UCS Manager.
Creates a Port Profile for this VLAN if it does not already
exist.
"""
LOG.debug("Inside update_port_postcommit")
vlan_id = self._get_vlanid(context)
if not vlan_id:
LOG.warn(_LW("update_port_postcommit: vlan_id is None."))
return
# Check if UCS Manager needs to create a Port Profile.
# 1. Make sure this is a vm_fex_port.(Port profiles are created
# only for VM-FEX ports.)
# 2. Make sure update_port_precommit added an entry in the DB
# for this port profile
# 3. Make sure that the Port Profile hasn't already been created.
profile = context.current.get(portbindings.PROFILE, {})
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
if (self.driver.check_vnic_type_and_vendor_info(vnic_type, profile) and
self.driver.is_vmfex_port(profile)):
LOG.debug("update_port_postcommit: VM-FEX port updated for "
"vlan_id %d", vlan_id)
profile_name = self.ucsm_db.get_port_profile_for_vlan(vlan_id)
if self.ucsm_db.is_port_profile_created(vlan_id):
LOG.debug("update_port_postcommit: Port Profile %s for "
"vlan_id %d already exists. Nothing to do.",
profile_name, vlan_id)
return
# Ask the UCS Manager driver to create the above Port Profile.
# Connection to the UCS Manager is managed from within the driver.
if self.driver.create_portprofile(profile_name, vlan_id,
vnic_type):
# Port profile created on UCS, record that in the DB.
self.ucsm_db.set_port_profile_created(vlan_id, profile_name)
return
else:
# Enable vlan-id for this regular Neutron virtual port.
host_id = context.current.get(portbindings.HOST_ID)
LOG.debug("update_port_postcommit: Host_id is %s", host_id)
self.driver.update_serviceprofile(host_id, vlan_id)
def delete_network_precommit(self, context):
"""Delete entry corresponding to Network's VLAN in the DB."""
segments = context.network_segments
vlan_id = segments[0]['segmentation_id']
if vlan_id:
self.ucsm_db.delete_vlan_entry(vlan_id)
def delete_network_postcommit(self, context):
"""Delete all configuration added to UCS Manager for the vlan_id."""
segments = context.network_segments
vlan_id = segments[0]['segmentation_id']
port_profile = self.make_profile_name(vlan_id)
if vlan_id:
self.driver.delete_all_config_for_vlan(vlan_id, port_profile)
def bind_port(self, context):
"""Binds port to current network segment.
Binds port only if the vnic_type is direct or macvtap and
the port is from a supported vendor. While binding port set it
in ACTIVE state and provide the Port Profile or Vlan Id as part
vif_details.
"""
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
LOG.debug("Attempting to bind port %(port)s with vnic_type "
"%(vnic_type)s on network %(network)s",
{'port': context.current['id'],
'vnic_type': vnic_type,
'network': context.network.current['id']})
profile = context.current.get(portbindings.PROFILE, {})
if not self.driver.check_vnic_type_and_vendor_info(vnic_type,
profile):
return
for segment in context.network.network_segments:
if self.check_segment(segment):
vlan_id = segment[api.SEGMENTATION_ID]
if not vlan_id:
LOG.warn(_LW("Bind port: vlan_id is None."))
return
LOG.debug("Port binding to Vlan_id: %s", str(vlan_id))
# Check if this is a Cisco VM-FEX port or Intel SR_IOV port
if self.driver.is_vmfex_port(profile):
profile_name = self.make_profile_name(vlan_id)
self.vif_details[
const.VIF_DETAILS_PROFILEID] = profile_name
else:
self.vif_details[
portbindings.VIF_DETAILS_VLAN] = str(vlan_id)
context.set_binding(segment[api.ID],
self.vif_type,
self.vif_details,
constants.PORT_STATUS_ACTIVE)
return
LOG.error(_LE("UCS Mech Driver: Failed binding port ID %(id)s "
"on any segment of network %(network)s"),
{'id': context.current['id'],
'network': context.network.current['id']})
@staticmethod
def check_segment(segment):
network_type = segment[api.NETWORK_TYPE]
return network_type == p_const.TYPE_VLAN
@staticmethod
def make_profile_name(vlan_id):
return const.PORT_PROFILE_NAME_PREFIX + str(vlan_id)
|
pfhayes/boto | refs/heads/develop | boto/pyami/installers/ubuntu/mysql.py | 153 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This installer will install mysql-server on an Ubuntu machine.
In addition to the normal installation done by apt-get, it will
also configure the new MySQL server to store it's data files in
a different location. By default, this is /mnt but that can be
configured in the [MySQL] section of the boto config file passed
to the instance.
"""
from boto.pyami.installers.ubuntu.installer import Installer
import os
import boto
from boto.utils import ShellCommand
from boto.compat import ConfigParser
import time
ConfigSection = """
[MySQL]
root_password = <will be used as MySQL root password, default none>
data_dir = <new data dir for MySQL, default is /mnt>
"""
class MySQL(Installer):
def install(self):
self.run('apt-get update')
self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True)
# def set_root_password(self, password=None):
# if not password:
# password = boto.config.get('MySQL', 'root_password')
# if password:
# self.run('mysqladmin -u root password %s' % password)
# return password
def change_data_dir(self, password=None):
data_dir = boto.config.get('MySQL', 'data_dir', '/mnt')
fresh_install = False
is_mysql_running_command = ShellCommand('mysqladmin ping') # exit status 0 if mysql is running
is_mysql_running_command.run()
if is_mysql_running_command.getStatus() == 0:
# mysql is running. This is the state apt-get will leave it in. If it isn't running,
# that means mysql was already installed on the AMI and there's no need to stop it,
# saving 40 seconds on instance startup.
time.sleep(10) #trying to stop mysql immediately after installing it fails
# We need to wait until mysql creates the root account before we kill it
# or bad things will happen
i = 0
while self.run("echo 'quit' | mysql -u root") != 0 and i < 5:
time.sleep(5)
i = i + 1
self.run('/etc/init.d/mysql stop')
self.run("pkill -9 mysql")
mysql_path = os.path.join(data_dir, 'mysql')
if not os.path.exists(mysql_path):
self.run('mkdir %s' % mysql_path)
fresh_install = True
self.run('chown -R mysql:mysql %s' % mysql_path)
fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w')
fp.write('# created by pyami\n')
fp.write('# use the %s volume for data\n' % data_dir)
fp.write('[mysqld]\n')
fp.write('datadir = %s\n' % mysql_path)
fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log'))
fp.close()
if fresh_install:
self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path)
self.start('mysql')
else:
#get the password ubuntu expects to use:
config_parser = ConfigParser()
config_parser.read('/etc/mysql/debian.cnf')
password = config_parser.get('client', 'password')
            # start the mysql daemon, then mysql with the required grant statement piped into it:
self.start('mysql')
time.sleep(10) #time for mysql to start
grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password
while self.run(grant_command) != 0:
time.sleep(5)
# leave mysqld running
def main(self):
self.install()
        # change_data_dir runs 'mysql -u root' which assumes there is no mysql password,
# and changing that is too ugly to be worth it:
#self.set_root_password()
self.change_data_dir()
|
nkhuyu/data-science-from-scratch | refs/heads/master | code/neural_networks.py | 54 | from __future__ import division
from collections import Counter
from functools import partial
from linear_algebra import dot
import math, random
import matplotlib
import matplotlib.pyplot as plt
def step_function(x):
return 1 if x >= 0 else 0
def perceptron_output(weights, bias, x):
"""returns 1 if the perceptron 'fires', 0 if not"""
return step_function(dot(weights, x) + bias)
def sigmoid(t):
return 1 / (1 + math.exp(-t))
def neuron_output(weights, inputs):
return sigmoid(dot(weights, inputs))
def feed_forward(neural_network, input_vector):
"""takes in a neural network (represented as a list of lists of lists of weights)
and returns the output from forward-propagating the input"""
outputs = []
for layer in neural_network:
input_with_bias = input_vector + [1] # add a bias input
output = [neuron_output(neuron, input_with_bias) # compute the output
for neuron in layer] # for this layer
outputs.append(output) # and remember it
# the input to the next layer is the output of this one
input_vector = output
return outputs
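# A minimal usage sketch for feed_forward (weights below are hand-picked for
# illustration, not trained; the last weight of each neuron multiplies the
# constant bias input of 1 that feed_forward appends):
#
#   xor_network = [  # hidden layer: two neurons, two weights each plus a bias
#       [[20.0, 20.0, -30.0],    # behaves like an AND gate
#        [20.0, 20.0, -10.0]],   # behaves like an OR gate
#       # output layer: one neuron over the two hidden outputs plus a bias
#       [[-60.0, 60.0, -30.0]]]  # fires for OR-but-not-AND, i.e. XOR
#
#   feed_forward(xor_network, [1, 0])[-1]    # ~[1.0]
#   feed_forward(xor_network, [1, 1])[-1]    # ~[0.0]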
def backpropagate(network, input_vector, target):
hidden_outputs, outputs = feed_forward(network, input_vector)
# the output * (1 - output) is from the derivative of sigmoid
output_deltas = [output * (1 - output) * (output - target[i])
for i, output in enumerate(outputs)]
# adjust weights for output layer (network[-1])
for i, output_neuron in enumerate(network[-1]):
for j, hidden_output in enumerate(hidden_outputs + [1]):
output_neuron[j] -= output_deltas[i] * hidden_output
# back-propagate errors to hidden layer
hidden_deltas = [hidden_output * (1 - hidden_output) *
dot(output_deltas, [n[i] for n in network[-1]])
for i, hidden_output in enumerate(hidden_outputs)]
# adjust weights for hidden layer (network[0])
for i, hidden_neuron in enumerate(network[0]):
for j, input in enumerate(input_vector + [1]):
hidden_neuron[j] -= hidden_deltas[i] * input
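# Note on the deltas above: for a sigmoid unit, d/dx sigmoid(x) equals
# sigmoid(x) * (1 - sigmoid(x)), which is where the output * (1 - output)
# factor comes from; the remaining factor is the error propagated back from
# the layer above (a squared-error loss is assumed here).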
def patch(x, y, hatch, color):
"""return a matplotlib 'patch' object with the specified
location, crosshatch pattern, and color"""
return matplotlib.patches.Rectangle((x - 0.5, y - 0.5), 1, 1,
hatch=hatch, fill=False, color=color)
def show_weights(neuron_idx):
weights = network[0][neuron_idx]
abs_weights = map(abs, weights)
grid = [abs_weights[row:(row+5)] # turn the weights into a 5x5 grid
for row in range(0,25,5)] # [weights[0:5], ..., weights[20:25]]
ax = plt.gca() # to use hatching, we'll need the axis
ax.imshow(grid, # here same as plt.imshow
cmap=matplotlib.cm.binary, # use white-black color scale
interpolation='none') # plot blocks as blocks
# cross-hatch the negative weights
for i in range(5): # row
for j in range(5): # column
if weights[5*i + j] < 0: # row i, column j = weights[5*i + j]
# add black and white hatches, so visible whether dark or light
ax.add_patch(patch(j, i, '/', "white"))
ax.add_patch(patch(j, i, '\\', "black"))
plt.show()
if __name__ == "__main__":
raw_digits = [
"""11111
1...1
1...1
1...1
11111""",
"""..1..
..1..
..1..
..1..
..1..""",
"""11111
....1
11111
1....
11111""",
"""11111
....1
11111
....1
11111""",
"""1...1
1...1
11111
....1
....1""",
"""11111
1....
11111
....1
11111""",
"""11111
1....
11111
1...1
11111""",
"""11111
....1
....1
....1
....1""",
"""11111
1...1
11111
1...1
11111""",
"""11111
1...1
11111
....1
11111"""]
def make_digit(raw_digit):
return [1 if c == '1' else 0
for row in raw_digit.split("\n")
for c in row.strip()]
inputs = map(make_digit, raw_digits)
targets = [[1 if i == j else 0 for i in range(10)]
for j in range(10)]
random.seed(0) # to get repeatable results
input_size = 25 # each input is a vector of length 25
num_hidden = 5 # we'll have 5 neurons in the hidden layer
output_size = 10 # we need 10 outputs for each input
# each hidden neuron has one weight per input, plus a bias weight
hidden_layer = [[random.random() for __ in range(input_size + 1)]
for __ in range(num_hidden)]
# each output neuron has one weight per hidden neuron, plus a bias weight
output_layer = [[random.random() for __ in range(num_hidden + 1)]
for __ in range(output_size)]
# the network starts out with random weights
network = [hidden_layer, output_layer]
# 10,000 iterations seems enough to converge
for __ in range(10000):
for input_vector, target_vector in zip(inputs, targets):
backpropagate(network, input_vector, target_vector)
def predict(input):
return feed_forward(network, input)[-1]
for i, input in enumerate(inputs):
outputs = predict(input)
print i, [round(p,2) for p in outputs]
print """.@@@.
...@@
..@@.
...@@
.@@@."""
print [round(x, 2) for x in
predict( [0,1,1,1,0, # .@@@.
0,0,0,1,1, # ...@@
0,0,1,1,0, # ..@@.
0,0,0,1,1, # ...@@
0,1,1,1,0]) # .@@@.
]
print
print """.@@@.
@..@@
.@@@.
@..@@
.@@@."""
print [round(x, 2) for x in
predict( [0,1,1,1,0, # .@@@.
1,0,0,1,1, # @..@@
0,1,1,1,0, # .@@@.
1,0,0,1,1, # @..@@
0,1,1,1,0]) # .@@@.
]
print
|
gregtampa/coreemu | refs/heads/master | daemon/setup.py | 7 | # Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
import os, glob
from distutils.core import setup
from core.constants import COREDPY_VERSION
setup(name = "core-python",
version = COREDPY_VERSION,
packages = [
"core",
"core.addons",
"core.api",
"core.emane",
"core.misc",
"core.bsd",
"core.netns",
"core.phys",
"core.xen",
"core.services",
],
description = "Python components of CORE",
url = "http://www.nrl.navy.mil/itd/ncs/products/core",
author = "Boeing Research & Technology",
author_email = "[email protected]",
license = "BSD",
long_description="Python scripts and modules for building virtual " \
"emulated networks.")
|
toshywoshy/ansible | refs/heads/devel | lib/ansible/modules/network/aci/mso_schema_template_filter_entry.py | 13 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_filter_entry
short_description: Manage filter entries in schema templates
description:
- Manage filter entries in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
filter:
description:
- The name of the filter to manage.
type: str
required: yes
filter_display_name:
description:
- The name as displayed on the MSO web interface.
type: str
entry:
description:
- The filter entry name to manage.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
aliases: [ entry_display_name ]
description:
description:
      - The description of this filter entry.
type: str
aliases: [ entry_description ]
ethertype:
description:
- The ethernet type to use for this filter entry.
type: str
choices: [ arp, fcoe, ip, ipv4, ipv6, mac-security, mpls-unicast, trill, unspecified ]
ip_protocol:
description:
- The IP protocol to use for this filter entry.
type: str
choices: [ eigrp, egp, icmp, icmpv6, igmp, igp, l2tp, ospfigp, pim, tcp, udp, unspecified ]
tcp_session_rules:
description:
- A list of TCP session rules.
type: list
choices: [ acknowledgement, established, finish, synchronize, reset, unspecified ]
source_from:
description:
- The source port range from.
type: str
source_to:
description:
- The source port range to.
type: str
destination_from:
description:
- The destination port range from.
type: str
destination_to:
description:
- The destination port range to.
type: str
arp_flag:
description:
- The ARP flag to use for this filter entry.
type: str
choices: [ reply, request, unspecified ]
stateful:
description:
- Whether this filter entry is stateful.
type: bool
default: no
fragments_only:
description:
- Whether this filter entry only matches fragments.
type: bool
default: no
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: mso_schema_template_contract_filter
notes:
- Due to restrictions of the MSO REST API this module creates filters when needed, and removes them when the last entry has been removed.
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new filter entry
mso_schema_template_filter_entry:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
filter: Filter 1
state: present
delegate_to: localhost
- name: Remove a filter entry
mso_schema_template_filter_entry:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
filter: Filter 1
state: absent
delegate_to: localhost
- name: Query a specific filter entry
mso_schema_template_filter_entry:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
filter: Filter 1
state: query
delegate_to: localhost
register: query_result
- name: Query all filter entries
mso_schema_template_filter_entry:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, issubset
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
filter=dict(type='str', required=True),
filter_display_name=dict(type='str'),
entry=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects
description=dict(type='str', aliases=['entry_description']),
display_name=dict(type='str', aliases=['entry_display_name']),
ethertype=dict(type='str', choices=['arp', 'fcoe', 'ip', 'ipv4', 'ipv6', 'mac-security', 'mpls-unicast', 'trill', 'unspecified']),
ip_protocol=dict(type='str', choices=['eigrp', 'egp', 'icmp', 'icmpv6', 'igmp', 'igp', 'l2tp', 'ospfigp', 'pim', 'tcp', 'udp', 'unspecified']),
tcp_session_rules=dict(type='list', choices=['acknowledgement', 'established', 'finish', 'synchronize', 'reset', 'unspecified']),
source_from=dict(type='str'),
source_to=dict(type='str'),
destination_from=dict(type='str'),
destination_to=dict(type='str'),
arp_flag=dict(type='str', choices=['reply', 'request', 'unspecified']),
stateful=dict(type='bool'),
fragments_only=dict(type='bool'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['entry']],
['state', 'present', ['entry']],
],
)
schema = module.params.get('schema')
template = module.params.get('template')
filter_name = module.params.get('filter')
filter_display_name = module.params.get('filter_display_name')
entry = module.params.get('entry')
display_name = module.params.get('display_name')
description = module.params.get('description')
ethertype = module.params.get('ethertype')
ip_protocol = module.params.get('ip_protocol')
tcp_session_rules = module.params.get('tcp_session_rules')
source_from = module.params.get('source_from')
source_to = module.params.get('source_to')
destination_from = module.params.get('destination_from')
destination_to = module.params.get('destination_to')
arp_flag = module.params.get('arp_flag')
stateful = module.params.get('stateful')
fragments_only = module.params.get('fragments_only')
state = module.params.get('state')
mso = MSOModule(module)
# Get schema
schema_obj = mso.get_obj('schemas', displayName=schema)
if not schema_obj:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
# Get template
templates = [t.get('name') for t in schema_obj.get('templates')]
if template not in templates:
mso.fail_json(msg="Provided template '{template}' does not exist. Existing templates: {templates}".format(template=template,
templates=', '.join(templates)))
template_idx = templates.index(template)
# Get filters
mso.existing = {}
filter_idx = None
entry_idx = None
filters = [f.get('name') for f in schema_obj.get('templates')[template_idx]['filters']]
if filter_name in filters:
filter_idx = filters.index(filter_name)
entries = [f.get('name') for f in schema_obj.get('templates')[template_idx]['filters'][filter_idx]['entries']]
if entry in entries:
entry_idx = entries.index(entry)
mso.existing = schema_obj.get('templates')[template_idx]['filters'][filter_idx]['entries'][entry_idx]
if state == 'query':
if entry is None:
if filter_idx is None:
mso.fail_json(msg="Filter '{filter}' not found".format(filter=filter_name))
mso.existing = schema_obj.get('templates')[template_idx]['filters'][filter_idx]['entries']
elif not mso.existing:
mso.fail_json(msg="Entry '{entry}' not found".format(entry=entry))
mso.exit_json()
filters_path = '/templates/{0}/filters'.format(template)
filter_path = '/templates/{0}/filters/{1}'.format(template, filter_name)
entries_path = '/templates/{0}/filters/{1}/entries'.format(template, filter_name)
entry_path = '/templates/{0}/filters/{1}/entries/{2}'.format(template, filter_name, entry)
ops = []
mso.previous = mso.existing
if state == 'absent':
mso.proposed = mso.sent = {}
if filter_idx is None:
# There was no filter to begin with
pass
elif entry_idx is None:
# There was no entry to begin with
pass
elif len(entries) == 1:
# There is only one entry, remove filter
mso.existing = {}
ops.append(dict(op='remove', path=filter_path))
else:
mso.existing = {}
ops.append(dict(op='remove', path=entry_path))
elif state == 'present':
if not mso.existing:
if display_name is None:
display_name = entry
if description is None:
description = ''
if ethertype is None:
ethertype = 'unspecified'
if ip_protocol is None:
ip_protocol = 'unspecified'
if tcp_session_rules is None:
tcp_session_rules = ['unspecified']
if source_from is None:
source_from = 'unspecified'
if source_to is None:
source_to = 'unspecified'
if destination_from is None:
destination_from = 'unspecified'
if destination_to is None:
destination_to = 'unspecified'
if arp_flag is None:
arp_flag = 'unspecified'
if stateful is None:
stateful = False
if fragments_only is None:
fragments_only = False
payload = dict(
name=entry,
displayName=display_name,
description=description,
etherType=ethertype,
ipProtocol=ip_protocol,
tcpSessionRules=tcp_session_rules,
sourceFrom=source_from,
sourceTo=source_to,
destinationFrom=destination_from,
destinationTo=destination_to,
arpFlag=arp_flag,
stateful=stateful,
matchOnlyFragments=fragments_only,
)
mso.sanitize(payload, collate=True)
if filter_idx is None:
# Filter does not exist, so we have to create it
if filter_display_name is None:
filter_display_name = filter_name
payload = dict(
name=filter_name,
displayName=filter_display_name,
entries=[mso.sent],
)
ops.append(dict(op='add', path=filters_path + '/-', value=payload))
elif entry_idx is None:
# Entry does not exist, so we have to add it
ops.append(dict(op='add', path=entries_path + '/-', value=mso.sent))
else:
# Entry exists, we have to update it
for (key, value) in mso.sent.items():
ops.append(dict(op='replace', path=entry_path + '/' + key, value=value))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
gangadhar-kadam/sms-erpnext | refs/heads/master | support/report/maintenance_schedules/__init__.py | 12133432 | |
agfor/chipy.org | refs/heads/master | chipy_org/apps/profiles/__init__.py | 12133432 | |
shikhardb/scikit-learn | refs/heads/master | sklearn/neighbors/tests/test_ball_tree.py | 10 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
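# brute_force_neighbors is the reference the tree queries below are checked
# against: it builds the full pairwise distance matrix, then keeps the k
# smallest distances (and their indices) per query row.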
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
ind1, dist1 = bt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
ind2, dist2 = bt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
if __name__ == '__main__':
import nose
nose.runmodule()
|
Widiot/simpleblog | refs/heads/master | venv/lib/python3.5/site-packages/bleach/utils.py | 6 | from collections import OrderedDict
import six
def _attr_key(attr):
"""Returns appropriate key for sorting attribute names
Attribute names are a tuple of ``(namespace, name)`` where namespace can be
    ``None`` or a string. These can't be compared in Python 3, so we convert the
``None`` to an empty string.
"""
key = (attr[0][0] or ''), attr[0][1]
return key
def alphabetize_attributes(attrs):
"""Takes a dict of attributes (or None) and returns them alphabetized"""
if not attrs:
return attrs
return OrderedDict(
[(k, v) for k, v in sorted(attrs.items(), key=_attr_key)]
)
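# Rough usage sketch (the attribute dict below is illustrative; keys follow the
# (namespace, name) convention described in _attr_key):
#
#   attrs = {(None, 'href'): 'http://example.com', (None, 'alt'): 'link'}
#   alphabetize_attributes(attrs)
#   # -> OrderedDict([((None, 'alt'), 'link'),
#   #                 ((None, 'href'), 'http://example.com')])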
def force_unicode(text):
"""Takes a text (Python 2: str/unicode; Python 3: unicode) and converts to unicode
:arg str/unicode text: the text in question
:returns: text as unicode
:raises UnicodeDecodeError: if the text was a Python 2 str and isn't in
utf-8
"""
# If it's already unicode, then return it
if isinstance(text, six.text_type):
return text
# If not, convert it
return six.text_type(text, 'utf-8', 'strict')
|
ekmartin/mal | refs/heads/master | rpython/mal_types.py | 23 | import sys, copy, types as pytypes
# General functions
def _equal_Q(a, b):
assert isinstance(a, MalType) and isinstance(b, MalType)
ota, otb = a.__class__, b.__class__
if not (ota is otb or (_sequential_Q(a) and _sequential_Q(b))):
return False
if isinstance(a, MalSym) and isinstance(b, MalSym):
return a.value == b.value
elif isinstance(a, MalStr) and isinstance(b, MalStr):
return a.value == b.value
elif isinstance(a, MalInt) and isinstance(b, MalInt):
return a.value == b.value
elif _list_Q(a) or _vector_Q(a):
if len(a) != len(b): return False
for i in range(len(a)):
if not _equal_Q(a[i], b[i]): return False
return True
## elif _hash_map_Q(a):
## akeys = a.keys()
## akeys.sort()
## bkeys = b.keys()
## bkeys.sort()
## if len(akeys) != len(bkeys): return False
## for i in range(len(akeys)):
## if akeys[i] != bkeys[i]: return False
## if not equal_Q(a[akeys[i]], b[bkeys[i]]): return False
## return True
elif a is b:
return True
else:
throw_str("no = op defined for %s" % a.__class__.__name__)
def _sequential_Q(seq): return _list_Q(seq) or _vector_Q(seq)
def _clone(obj):
if isinstance(obj, MalFunc):
return MalFunc(obj.fn, obj.ast, obj.env, obj.params,
obj.EvalFunc, obj.ismacro)
elif isinstance(obj, MalList):
return obj.__class__(obj.values)
elif isinstance(obj, MalHashMap):
return MalHashMap(obj.dct)
elif isinstance(obj, MalAtom):
return MalAtom(obj.value)
else:
raise Exception("_clone on invalid type")
def _replace(match, sub, old_str):
new_str = u""
idx = 0
while idx < len(old_str):
midx = old_str.find(match, idx)
if midx < 0: break
assert midx >= 0 and midx < len(old_str)
new_str = new_str + old_str[idx:midx]
new_str = new_str + sub
idx = midx + len(match)
new_str = new_str + old_str[idx:]
return new_str
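# Example (illustrative): _replace(u"a", u"o", u"banana") returns u"bonono".
# Every occurrence of `match` is substituted by hand rather than via
# str.replace, presumably to stay within RPython's restricted subset.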
#
# Mal Types
#
class MalException(Exception):
def __init__(self, object):
self.object = object
def throw_str(s):
raise MalException(MalStr(unicode(s)))
### Parent types
class MalType(): pass
class MalMeta(MalType): pass
### Scalars
class MalNil(MalType): pass
nil = MalNil()
def _nil_Q(exp):
assert isinstance(exp, MalType)
return exp is nil
class MalTrue(MalType): pass
true = MalTrue()
def _true_Q(exp):
assert isinstance(exp, MalType)
return exp is true
class MalFalse(MalType): pass
false = MalFalse()
def _false_Q(exp):
assert isinstance(exp, MalType)
return exp is false
# Numbers
class MalInt(MalType):
def __init__(self, value):
assert isinstance(value, int)
self.value = value
def _int_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalInt
# String
class MalStr(MalType):
def __init__(self, value):
assert isinstance(value, unicode)
self.value = value
def __len__(self):
return len(self.value)
def _string_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalStr
# Keywords
# A specially prefixed string
def _keyword(mstr):
assert isinstance(mstr, MalType)
if isinstance(mstr, MalStr):
val = mstr.value
if val[0] == u"\u029e": return mstr
else: return MalStr(u"\u029e" + val)
else:
throw_str("_keyword called on non-string")
# Create keyword from unicode string
def _keywordu(strn):
assert isinstance(strn, unicode)
return MalStr(u"\u029e" + strn)
def _keyword_Q(exp):
if isinstance(exp, MalStr):
return exp.value[0] == u"\u029e"
else:
return False
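# Example (illustrative): _keywordu(u"kw") yields a MalStr whose value is
# u"\u029e" + u"kw"; _keyword_Q recognizes keywords by that prefix character.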
# Symbols
class MalSym(MalMeta):
def __init__(self, value):
assert isinstance(value, unicode)
self.value = value
self.meta = nil
def _symbol(strn):
assert isinstance(strn, unicode)
return MalSym(strn)
def _symbol_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalSym
# lists
class MalList(MalMeta):
def __init__(self, vals):
assert isinstance(vals, list)
self.values = vals
self.meta = nil
def append(self, val):
self.values.append(val)
def rest(self):
return MalList(self.values[1:])
def __len__(self):
return len(self.values)
def __getitem__(self, i):
assert isinstance(i, int)
return self.values[i]
def slice(self, start):
return MalList(self.values[start:len(self.values)])
def slice2(self, start, end):
assert end >= 0
return MalList(self.values[start:end])
def _list(*vals): return MalList(list(vals))
def _listl(lst): return MalList(lst)
def _list_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalList
### vectors
class MalVector(MalList):
pass
def _vector(*vals): return MalVector(list(vals))
def _vectorl(lst): return MalVector(lst)
def _vector_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalVector
### hash maps
class MalHashMap(MalMeta):
def __init__(self, dct):
self.dct = dct
self.meta = nil
def append(self, val):
self.dct.append(val)
def __getitem__(self, k):
assert isinstance(k, unicode)
if not isinstance(k, unicode):
throw_str("hash-map lookup by non-string/non-keyword")
return self.dct[k]
def __setitem__(self, k, v):
if not isinstance(k, unicode):
throw_str("hash-map key must be string or keyword")
assert isinstance(v, MalType)
self.dct[k] = v
return v
def _hash_mapl(kvs):
dct = {}
for i in range(0, len(kvs), 2):
k = kvs[i]
if not isinstance(k, MalStr):
throw_str("hash-map key must be string or keyword")
v = kvs[i+1]
dct[k.value] = v
return MalHashMap(dct)
def _hash_map_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalHashMap
# Functions
# env import must happen after MalSym and MalList definitions to allow
# circular dependency
from env import Env
class MalFunc(MalMeta):
def __init__(self, fn, ast=None, env=None, params=None,
EvalFunc=None, ismacro=False):
if fn is None and EvalFunc is None:
throw_str("MalFunc requires either fn or EvalFunc")
self.fn = fn
self.ast = ast
self.env = env
self.params = params
self.EvalFunc = EvalFunc
self.ismacro = ismacro
self.meta = nil
def apply(self, args):
if self.EvalFunc:
return self.EvalFunc(self.ast, self.gen_env(args))
else:
return self.fn(args)
def gen_env(self, args):
return Env(self.env, self.params, args)
def _function_Q(exp):
assert isinstance(exp, MalType)
return exp.__class__ is MalFunc
# atoms
class MalAtom(MalMeta):
def __init__(self, value):
self.value = value
self.meta = nil
def _atom(val): return MalAtom(val)
def _atom_Q(exp): return exp.__class__ is MalAtom
|
thnee/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_host_ssl_info.py | 21 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_ssl_info
short_description: Gather info of ESXi host system about SSL
description:
- This module can be used to gather SSL thumbprint information for a host.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
    - SSL thumbprint information about all ESXi host systems in the given cluster will be reported.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname.
- SSL thumbprint information of this ESXi host system will be reported.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster
vmware_host_ssl_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: all_host_ssl_info
- name: Get SSL Thumbprint info about "{{ esxi_hostname }}"
vmware_host_ssl_info:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: '{{ esxi_hostname }}'
register: ssl_info
- set_fact:
ssl_thumbprint: "{{ ssl_info['host_ssl_info'][esxi_hostname]['ssl_thumbprints'][0] }}"
- debug:
msg: "{{ ssl_thumbprint }}"
- name: Add ESXi Host to vCenter
vmware_host:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
cluster_name: '{{ cluster_name }}'
esxi_hostname: '{{ esxi_hostname }}'
esxi_username: '{{ esxi_username }}'
esxi_password: '{{ esxi_password }}'
esxi_ssl_thumbprint: '{{ ssl_thumbprint }}'
state: present
'''
RETURN = r'''
host_ssl_info:
description:
- dict with hostname as key and dict with SSL thumbprint related info
returned: info
type: dict
sample:
{
"10.76.33.215": {
"owner_tag": "",
"principal": "vpxuser",
"ssl_thumbprints": [
"E3:E8:A9:20:8D:32:AE:59:C6:8D:A5:91:B0:20:EF:00:A2:7C:27:EE",
"F1:AC:DA:6E:D8:1E:37:36:4A:5C:07:E5:04:0B:87:C8:75:FB:42:01"
]
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VMwareHostSslManager(PyVmomi):
def __init__(self, module):
super(VMwareHostSslManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.hosts_info = {}
def gather_ssl_info(self):
for host in self.hosts:
self.hosts_info[host.name] = dict(
principal='',
owner_tag='',
ssl_thumbprints=[])
host_ssl_info_mgr = host.config.sslThumbprintInfo
if host_ssl_info_mgr:
self.hosts_info[host.name]['principal'] = host_ssl_info_mgr.principal
self.hosts_info[host.name]['owner_tag'] = host_ssl_info_mgr.ownerTag
self.hosts_info[host.name]['ssl_thumbprints'] = [i for i in host_ssl_info_mgr.sslThumbprints]
self.module.exit_json(changed=False, host_ssl_info=self.hosts_info)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str'),
esxi_hostname=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True,
)
vmware_host_accept_config = VMwareHostSslManager(module)
vmware_host_accept_config.gather_ssl_info()
if __name__ == "__main__":
main()
|
bugobliterator/ardupilot | refs/heads/master | Tools/ardupilotwaf/gbenchmark.py | 20 | #!/usr/bin/env python
# encoding: utf-8
"""
gbenchmark is a Waf tool for benchmark builds in Ardupilot
"""
from waflib import Build, Context, Task
from waflib.Configure import conf
from waflib.TaskGen import feature, before_method, after_method
from waflib.Errors import WafError
def configure(cfg):
env = cfg.env
env.HAS_GBENCHMARK = False
if env.TOOLCHAIN != 'native':
cfg.msg(
'Gbenchmark',
'cross-compilation currently not supported',
color='YELLOW',
)
return
cfg.load('cmake')
env.GBENCHMARK_PREFIX_REL = 'gbenchmark'
bldnode = cfg.bldnode.make_node(cfg.variant)
prefix_node = bldnode.make_node(env.GBENCHMARK_PREFIX_REL)
env.INCLUDES_GBENCHMARK = [prefix_node.make_node('include').abspath()]
env.LIBPATH_GBENCHMARK = [prefix_node.make_node('lib').abspath()]
env.LIB_GBENCHMARK = ['benchmark']
env.append_value('GIT_SUBMODULES', 'gbenchmark')
env.HAS_GBENCHMARK = True
@conf
def libbenchmark(bld):
prefix_node = bld.bldnode.make_node(bld.env.GBENCHMARK_PREFIX_REL)
gbenchmark = bld.cmake(
name='gbenchmark',
cmake_src='modules/gbenchmark',
cmake_bld='gbenchmark_build',
cmake_vars=dict(
CMAKE_BUILD_TYPE='Release',
CMAKE_INSTALL_PREFIX=prefix_node.abspath(),
BENCHMARK_ENABLE_GTEST_TESTS='OFF',
BENCHMARK_ENABLE_TESTING='OFF',
),
)
prefix_node = bld.bldnode.make_node(bld.env.GBENCHMARK_PREFIX_REL)
output_paths = (
'lib/libbenchmark.a',
'include/benchmark/benchmark.h',
)
outputs = [prefix_node.make_node(path) for path in output_paths]
gbenchmark.build('install', target=outputs)
@feature('gbenchmark')
@before_method('process_use')
def append_gbenchmark_use(self):
self.use = self.to_list(getattr(self, 'use', []))
if 'GBENCHMARK' not in self.use:
self.use.append('GBENCHMARK')
@feature('gbenchmark')
@after_method('process_source')
def wait_for_gbenchmark_install(self):
gbenchmark_install = self.bld.get_tgen_by_name('gbenchmark_install')
gbenchmark_install.post()
for task in self.compiled_tasks:
task.set_run_after(gbenchmark_install.cmake_build_task)
task.dep_nodes.extend(gbenchmark_install.cmake_build_task.outputs)
|
lastralab/Statistics | refs/heads/master | Specialization/Dr. Chuck-s Code/twitter2.py | 2 | import urllib
import twurl
import json
TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'
while True:
print ''
acct = raw_input('Enter Twitter Account:')
if ( len(acct) < 1 ) : break
url = twurl.augment(TWITTER_URL,
{'screen_name': acct, 'count': '5'} )
print 'Retrieving', url
connection = urllib.urlopen(url)
data = connection.read()
headers = connection.info().dict
print 'Remaining', headers['x-rate-limit-remaining']
js = json.loads(data)
print json.dumps(js, indent=4)
for u in js['users'] :
print u['screen_name']
s = u['status']['text']
print ' ',s[:50]
|
ismailsunni/inasafe | refs/heads/develop | safe/definitions/peta_bencana.py | 11 | # coding=utf-8
"""Definitions relating to API that used in Peta Bencana downloader."""
from safe.utilities.i18n import tr
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
development_api = {
'key': 'development_api',
'name': tr('Development API'),
'url': 'https://data-dev.petabencana.id/floods'
'?city={city_code}&geoformat=geojson&format=json&minimum_state=1',
'help_url': 'https://docs.petabencana.id/',
'available_data': [
{
'code': 'jbd',
'name': 'Jabodetabek'
},
{
'code': 'bdg',
'name': 'Bandung'
},
{
'code': 'sby',
'name': 'Surabaya'
}
]
}
production_api = {
'key': 'production_api',
'name': tr('Production API'),
'url': 'https://data.petabencana.id/floods'
'?city={city_code}&geoformat=geojson&format=json&minimum_state=1',
'help_url': 'https://docs.petabencana.id/',
'available_data': [
{
'code': 'jbd',
'name': 'Jabodetabek'
},
{
'code': 'bdg',
'name': 'Bandung'
},
{
'code': 'sby',
'name': 'Surabaya'
}
]
}
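# The 'url' values above are templates; a caller would substitute the city code
# before requesting, e.g. (illustrative):
#
#   production_api['url'].format(city_code='jbd')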
|
goddardl/gaffer | refs/heads/master | python/GafferRenderManUI/InteractiveRenderManRenderUI.py | 2 | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferRenderMan
Gaffer.Metadata.registerNode(
GafferRenderMan.InteractiveRenderManRender,
"description",
"""
Performs interactive renders using a RenderMan renderer
which supports interactive edits. In 3delight the following
edits are currently supported :
- Adding/removing lights
- Adjusting light parameters
- Moving the camera
- Moving coordinate systems
- Changing shader assignments
- Editing shader networks
- Editing shader parameters
""",
)
|
michaellaier/pymor | refs/heads/master | src/pymor/playground/functions/__init__.py | 34 | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
|
libkml/libkml | refs/heads/master | tests/swig/kmlengine_test.py | 2 | #!/usr/bin/env python
# Copyright 2008, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This file contains unit tests for the KML Engine Python SWIG bindings.
"""
import unittest
import kmldom
import kmlengine
import os
# These VerySimple tests really only verify that the function/class exists.
class VerySimpleKmlDomTestCase(unittest.TestCase):
""" Verify very simple KML DOM usage """
def runTest(self):
factory = kmldom.KmlFactory_GetFactory()
placemark = factory.CreatePlacemark()
name = 'hi'
placemark.set_name(name)
assert name == placemark.get_name()
class VerySimpleBboxTestCase(unittest.TestCase):
""" Verify very simple usage of class Bbox """
def runTest(self):
bbox = kmlengine.Bbox(38, 36, -120, -122)
assert bbox.Contains(37, -121)
class AllMethodsBboxTestCase(unittest.TestCase):
""" Verify basic usage of each method of kmlengine.Bbox """
def runTest(self):
bbox = kmlengine.Bbox()
assert -180.0 == bbox.get_north()
assert 180.0 == bbox.get_south()
assert -180.0 == bbox.get_east()
assert 180.0 == bbox.get_west()
bbox_a = kmlengine.Bbox(10.1, -11.2, 99.99, -88.88)
bbox.ExpandFromBbox(bbox_a)
assert 10.1 == bbox.get_north()
assert -11.2 == bbox.get_south()
assert 99.99 == bbox.get_east()
assert -88.88 == bbox.get_west()
bbox.ExpandLatitude(20.2)
assert 20.2 == bbox.get_north()
assert -11.2 == bbox.get_south()
bbox.ExpandLatitude(-20.2)
assert 20.2 == bbox.get_north()
assert -20.2 == bbox.get_south()
bbox.ExpandLongitude(101.101)
assert 101.101 == bbox.get_east()
assert -88.88 == bbox.get_west()
bbox.ExpandLongitude(-101.101)
assert 101.101 == bbox.get_east()
assert -101.101 == bbox.get_west()
bbox.ExpandLatLon(30.3, -111.222);
assert 30.3 == bbox.get_north()
assert -20.2 == bbox.get_south()
assert 101.101 == bbox.get_east()
assert -111.222 == bbox.get_west()
class VerySimpleCloneTestCase(unittest.TestCase):
""" Verify very simple usage of Clone() """
def runTest(self):
factory = kmldom.KmlFactory_GetFactory()
placemark = factory.CreatePlacemark()
name = 'hi'
placemark.set_name(name)
clone_element = kmlengine.Clone(placemark)
assert clone_element
clone_placemark = kmldom.AsPlacemark(clone_element)
assert clone_placemark
assert name == clone_placemark.get_name()
# Change the name in the original
new_name = 'new name'
placemark.set_name(new_name)
# Verify that the clone still has the old name
assert name == clone_placemark.get_name()
class VerySimpleGetFeatureLatLonTestCase(unittest.TestCase):
def runTest(self):
kml = '<Placemark>' \
'<Point><coordinates>1,2,3</coordinates></Point>' \
'</Placemark>'
element = kmldom.ParseKml(kml)
assert element
feature = kmldom.AsFeature(element)
assert feature
status,lat,lon = kmlengine.GetFeatureLatLon(feature)
assert True == status
assert 1 == lon
assert 2 == lat
class VerySimpleGetFeatureBoundsTestCase(unittest.TestCase):
def runTest(self):
kml = '<Placemark>' \
'<LineString><coordinates>1,2 -1,-2</coordinates></LineString>' \
'</Placemark>'
element = kmldom.ParseKml(kml)
assert element
feature = kmldom.AsFeature(element)
assert feature
bbox = kmlengine.Bbox()
status = kmlengine.GetFeatureBounds(feature, bbox)
assert status
assert 2 == bbox.get_north()
assert -2 == bbox.get_south()
assert 1 == bbox.get_east()
assert -1 == bbox.get_west()
class VerySimpleGetRootFeatureTestCase(unittest.TestCase):
""" Verify very simple usage of GetRootFeature() """
def runTest(self):
factory = kmldom.KmlFactory_GetFactory()
placemark = factory.CreatePlacemark()
feature = kmlengine.GetRootFeature(placemark)
assert feature
assert kmldom.AsPlacemark(feature)
class VerySimpleKmzSplitTestCase(unittest.TestCase):
def runTest(self):
kml_url = 'http://foo.com/goo.kmz/bar.jpg'
status,kmz_url,kmz_path = kmlengine.KmzSplit(kml_url)
assert True == status
assert 'http://foo.com/goo.kmz' == kmz_url
assert 'bar.jpg' == kmz_path
class VerySimpleSplitUriTestCase(unittest.TestCase):
def runTest(self):
status,scheme,host,port,path,query,fragment = \
kmlengine.SplitUri('http://host.com:82/a/b/c.kml?p=q#id')
assert True == status
assert 'http' == scheme
assert 'host.com' == host
assert 'a/b/c.kml' == path
assert 'p=q' == query
assert 'id' == fragment
class BasicKmlFileCreateFromParseTestCase(unittest.TestCase):
""" Verify very simple usage of the KmlFile::CreateFromParse() method """
def runTest(self):
id = 'pm123'
kml = '<Placemark id="%s"><name>hello</name></Placemark>' % id
kmlfile,errors = kmlengine.KmlFile.CreateFromParse(kml)
assert kmlfile
object = kmlfile.GetObjectById(id)
assert object
placemark = kmldom.AsPlacemark(object)
assert placemark
assert placemark.has_id()
assert id == placemark.get_id()
class KmlFileCreateFromParseOfBasicElementTestCase(unittest.TestCase):
def runTest(self):
kmlfile,errors = kmlengine.KmlFile.CreateFromParse('<kml/>')
assert kmlfile
root = kmlfile.get_root()
assert kmldom.Type_kml == root.Type()
class BasicKmlFileCreateFromImportTestCase(unittest.TestCase):
def runTest(self):
factory = kmldom.KmlFactory_GetFactory()
placemark = factory.CreatePlacemark()
id = 'placemark123'
name = 'some name'
placemark.set_id(id)
placemark.set_name(name)
folder = factory.CreateFolder()
folder.add_feature(placemark)
kmlfile = kmlengine.KmlFile.CreateFromImport(folder)
assert kmlfile
object = kmlfile.GetObjectById(id)
assert object
placemark = kmldom.AsPlacemark(object)
assert placemark
assert placemark.has_id()
assert id == placemark.get_id()
assert placemark.has_name()
assert name == placemark.get_name()
class KmlFileCreateFromImportOfBasicElementTestCase(unittest.TestCase):
def runTest(self):
factory = kmldom.KmlFactory_GetFactory()
# TODO: This crashes CreateFromImport as do all non-Object complex elements
# kml = factory.CreateKml()
# This returns an ElementPtr for the given element and works fine in
# CreateFromImport:
kml_as_element = factory.CreateElementById(kmldom.Type_kml)
kml = kmldom.AsKml(kml_as_element)
kml.set_feature(factory.CreatePlacemark())
kmlfile = kmlengine.KmlFile.CreateFromImport(kml_as_element)
assert kmlfile
root = kmlfile.get_root()
assert root
kml = kmldom.AsKml(root)
assert kml
assert kml.has_feature()
placemark = kmldom.AsPlacemark(kml.get_feature())
assert placemark
class BasicKmlFileSerializeToStringTestCase(unittest.TestCase):
def runTest(self):
factory = kmldom.KmlFactory_GetFactory()
kml = factory.CreateElementById(kmldom.Type_kml)
assert kml
kmlfile = kmlengine.KmlFile.CreateFromImport(kml)
assert kmlfile
(ok, xml) = kmlfile.SerializeToString()
assert ok
kExpectedXml = '<?xml version="1.0" encoding="utf-8"?>\n' \
'<kml xmlns="http://www.opengis.net/kml/2.2"/>\n'
assert kExpectedXml == xml
class BasicKmzFileTestCase(unittest.TestCase):
def runTest(self):
kmz_filepath = os.path.join(os.path.dirname(__file__), '../../testdata/kmz/model-macky.kmz')
kmzfile = kmlengine.KmzFile.OpenFromFile(kmz_filepath)
assert kmzfile
(ok, kml) = kmzfile.ReadKml()
assert ok
(kmlfile,errors) = kmlengine.KmlFile.CreateFromParse(kml)
assert kmlfile
root = kmldom.AsKml(kmlfile.get_root())
assert root
placemark = kmldom.AsPlacemark(root.get_feature())
assert placemark
assert 'SketchUp Model of Macky Auditorium' == placemark.get_name()
assert placemark.has_geometry()
model = kmldom.AsModel(placemark.get_geometry())
assert model
assert 'model_4' == model.get_id()
(ok, dae) = kmzfile.ReadFile('geometry/CU-Macky.dae')
assert ok
assert 268477 == len(dae)
def suite():
suite = unittest.TestSuite()
suite.addTest(VerySimpleKmlDomTestCase())
suite.addTest(VerySimpleBboxTestCase())
suite.addTest(AllMethodsBboxTestCase())
suite.addTest(VerySimpleCloneTestCase())
suite.addTest(VerySimpleGetRootFeatureTestCase())
suite.addTest(VerySimpleGetFeatureLatLonTestCase())
suite.addTest(VerySimpleGetFeatureBoundsTestCase())
suite.addTest(VerySimpleKmzSplitTestCase())
suite.addTest(VerySimpleSplitUriTestCase())
suite.addTest(KmlFileCreateFromParseOfBasicElementTestCase())
suite.addTest(BasicKmlFileCreateFromParseTestCase())
suite.addTest(BasicKmlFileCreateFromImportTestCase())
suite.addTest(KmlFileCreateFromImportOfBasicElementTestCase())
suite.addTest(BasicKmlFileSerializeToStringTestCase())
suite.addTest(BasicKmzFileTestCase())
return suite
runner = unittest.TextTestRunner()
runner.run(suite())
|
janusnic/21v-python | refs/heads/master | unit_07/dron/7.py | 1 | # -*- coding:utf-8 -*-
class Y:
"""The vertical motion of a ball."""
def __init__(self, v0):
self.v0 = v0
self.g = 9.81
def value(self, t):
return self.v0*t - 0.5*self.g*t**2
def formula(self):
return 'v0*t - 0.5*g*t**2; v0=%g' % self.v0
def diff(f, x, h=1E-10):
return (f(x+h) - f(x))/h
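# Sanity check, worked by hand: the exact derivative of v0*t - 0.5*g*t**2 is
# v0 - g*t, so diff(y1.value, 0.1) below should come out close to
# 1 - 9.81*0.1 = 0.019.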
y1 = Y(1)
y2 = Y(1.5)
y3 = Y(-3)
dy1dt = diff(y1.value, 0.1)
dy2dt = diff(y2.value, 0.1)
dy3dt = diff(y3.value, 0.2)
print y1.v0
print y2.v0
print y3.v0
|
MyMusicTaste/django-oscar-uplus | refs/heads/master | uplus/admin.py | 1 | from django.contrib import admin
from . import models
class UplusTransactionAdmin(admin.ModelAdmin):
list_display = ['amount', 'basket_id', 'timestamp', 'status', 'pay_type',]
readonly_fields = ['amount', 'status', 'pay_type', 'basket_id']
admin.site.register(models.UplusTransaction, UplusTransactionAdmin)
|
sadig/DC2 | refs/heads/master | components/dc2-lib/dc2/lib/auth/helpers/__init__.py | 1 | # -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from user import get_realname # noqa
from groups import check_membership_in_group # noqa
|
rsalmond/seab-us | refs/heads/master | seabus/api/config.py | 2 | import os
import seabus
class Config(object):
PORT = 6000
DEBUG = False
TESTING = False
SQLALCHEMY_DATABASE_URI = 'sqlite://'
class Test(Config):
TESTING = True
class Dev(Config):
DEBUG=True
seabus_project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
SQLALCHEMY_DATABASE_URI = 'sqlite:///{}/db_seabus.db'.format(seabus_project_root)
class Prod(Config):
DEBUG=False
seabus_project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
SQLALCHEMY_DATABASE_URI = 'sqlite:///{}/db_seabus.db'.format(seabus_project_root)
|
Freestila/dosage | refs/heads/master | dosagelib/plugins/arcamax.py | 3 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2013-2014 Bastian Kleineidam
"""
Arcamax comic strips
"""
from re import compile
from ..scraper import make_scraper
from ..util import tagre
_imageSearch = compile(tagre("img", "data-zoom-image", r'(/newspics/[^"]+)'))
_prevSearch = compile(tagre("a", "href", r'(/[^"]+)', before='prev'))
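# add() builds a scraper class for a single Arcamax strip with make_scraper() and
# registers it in this module's namespace under the name Arcamax_<name>.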
def add(name, shortname):
url = 'http://www.arcamax.com%s' % shortname
classname = 'Arcamax_%s' % name
globals()[classname] = make_scraper(classname,
name='Arcamax/' + name,
url = url,
stripUrl = url + '%s',
imageSearch = _imageSearch,
prevSearch = _prevSearch,
help = 'Index format: none',
)
# do not edit anything below since these entries are generated from scripts/update.sh
# DO NOT REMOVE
#add('9ChickweedLane', '/thefunnies/ninechickweedlane/')
#add('Agnes', '/thefunnies/agnes/')
#add('AndyCapp', '/thefunnies/andycapp/')
#add('Archie', '/thefunnies/archie/')
add('ArcticCircle', '/thefunnies/arcticcircle/')
#add('AskShagg', '/thefunnies/askshagg/')
#add('BC', '/thefunnies/bc/')
add('BabyBlues', '/thefunnies/babyblues/')
#add('BallardStreet', '/thefunnies/ballardstreet/')
#add('BarneyAndClyde', '/thefunnies/barneyandclyde/')
add('BarneyGoogleAndSnuffySmith', '/thefunnies/barneygoogle/')
add('BeetleBailey', '/thefunnies/beetlebailey/')
add('Bizarro', '/thefunnies/bizarro/')
add('BleekerTheRechargeableDog', '/thefunnies/bleekertherechargeabledog/')
add('Blondie', '/thefunnies/blondie/')
add('Boondocks', '/thefunnies/boondocks/')
add('BrilliantMindofEdisonLee', '/thefunnies/brilliantmindofedisonlee/')
#add('Candorville', '/thefunnies/candorville/')
#add('Cathy', '/thefunnies/cathy/')
#add('ChuckleBros', '/thefunnies/chucklebros/')
add('Crankshaft', '/thefunnies/crankshaft/')
#add('CuldeSac', '/thefunnies/culdesac/')
add('Curtis', '/thefunnies/curtis/')
#add('DaddysHome', '/thefunnies/daddyshome/')
add('DeFlocked', '/thefunnies/deflocked/')
add('DennistheMenace', '/thefunnies/dennisthemenace/')
#add('DiamondLil', '/thefunnies/diamondlil/')
#add('Dilbert', '/thefunnies/dilbert/')
add('DinetteSet', '/thefunnies/thedinetteset/')
#add('DogEatDoug', '/thefunnies/dogeatdoug/')
#add('DogsofCKennel', '/thefunnies/dogsofckennel/')
#add('Doonesbury', '/thefunnies/doonesbury/')
add('Dustin', '/thefunnies/dustin/')
add('FamilyCircus', '/thefunnies/familycircus/')
#add('FloAndFriends', '/thefunnies/floandfriends/')
#add('ForHeavensSake', '/thefunnies/forheavenssake/')
#add('FortKnox', '/thefunnies/fortknox/')
#add('FreeRange', '/thefunnies/freerange/')
#add('Garfield', '/thefunnies/garfield/')
#add('GetFuzzy', '/thefunnies/getfuzzy/')
#add('Heathcliff', '/thefunnies/heathcliff/')
#add('HerbandJamaal', '/thefunnies/herbandjamaal/')
add('HiandLois', '/thefunnies/hiandlois/')
#add('HomeAndAway', '/thefunnies/homeandaway/')
add('IntelligentLife', '/thefunnies/intelligentlife/')
add('JerryKingCartoons', '/thefunnies/humorcartoon/')
#add('LittleDogLost', '/thefunnies/littledoglost/')
#add('LongStoryShort', '/thefunnies/longstoryshort/')
#add('LooseParts', '/thefunnies/looseparts/')
#add('Luann', '/thefunnies/luann/')
add('MallardFillmore', '/thefunnies/mallardfillmore/')
add('Marvin', '/thefunnies/marvin/')
add('MeaningofLila', '/thefunnies/meaningoflila/')
#add('MikeDuJour', '/thefunnies/mikedujour/')
#add('Momma', '/thefunnies/momma/')
add('MotherGooseAndGrimm', '/thefunnies/mothergooseandgrimm/')
add('Mutts', '/thefunnies/mutts/')
#add('NestHeads', '/thefunnies/nestheads/')
#add('NonSequitur', '/thefunnies/nonsequitur/')
#add('OneBigHappy', '/thefunnies/onebighappy/')
#add('Peanuts', '/thefunnies/peanuts/')
#add('PearlsBeforeSwine', '/thefunnies/pearlsbeforeswine/')
#add('Pickles', '/thefunnies/pickles/')
#add('RedandRover', '/thefunnies/redandrover/')
#add('ReplyAll', '/thefunnies/replyall/')
add('RhymeswithOrange', '/thefunnies/rhymeswithorange/')
#add('Rubes', '/thefunnies/rubes/')
#add('RudyPark', '/thefunnies/rudypark/')
#add('Rugrats', '/thefunnies/rugrats/')
#add('ScaryGary', '/thefunnies/scarygary/')
#add('SpeedBump', '/thefunnies/speedbump/')
#add('StrangeBrew', '/thefunnies/strangebrew/')
add('TakeItFromTheTinkersons', '/thefunnies/takeitfromthetinkersons/')
#add('TheBarn', '/thefunnies/thebarn/')
add('TheLockhorns', '/thefunnies/thelockhorns/')
#add('TheOtherCoast', '/thefunnies/theothercoast/')
add('TinasGroove', '/thefunnies/tinasgroove/')
#add('WeePals', '/thefunnies/weepals/')
#add('WizardofId', '/thefunnies/wizardofid/')
#add('WorkingitOut', '/thefunnies/workingitout/')
#add('Wumo', '/thefunnies/wumo/')
#add('ZackHill', '/thefunnies/zackhill/')
add('Zits', '/thefunnies/zits/')
|
dylanseago/CommunityFund | refs/heads/master | server/communityfund/apps/home/models.py | 1 | from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from django.core.urlresolvers import reverse
from django.db.models import Sum
from communityfund.apps.home.templatetags.custom_tags import currency_filter
class DatedModel(models.Model):
created_on = models.DateTimeField(auto_now_add=True)
modified_on = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Community(DatedModel):
creator = models.ForeignKey(User, related_name="communities_created")
name = models.TextField(max_length=100)
description = models.TextField(max_length=1000)
subscribers = models.ManyToManyField(User, related_name="subscriptions")
def get_absolute_url(self):
return reverse('community', args=[str(self.id)])
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Communities"
class Project(DatedModel):
initiator = models.ForeignKey(User, related_name="projects_created")
community = models.ForeignKey(Community, related_name="projects")
name = models.TextField(max_length=100)
summary = models.TextField(max_length=250)
about = models.TextField(max_length=20000)
goal = models.PositiveIntegerField()
deadline = models.DateTimeField()
@property
def num_funded(self):
return self.fundings.count()
@property
def amount_funded(self):
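        # aggregate() returns {'amount__sum': None} when there are no fundings,
        # hence the explicit fallback to 0 below.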
amount_funded = self.fundings.all().aggregate(Sum('amount'))['amount__sum']
return amount_funded if amount_funded else 0
@property
def percent_funded(self):
return round((self.amount_funded / self.goal) * 100)
@property
def goal_reached(self):
return self.amount_funded >= self.goal
@property
def over(self):
        # `time_to_fund` is not a field on this model; `deadline` appears to be the
        # intended attribute for the funding cutoff.
        return datetime.now() > self.deadline
def get_absolute_url(self):
return reverse('project', args=[str(self.id)])
def __str__(self):
return self.name
class Funding(models.Model):
project = models.ForeignKey(Project, related_name="fundings")
user = models.ForeignKey(User, related_name="fundings")
amount = models.PositiveIntegerField()
funded_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
        return '{} by {}'.format(currency_filter(self.amount), self.user.get_full_name())
|
pepetreshere/odoo | refs/heads/patch-2 | addons/sales_team/models/res_partner.py | 5 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ResPartner(models.Model):
_inherit = 'res.partner'
team_id = fields.Many2one(
'crm.team', 'Sales Team',
help='If set, this Sales Team will be used for sales and assignments related to this partner')
|
mgpyh/django-fluent-comments | refs/heads/master | example/article/admin.py | 3 | from django.contrib import admin
from django.forms import ModelForm
from article.models import Article
# The timezone support was introduced in Django 1.4, fallback to standard library for 1.3.
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
class ArticleAdminForm(ModelForm):
def __init__(self, *args, **kwargs):
super(ArticleAdminForm, self).__init__(*args, **kwargs)
self.fields['publication_date'].required = False # The admin's .save() method fills in a default.
class ArticleAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',)}
form = ArticleAdminForm
fieldsets = (
(None, {
'fields': ('title', 'slug',),
}),
("Contents", {
'fields': ('content',),
}),
("Publication settings", {
'fields': ('publication_date', 'enable_comments',),
}),
)
def save_model(self, request, obj, form, change):
if not obj.publication_date:
# auto_now_add makes the field uneditable.
# a default in the model fills the field before the post is written (too early)
obj.publication_date = now()
obj.save()
admin.site.register(Article, ArticleAdmin)
|
DigitalCampus/django-nurhi-oppia | refs/heads/master | oppia/utils/__init__.py | 12133432 | |
nirtiac/network_modularity | refs/heads/master | network_modularity/__init__.py | 12133432 | |
alistair-broomhead/robotframework-selenium2library | refs/heads/master | test/unit/keywords/__init__.py | 12133432 | |
erikr/django | refs/heads/master | tests/choices/__init__.py | 12133432 |