repo_name | path | copies | size | license
---|---|---|---|---
ljgabc/lfs | usr/lib/python2.7/test/test_zipimport.py | 128 | 16817 | gpl-2.0
import sys
import os
import marshal
import imp
import struct
import time
import unittest
from test import test_support
from test.test_importhooks import ImportHooksBaseTestCase, test_src, test_co
# some tests can be run even without zlib
try:
import zlib
except ImportError:
zlib = None
from zipfile import ZipFile, ZipInfo, ZIP_STORED, ZIP_DEFLATED
import zipimport
import linecache
import doctest
import inspect
import StringIO
from traceback import extract_tb, extract_stack, print_tb
raise_src = 'def do_raise(): raise TypeError\n'
def make_pyc(co, mtime):
data = marshal.dumps(co)
if type(mtime) is type(0.0):
# Mac mtimes need a bit of special casing
if mtime < 0x7fffffff:
mtime = int(mtime)
else:
mtime = int(-0x100000000L + long(mtime))
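            # e.g. a hypothetical mtime of 0x80000001 becomes
            # -0x100000000 + 0x80000001 == -0x7fffffff, which fits the
            # signed 32-bit "<i" pack used below (two's-complement wrap)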
pyc = imp.get_magic() + struct.pack("<i", int(mtime)) + data
return pyc
def module_path_to_dotted_name(path):
return path.replace(os.sep, '.')
NOW = time.time()
test_pyc = make_pyc(test_co, NOW)
if __debug__:
pyc_ext = ".pyc"
else:
pyc_ext = ".pyo"
TESTMOD = "ziptestmodule"
TESTPACK = "ziptestpackage"
TESTPACK2 = "ziptestpackage2"
TEMP_ZIP = os.path.abspath("junk95142" + os.extsep + "zip")
class UncompressedZipImportTestCase(ImportHooksBaseTestCase):
compression = ZIP_STORED
def setUp(self):
# We're reusing the zip archive path, so we must clear the
# cached directory info and linecache
linecache.clearcache()
zipimport._zip_directory_cache.clear()
ImportHooksBaseTestCase.setUp(self)
def doTest(self, expected_ext, files, *modules, **kw):
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
stuff = kw.get("stuff", None)
if stuff is not None:
# Prepend 'stuff' to the start of the zipfile
f = open(TEMP_ZIP, "rb")
data = f.read()
f.close()
f = open(TEMP_ZIP, "wb")
f.write(stuff)
f.write(data)
f.close()
sys.path.insert(0, TEMP_ZIP)
mod = __import__(".".join(modules), globals(), locals(),
["__dummy__"])
call = kw.get('call')
if call is not None:
call(mod)
if expected_ext:
file = mod.get_file()
self.assertEqual(file, os.path.join(TEMP_ZIP,
*modules) + expected_ext)
finally:
z.close()
os.remove(TEMP_ZIP)
def testAFakeZlib(self):
#
# This could cause a stack overflow before: importing zlib.py
# from a compressed archive would cause zlib to be imported
# which would find zlib.py in the archive, which would... etc.
#
# This test *must* be executed first: it must be the first one
# to trigger zipimport to import zlib (zipimport caches the
# zlib.decompress function object, after which the problem being
# tested here wouldn't be a problem anymore...
# (Hence the 'A' in the test method name: to make it the first
# item in a list sorted by name, like unittest.makeSuite() does.)
#
# This test fails on platforms on which the zlib module is
# statically linked, but the problem it tests for can't
# occur in that case (builtin modules are always found first),
# so we'll simply skip it then. Bug #765456.
#
if "zlib" in sys.builtin_module_names:
return
if "zlib" in sys.modules:
del sys.modules["zlib"]
files = {"zlib.py": (NOW, test_src)}
try:
self.doTest(".py", files, "zlib")
except ImportError:
if self.compression != ZIP_DEFLATED:
self.fail("expected test to not raise ImportError")
else:
if self.compression != ZIP_STORED:
self.fail("expected test to raise ImportError")
def testPy(self):
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD)
def testPyc(self):
files = {TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTMOD)
def testBoth(self):
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTMOD)
def testEmptyPy(self):
files = {TESTMOD + ".py": (NOW, "")}
self.doTest(None, files, TESTMOD)
def testBadMagic(self):
# make pyc magic word invalid, forcing loading from .py
m0 = ord(test_pyc[0])
m0 ^= 0x04 # flip an arbitrary bit
badmagic_pyc = chr(m0) + test_pyc[1:]
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
self.doTest(".py", files, TESTMOD)
def testBadMagic2(self):
# make pyc magic word invalid, causing an ImportError
m0 = ord(test_pyc[0])
m0 ^= 0x04 # flip an arbitrary bit
badmagic_pyc = chr(m0) + test_pyc[1:]
files = {TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
try:
self.doTest(".py", files, TESTMOD)
except ImportError:
pass
else:
self.fail("expected ImportError; import from bad pyc")
def testBadMTime(self):
t3 = ord(test_pyc[7])
t3 ^= 0x02 # flip the second bit -- not the first as that one
# isn't stored in the .py's mtime in the zip archive.
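        # (Zip/DOS timestamps have 2-second resolution, so the lowest
        # bit of the mtime is never stored in the archive.)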
badtime_pyc = test_pyc[:7] + chr(t3) + test_pyc[8:]
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, badtime_pyc)}
self.doTest(".py", files, TESTMOD)
def testPackage(self):
packdir = TESTPACK + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir + TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTPACK, TESTMOD)
def testDeepPackage(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTPACK, TESTPACK2, TESTMOD)
def testZipImporterMethods(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP)
self.assertEqual(zi.archive, TEMP_ZIP)
self.assertEqual(zi.is_package(TESTPACK), True)
mod = zi.load_module(TESTPACK)
self.assertEqual(zi.get_filename(TESTPACK), mod.__file__)
self.assertEqual(zi.is_package(packdir + '__init__'), False)
self.assertEqual(zi.is_package(packdir + TESTPACK2), True)
self.assertEqual(zi.is_package(packdir2 + TESTMOD), False)
mod_path = packdir2 + TESTMOD
mod_name = module_path_to_dotted_name(mod_path)
__import__(mod_name)
mod = sys.modules[mod_name]
self.assertEqual(zi.get_source(TESTPACK), None)
self.assertEqual(zi.get_source(mod_path), None)
self.assertEqual(zi.get_filename(mod_path), mod.__file__)
# To pass in the module name instead of the path, we must use the right importer
loader = mod.__loader__
self.assertEqual(loader.get_source(mod_name), None)
self.assertEqual(loader.get_filename(mod_name), mod.__file__)
# test prefix and archivepath members
zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK)
self.assertEqual(zi2.archive, TEMP_ZIP)
self.assertEqual(zi2.prefix, TESTPACK + os.sep)
finally:
z.close()
os.remove(TEMP_ZIP)
def testZipImporterMethodsInSubDirectory(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir)
self.assertEqual(zi.archive, TEMP_ZIP)
self.assertEqual(zi.prefix, packdir)
self.assertEqual(zi.is_package(TESTPACK2), True)
mod = zi.load_module(TESTPACK2)
self.assertEqual(zi.get_filename(TESTPACK2), mod.__file__)
self.assertEqual(zi.is_package(TESTPACK2 + os.sep + '__init__'), False)
self.assertEqual(zi.is_package(TESTPACK2 + os.sep + TESTMOD), False)
mod_path = TESTPACK2 + os.sep + TESTMOD
mod_name = module_path_to_dotted_name(mod_path)
__import__(mod_name)
mod = sys.modules[mod_name]
self.assertEqual(zi.get_source(TESTPACK2), None)
self.assertEqual(zi.get_source(mod_path), None)
self.assertEqual(zi.get_filename(mod_path), mod.__file__)
# To pass in the module name instead of the path, we must use the right importer
loader = mod.__loader__
self.assertEqual(loader.get_source(mod_name), None)
self.assertEqual(loader.get_filename(mod_name), mod.__file__)
finally:
z.close()
os.remove(TEMP_ZIP)
def testGetData(self):
z = ZipFile(TEMP_ZIP, "w")
z.compression = self.compression
try:
name = "testdata.dat"
data = "".join([chr(x) for x in range(256)]) * 500
z.writestr(name, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP)
self.assertEqual(data, zi.get_data(name))
self.assertIn('zipimporter object', repr(zi))
finally:
z.close()
os.remove(TEMP_ZIP)
def testImporterAttr(self):
src = """if 1: # indent hack
def get_file():
return __file__
if __loader__.get_data("some.data") != "some data":
raise AssertionError, "bad data"\n"""
pyc = make_pyc(compile(src, "<???>", "exec"), NOW)
files = {TESTMOD + pyc_ext: (NOW, pyc),
"some.data": (NOW, "some data")}
self.doTest(pyc_ext, files, TESTMOD)
def testImport_WithStuff(self):
# try importing from a zipfile which contains additional
# stuff at the beginning of the file
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD,
stuff="Some Stuff"*31)
def assertModuleSource(self, module):
self.assertEqual(inspect.getsource(module), test_src)
def testGetSource(self):
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD, call=self.assertModuleSource)
def testGetCompiledSource(self):
pyc = make_pyc(compile(test_src, "<???>", "exec"), NOW)
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, pyc)}
self.doTest(pyc_ext, files, TESTMOD, call=self.assertModuleSource)
def runDoctest(self, callback):
files = {TESTMOD + ".py": (NOW, test_src),
"xyz.txt": (NOW, ">>> log.append(True)\n")}
self.doTest(".py", files, TESTMOD, call=callback)
def doDoctestFile(self, module):
log = []
old_master, doctest.master = doctest.master, None
try:
doctest.testfile(
'xyz.txt', package=module, module_relative=True,
globs=locals()
)
finally:
doctest.master = old_master
self.assertEqual(log,[True])
def testDoctestFile(self):
self.runDoctest(self.doDoctestFile)
def doDoctestSuite(self, module):
log = []
doctest.DocFileTest(
'xyz.txt', package=module, module_relative=True,
globs=locals()
).run()
self.assertEqual(log,[True])
def testDoctestSuite(self):
self.runDoctest(self.doDoctestSuite)
def doTraceback(self, module):
try:
module.do_raise()
except:
tb = sys.exc_info()[2].tb_next
f,lno,n,line = extract_tb(tb, 1)[0]
self.assertEqual(line, raise_src.strip())
f,lno,n,line = extract_stack(tb.tb_frame, 1)[0]
self.assertEqual(line, raise_src.strip())
s = StringIO.StringIO()
print_tb(tb, 1, s)
self.assertTrue(s.getvalue().endswith(raise_src))
else:
raise AssertionError("This ought to be impossible")
def testTraceback(self):
files = {TESTMOD + ".py": (NOW, raise_src)}
self.doTest(None, files, TESTMOD, call=self.doTraceback)
@unittest.skipUnless(zlib, "requires zlib")
class CompressedZipImportTestCase(UncompressedZipImportTestCase):
compression = ZIP_DEFLATED
class BadFileZipImportTestCase(unittest.TestCase):
def assertZipFailure(self, filename):
self.assertRaises(zipimport.ZipImportError,
zipimport.zipimporter, filename)
def testNoFile(self):
self.assertZipFailure('AdfjdkFJKDFJjdklfjs')
def testEmptyFilename(self):
self.assertZipFailure('')
def testBadArgs(self):
self.assertRaises(TypeError, zipimport.zipimporter, None)
self.assertRaises(TypeError, zipimport.zipimporter, TESTMOD, kwd=None)
def testFilenameTooLong(self):
self.assertZipFailure('A' * 33000)
def testEmptyFile(self):
test_support.unlink(TESTMOD)
open(TESTMOD, 'w+').close()
self.assertZipFailure(TESTMOD)
def testFileUnreadable(self):
test_support.unlink(TESTMOD)
fd = os.open(TESTMOD, os.O_CREAT, 000)
try:
os.close(fd)
self.assertZipFailure(TESTMOD)
finally:
# If we leave "the read-only bit" set on Windows, nothing can
# delete TESTMOD, and later tests suffer bogus failures.
os.chmod(TESTMOD, 0666)
test_support.unlink(TESTMOD)
def testNotZipFile(self):
test_support.unlink(TESTMOD)
fp = open(TESTMOD, 'w+')
fp.write('a' * 22)
fp.close()
self.assertZipFailure(TESTMOD)
# XXX: disabled until this works on Big-endian machines
def _testBogusZipFile(self):
test_support.unlink(TESTMOD)
fp = open(TESTMOD, 'w+')
fp.write(struct.pack('=I', 0x06054B50))
fp.write('a' * 18)
fp.close()
z = zipimport.zipimporter(TESTMOD)
try:
self.assertRaises(TypeError, z.find_module, None)
self.assertRaises(TypeError, z.load_module, None)
self.assertRaises(TypeError, z.is_package, None)
self.assertRaises(TypeError, z.get_code, None)
self.assertRaises(TypeError, z.get_data, None)
self.assertRaises(TypeError, z.get_source, None)
error = zipimport.ZipImportError
self.assertEqual(z.find_module('abc'), None)
self.assertRaises(error, z.load_module, 'abc')
self.assertRaises(error, z.get_code, 'abc')
self.assertRaises(IOError, z.get_data, 'abc')
self.assertRaises(error, z.get_source, 'abc')
self.assertRaises(error, z.is_package, 'abc')
finally:
zipimport._zip_directory_cache.clear()
def test_main():
try:
test_support.run_unittest(
UncompressedZipImportTestCase,
CompressedZipImportTestCase,
BadFileZipImportTestCase,
)
finally:
test_support.unlink(TESTMOD)
if __name__ == "__main__":
test_main()
dylanparsons/Sample-Code | python/pickle.py | 1 | 3287 | gpl-3.0
import pickle
import math
import random
def arrow(n):
return str(n)+"--> "
def isPositive(aNumber):
return aNumber > 0
def abs(aNumber):
if aNumber >= 0:
return aNumber
return -aNumber
def main2():
""" test two build in high order functions: map and filter functions"""
oldList = [-10, -20, 0, 30,40]
print("oldList: ", oldList)
newList = [] # a string version of oldList
for number in oldList:
newList.append(str(number))
print("newList: ", newList)
newList2 = list( map(str, oldList) )
print("newList2: ", newList2)
newList3 = list( map(arrow, oldList) )
print("newList3: ", newList3)
newList4 = list( filter(isPositive, oldList) )
print("newList4: ", newList4)
newList5 = list( map(abs, oldList) )
print("newList5: ", newList5)
newList6 = list( filter(abs, oldList) )
print("newList6: ", newList6)
print()
def main3():
""" test pickling"""
lyst = ["COMP", 164, "-03", "pi=", 3.14]
fileObj = open("items.dat", "wb")
for item in lyst:
pickle.dump(item, fileObj)
fileObj.close()
lyst2 = list()
fileObj = open("items.dat", "rb")
while True:
try:
item = pickle.load(fileObj)
lyst2.append(item)
except EOFError:
break
fileObj.close()
print(lyst2)
fileObj = open("items2.dat", "wb")
pickle.dump(lyst, fileObj)
fileObj.close()
print()
def main4():
""" test pickling"""
lyst = ["COMP", 164, "-03", "pi=", 3.14]
fileObj = open("items.dat", "wb")
pickle.dump(lyst, fileObj)
fileObj.close()
fileObj = open("items.dat", "rb")
lyst2 = pickle.load(fileObj)
fileObj.close()
print(lyst2)
print()
def main5():
""" test two build in high order functions: map and filter functions"""
oldList = []
for i in range(10):
oldList.append(random.randint(-500, 500))
print("oldList: ", oldList)
newList = []
newList = list( map(polynomial, oldList) )
print("NewList: ", newList)
newNewList = list(filter(isBetween100, oldList))
print("Number of Numbers between -100 and 100: ", len(newNewList))
newJar = open("name.dat", "wb")
pickle.dump(newList, newJar)
newJar.close()
NewestList=[]
oldJar=open("name.dat", "rb")
NewestList = pickle.load(oldJar)
oldJar.close()
print("Here is the unpickled list: ")
print(NewestList)
print()
print("newList: ", newList)
newList2 = list( map(str, oldList) )
print("newList2: ", newList2)
newList3 = list( map(arrow, oldList) )
print("newList3: ", newList3)
newList4 = list( filter(isPositive, oldList) )
print("newList4: ", newList4)
newList5 = list( map(abs, oldList) )
print("newList5: ", newList5)
newList6 = list( filter(abs, oldList) )
print("newList6: ", newList6)
print()
def polynomial(x):
return ((3*x*x)-2*x-1)
def isBetween100(x):
if x>=-100 and x<=100:
return True
return False
if __name__ == "__main__":
main2()
main3()
main4()
main5()
MiLk/youtube-dl | youtube_dl/extractor/ign.py | 12 | 4549 | unlicense
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class IGNIE(InfoExtractor):
"""
    Extractor for some of the IGN sites, like www.ign.com, es.ign.com, and de.ign.com.
Some videos of it.ign.com are also supported
"""
_VALID_URL = r'https?://.+?\.ign\.com/(?P<type>videos|show_videos|articles|(?:[^/]*/feature))(/.+)?/(?P<name_or_id>.+)'
IE_NAME = 'ign.com'
_CONFIG_URL_TEMPLATE = 'http://www.ign.com/videos/configs/id/%s.config'
_DESCRIPTION_RE = [
r'<span class="page-object-description">(.+?)</span>',
r'id="my_show_video">.*?<p>(.*?)</p>',
]
_TESTS = [
{
'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
'md5': 'eac8bdc1890980122c3b66f14bdd02e9',
'info_dict': {
'id': '8f862beef863986b2785559b9e1aa599',
'ext': 'mp4',
'title': 'The Last of Us Review',
'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c',
}
},
{
'url': 'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
'playlist': [
{
'info_dict': {
'id': '5ebbd138523268b93c9141af17bec937',
'ext': 'mp4',
'title': 'GTA 5 Video Review',
'description': 'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
},
},
{
'info_dict': {
'id': '638672ee848ae4ff108df2a296418ee2',
'ext': 'mp4',
'title': '26 Twisted Moments from GTA 5 in Slow Motion',
'description': 'The twisted beauty of GTA 5 in stunning slow motion.',
},
},
],
'params': {
'skip_download': True,
},
},
]
def _find_video_id(self, webpage):
res_id = [
r'data-video-id="(.+?)"',
r'<object id="vid_(.+?)"',
r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
]
return self._search_regex(res_id, webpage, 'video id')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name_or_id = mobj.group('name_or_id')
page_type = mobj.group('type')
webpage = self._download_webpage(url, name_or_id)
if page_type == 'articles':
video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, 'video url')
return self.url_result(video_url, ie='IGN')
elif page_type != 'video':
multiple_urls = re.findall(
'<param name="flashvars" value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
webpage)
if multiple_urls:
return [self.url_result(u, ie='IGN') for u in multiple_urls]
video_id = self._find_video_id(webpage)
result = self._get_video_info(video_id)
description = self._html_search_regex(self._DESCRIPTION_RE,
webpage, 'video description', flags=re.DOTALL)
result['description'] = description
return result
def _get_video_info(self, video_id):
config_url = self._CONFIG_URL_TEMPLATE % video_id
config = self._download_json(config_url, video_id)
media = config['playlist']['media']
return {
'id': media['metadata']['videoId'],
'url': media['url'],
'title': media['metadata']['title'],
'thumbnail': media['poster'][0]['url'].replace('{size}', 'grande'),
}
class OneUPIE(IGNIE):
_VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
IE_NAME = '1up.com'
_DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'
_TESTS = [{
'url': 'http://gamevideos.1up.com/video/id/34976',
'md5': '68a54ce4ebc772e4b71e3123d413163d',
'info_dict': {
'id': '34976',
'ext': 'mp4',
'title': 'Sniper Elite V2 - Trailer',
'description': 'md5:5d289b722f5a6d940ca3136e9dae89cf',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
result = super(OneUPIE, self)._real_extract(url)
result['id'] = mobj.group('name_or_id')
return result
abenzbiria/clients_odoo | addons/website_hr_recruitment/controllers/main.py | 19 | 5703 | agpl-3.0
# -*- coding: utf-8 -*-
import base64
from openerp import SUPERUSER_ID
from openerp import http
from openerp.tools.translate import _
from openerp.http import request
from openerp.addons.website.models.website import slug
class website_hr_recruitment(http.Controller):
@http.route([
'/jobs',
'/jobs/country/<model("res.country"):country>',
'/jobs/department/<model("hr.department"):department>',
'/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>',
'/jobs/office/<int:office_id>',
'/jobs/country/<model("res.country"):country>/office/<int:office_id>',
'/jobs/department/<model("hr.department"):department>/office/<int:office_id>',
'/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>/office/<int:office_id>',
], type='http', auth="public", website=True)
def jobs(self, country=None, department=None, office_id=None):
env = request.env(context=dict(request.env.context, show_address=True, no_tag_br=True))
Country = env['res.country']
Jobs = env['hr.job']
# List jobs available to current UID
job_ids = Jobs.search([], order="website_published desc,no_of_recruitment desc").ids
# Browse jobs as superuser, because address is restricted
jobs = Jobs.sudo().browse(job_ids)
# Deduce departments and offices of those jobs
departments = set(j.department_id for j in jobs if j.department_id)
offices = set(j.address_id for j in jobs if j.address_id)
countries = set(o.country_id for o in offices if o.country_id)
# Default search by user country
if not (country or department or office_id):
country_code = request.session['geoip'].get('country_code')
if country_code:
countries_ = Country.search([('code', '=', country_code)])
country = countries_[0] if countries_ else None
# Filter the matching one
if country:
jobs = (j for j in jobs if j.address_id is None or j.address_id.country_id and j.address_id.country_id.id == country.id)
if department:
jobs = (j for j in jobs if j.department_id and j.department_id.id == department.id)
if office_id:
jobs = (j for j in jobs if j.address_id and j.address_id.id == office_id)
# Render page
return request.website.render("website_hr_recruitment.index", {
'jobs': jobs,
'countries': countries,
'departments': departments,
'offices': offices,
'country_id': country,
'department_id': department,
'office_id': office_id,
})
@http.route('/jobs/add', type='http', auth="user", website=True)
def jobs_add(self, **kwargs):
job = request.env['hr.job'].create({
'name': _('New Job Offer'),
})
return request.redirect("/jobs/detail/%s?enable_editor=1" % slug(job))
@http.route('/jobs/detail/<model("hr.job"):job>', type='http', auth="public", website=True)
def jobs_detail(self, job, **kwargs):
return request.render("website_hr_recruitment.detail", {
'job': job,
'main_object': job,
})
@http.route('/jobs/apply/<model("hr.job"):job>', type='http', auth="public", website=True)
def jobs_apply(self, job):
error = {}
default = {}
if 'website_hr_recruitment_error' in request.session:
error = request.session.pop('website_hr_recruitment_error')
default = request.session.pop('website_hr_recruitment_default')
return request.render("website_hr_recruitment.apply", {
'job': job,
'error': error,
'default': default,
})
@http.route('/jobs/thankyou', methods=['POST'], type='http', auth="public", website=True)
def jobs_thankyou(self, **post):
error = {}
for field_name in ["partner_name", "phone", "email_from"]:
if not post.get(field_name):
error[field_name] = 'missing'
if error:
request.session['website_hr_recruitment_error'] = error
ufile = post.pop('ufile')
if ufile:
error['ufile'] = 'reset'
request.session['website_hr_recruitment_default'] = post
return request.redirect('/jobs/apply/%s' % post.get("job_id"))
# public user can't create applicants (duh)
env = request.env(user=SUPERUSER_ID)
value = {
'source_id' : env.ref('hr_recruitment.source_website_company').id,
'name': '%s\'s Application' % post.get('partner_name'),
}
for f in ['email_from', 'partner_name', 'description']:
value[f] = post.get(f)
for f in ['department_id', 'job_id']:
value[f] = int(post.get(f) or 0)
        # Retro-compatibility for saas-3. "phone" field should be replaced by "partner_phone" in the template in trunk.
value['partner_phone'] = post.pop('phone', False)
applicant_id = env['hr.applicant'].create(value).id
if post['ufile']:
attachment_value = {
'name': post['ufile'].filename,
'res_name': value['partner_name'],
'res_model': 'hr.applicant',
'res_id': applicant_id,
'datas': base64.encodestring(post['ufile'].read()),
'datas_fname': post['ufile'].filename,
}
env['ir.attachment'].create(attachment_value)
return request.render("website_hr_recruitment.thankyou", {})
# vim :et:
sargas/scipy | scipy/stats/tests/test_mstats_extras.py | 4 | 4790 | bsd-3-clause
# pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for maskedArray statistics.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, print_function, absolute_import
__author__ = "Pierre GF Gerard-Marchant ($Author: backtopop $)"
import numpy as np
import numpy.ma as ma
import scipy.stats.mstats as ms
#import scipy.stats.mmorestats as mms
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_almost_equal, assert_
class TestMisc(TestCase):
#
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
#
def test_mjci(self):
"Tests the Marits-Jarrett estimator"
data = ma.array([ 77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5)
#
def test_trimmedmeanci(self):
"Tests the confidence intervals of the trimmed mean."
data = ma.array([545,555,558,572,575,576,578,580,
594,605,635,651,653,661,666])
assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1)
assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1),
[561.8, 630.6])
#
def test_idealfourths(self):
"Tests ideal-fourths"
test = np.arange(100)
assert_almost_equal(np.asarray(ms.idealfourths(test)),
[24.416667,74.583333],6)
test_2D = test.repeat(3).reshape(-1,3)
assert_almost_equal(ms.idealfourths(test_2D, axis=0),
[[24.416667,24.416667,24.416667],
[74.583333,74.583333,74.583333]],6)
assert_almost_equal(ms.idealfourths(test_2D, axis=1),
test.repeat(2).reshape(-1,2))
test = [0,0]
_result = ms.idealfourths(test)
assert_(np.isnan(_result).all())
#..............................................................................
class TestQuantiles(TestCase):
#
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
#
def test_hdquantiles(self):
data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014,
0.887764025,0.239407086,0.349638551,0.972791145,0.149789972,
0.936947700,0.132359948,0.046041972,0.641675031,0.945530547,
0.224218684,0.771450991,0.820257774,0.336458052,0.589113496,
0.509736129,0.696838829,0.491323573,0.622767425,0.775189248,
0.641461450,0.118455200,0.773029450,0.319280007,0.752229111,
0.047841438,0.466295911,0.583850781,0.840581845,0.550086491,
0.466470062,0.504765074,0.226855960,0.362641207,0.891620942,
0.127898691,0.490094097,0.044882048,0.041441695,0.317976349,
0.504135618,0.567353033,0.434617473,0.636243375,0.231803616,
0.230154113,0.160011327,0.819464108,0.854706985,0.438809221,
0.487427267,0.786907310,0.408367937,0.405534192,0.250444460,
0.995309248,0.144389588,0.739947527,0.953543606,0.680051621,
0.388382017,0.863530727,0.006514031,0.118007779,0.924024803,
0.384236354,0.893687694,0.626534881,0.473051932,0.750134705,
0.241843555,0.432947602,0.689538104,0.136934797,0.150206859,
0.474335206,0.907775349,0.525869295,0.189184225,0.854284286,
0.831089744,0.251637345,0.587038213,0.254475554,0.237781276,
0.827928620,0.480283781,0.594514455,0.213641488,0.024194386,
0.536668589,0.699497811,0.892804071,0.093835427,0.731107772]
#
assert_almost_equal(ms.hdquantiles(data,[0., 1.]),
[0.006514031, 0.995309248])
hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75])
assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,])
hdq = ms.hdquantiles_sd(data,[0.25, 0.5, 0.75])
assert_almost_equal(hdq, [0.03786954, 0.03805389, 0.03800152,], 4)
#
data = np.array(data).reshape(10,10)
hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0)
assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75]))
assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75]))
hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True)
assert_almost_equal(hdq[...,0],
ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True))
assert_almost_equal(hdq[...,-1],
ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True))
###############################################################################
if __name__ == "__main__":
run_module_suite()
cohortfsllc/cohort-cocl2-sandbox | buildbot/buildbot_lib.py | 2 | 21805 | bsd-3-clause
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os.path
import shutil
import subprocess
import stat
import sys
import time
import traceback
ARCH_MAP = {
'32': {
'gyp_arch': 'ia32',
'scons_platform': 'x86-32',
},
'64': {
'gyp_arch': 'x64',
'scons_platform': 'x86-64',
},
'arm': {
'gyp_arch': 'arm',
'scons_platform': 'arm',
},
'mips32': {
'gyp_arch': 'mips32',
'scons_platform': 'mips32',
},
}
def RunningOnBuildbot():
return os.environ.get('BUILDBOT_SLAVE_TYPE') is not None
def GetHostPlatform():
sys_platform = sys.platform.lower()
if sys_platform.startswith('linux'):
return 'linux'
elif sys_platform in ('win', 'win32', 'windows', 'cygwin'):
return 'win'
elif sys_platform in ('darwin', 'mac'):
return 'mac'
else:
raise Exception('Can not determine the platform!')
def SetDefaultContextAttributes(context):
"""
Set default values for the attributes needed by the SCons function, so that
SCons can be run without needing ParseStandardCommandLine
"""
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = 'opt'
context['default_scons_mode'] = ['opt-host', 'nacl']
context['default_scons_platform'] = ('x86-64' if platform == 'win'
else 'x86-32')
context['android'] = False
context['clang'] = False
context['asan'] = False
context['pnacl'] = False
context['use_glibc'] = False
context['use_breakpad_tools'] = False
context['max_jobs'] = 8
context['scons_args'] = []
# Windows-specific environment manipulation
def SetupWindowsEnvironment(context):
# Poke around looking for MSVC. We should do something more principled in
# the future.
# The name of Program Files can differ, depending on the bittage of Windows.
program_files = r'c:\Program Files (x86)'
if not os.path.exists(program_files):
program_files = r'c:\Program Files'
if not os.path.exists(program_files):
raise Exception('Cannot find the Program Files directory!')
# The location of MSVC can differ depending on the version.
msvc_locs = [
('Microsoft Visual Studio 12.0', 'VS120COMNTOOLS', '2013'),
('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'),
('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'),
('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'),
]
for dirname, comntools_var, gyp_msvs_version in msvc_locs:
msvc = os.path.join(program_files, dirname)
context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version)
if os.path.exists(msvc):
break
else:
# The break statement did not execute.
raise Exception('Cannot find MSVC!')
# Put MSVC in the path.
vc = os.path.join(msvc, 'VC')
comntools = os.path.join(msvc, 'Common7', 'Tools')
perf = os.path.join(msvc, 'Team Tools', 'Performance Tools')
context.SetEnv('PATH', os.pathsep.join([
context.GetEnv('PATH'),
vc,
comntools,
perf]))
# SCons needs this variable to find vsvars.bat.
# The end slash is needed because the batch files expect it.
context.SetEnv(comntools_var, comntools + '\\')
  # This environment variable tells SCons to print debug info while it searches
# for MSVC.
context.SetEnv('SCONS_MSCOMMON_DEBUG', '-')
# Needed for finding devenv.
context['msvc'] = msvc
SetupGyp(context, [])
def SetupGyp(context, extra_vars=[]):
context.SetEnv('GYP_GENERATORS', 'ninja')
if RunningOnBuildbot():
goma_opts = [
'use_goma=1',
'gomadir=/b/build/goma',
]
else:
goma_opts = []
context.SetEnv('GYP_DEFINES', ' '.join(
context['gyp_vars'] + goma_opts + extra_vars))
def SetupLinuxEnvironment(context):
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupMacEnvironment(context):
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupAndroidEnvironment(context):
SetupGyp(context, ['OS=android', 'target_arch='+context['gyp_arch']])
context.SetEnv('GYP_GENERATORS', 'ninja')
context.SetEnv('GYP_CROSSCOMPILE', '1')
def ParseStandardCommandLine(context):
"""
The standard buildbot scripts require 3 arguments to run. The first
argument (dbg/opt) controls if the build is a debug or a release build. The
second argument (32/64) controls the machine architecture being targeted.
The third argument (newlib/glibc) controls which c library we're using for
the nexes. Different buildbots may have different sets of arguments.
"""
parser = optparse.OptionParser()
parser.add_option('-n', '--dry-run', dest='dry_run', default=False,
action='store_true', help='Do not execute any commands.')
parser.add_option('--inside-toolchain', dest='inside_toolchain',
default=bool(os.environ.get('INSIDE_TOOLCHAIN')),
action='store_true', help='Inside toolchain build.')
parser.add_option('--android', dest='android', default=False,
action='store_true', help='Build for Android.')
parser.add_option('--clang', dest='clang', default=False,
action='store_true', help='Build trusted code with Clang.')
parser.add_option('--coverage', dest='coverage', default=False,
action='store_true',
help='Build and test for code coverage.')
parser.add_option('--validator', dest='validator', default=False,
action='store_true',
help='Only run validator regression test')
parser.add_option('--asan', dest='asan', default=False,
action='store_true', help='Build trusted code with ASan.')
parser.add_option('--scons-args', dest='scons_args', default =[],
action='append', help='Extra scons arguments.')
parser.add_option('--step-suffix', metavar='SUFFIX', default='',
help='Append SUFFIX to buildbot step names.')
parser.add_option('--no-gyp', dest='no_gyp', default=False,
action='store_true', help='Do not run the gyp build')
parser.add_option('--no-goma', dest='no_goma', default=False,
action='store_true', help='Do not run with goma')
parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools',
default=False, action='store_true',
help='Use breakpad tools for testing')
parser.add_option('--skip-build', dest='skip_build', default=False,
action='store_true',
help='Skip building steps in buildbot_pnacl')
parser.add_option('--skip-run', dest='skip_run', default=False,
action='store_true',
help='Skip test-running steps in buildbot_pnacl')
options, args = parser.parse_args()
if len(args) != 3:
parser.error('Expected 3 arguments: mode arch toolchain')
# script + 3 args == 4
mode, arch, toolchain = args
if mode not in ('dbg', 'opt', 'coverage'):
parser.error('Invalid mode %r' % mode)
if arch not in ARCH_MAP:
parser.error('Invalid arch %r' % arch)
if toolchain not in ('newlib', 'glibc', 'pnacl', 'nacl_clang'):
parser.error('Invalid toolchain %r' % toolchain)
# TODO(ncbray) allow a command-line override
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = mode
context['arch'] = arch
context['android'] = options.android
# ASan is Clang, so set the flag to simplify other checks.
context['clang'] = options.clang or options.asan
context['validator'] = options.validator
context['asan'] = options.asan
# TODO(ncbray) turn derived values into methods.
context['gyp_mode'] = {
'opt': 'Release',
'dbg': 'Debug',
'coverage': 'Debug'}[mode]
context['gn_is_debug'] = {
'opt': 'false',
'dbg': 'true',
'coverage': 'true'}[mode]
context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch']
context['gyp_vars'] = []
if context['clang']:
context['gyp_vars'].append('clang=1')
if context['asan']:
context['gyp_vars'].append('asan=1')
context['default_scons_platform'] = ARCH_MAP[arch]['scons_platform']
context['default_scons_mode'] = ['nacl']
# Only Linux can build trusted code on ARM.
# TODO(mcgrathr): clean this up somehow
if arch != 'arm' or platform == 'linux':
context['default_scons_mode'] += [mode + '-host']
context['use_glibc'] = toolchain == 'glibc'
context['pnacl'] = toolchain == 'pnacl'
context['nacl_clang'] = toolchain == 'nacl_clang'
context['max_jobs'] = 8
context['dry_run'] = options.dry_run
context['inside_toolchain'] = options.inside_toolchain
context['step_suffix'] = options.step_suffix
context['no_gyp'] = options.no_gyp
context['no_goma'] = options.no_goma
context['coverage'] = options.coverage
context['use_breakpad_tools'] = options.use_breakpad_tools
context['scons_args'] = options.scons_args
context['skip_build'] = options.skip_build
context['skip_run'] = options.skip_run
# Don't run gyp on coverage builds.
if context['coverage']:
context['no_gyp'] = True
for key, value in sorted(context.config.items()):
print '%s=%s' % (key, value)
def EnsureDirectoryExists(path):
"""
Create a directory if it does not already exist.
Does not mask failures, but there really shouldn't be any.
"""
if not os.path.exists(path):
os.makedirs(path)
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
try:
RemovePath(path)
except Exception:
print 'Failed to remove %s' % path
else:
print 'Skipping %s' % path
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
# directories fail. We currently do not have a complete understanding why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
  # Virus checkers can also accidentally prevent files from being deleted, but
# that shouldn't be a problem on the bots.
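  # The retry below backs off exponentially: time.sleep(pow(2, count)) waits
  # 2, 4, 8 and 16 seconds before re-raising after the fifth failure.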
if GetHostPlatform() == 'win':
count = 0
while True:
try:
op(*args)
break
except Exception:
print "FAILED: %s %s" % (op.__name__, repr(args))
count += 1
if count < 5:
print "RETRY: %s %s" % (op.__name__, repr(args))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
def PermissionsFixOnError(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def _RemoveDirectory(path):
print 'Removing %s' % path
if os.path.exists(path):
shutil.rmtree(path, onerror=PermissionsFixOnError)
print ' Succeeded.'
else:
print ' Path does not exist, nothing to do.'
def RemoveDirectory(path):
"""
Remove a directory if it exists.
Does not mask failures, although it does retry a few times on Windows.
"""
Retry(_RemoveDirectory, path)
def RemovePath(path):
"""Remove a path, file or directory."""
if os.path.isdir(path):
RemoveDirectory(path)
else:
if os.path.isfile(path) and not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
os.remove(path)
# This is a sanity check so Command can print out better error information.
def FileCanBeFound(name, paths):
# CWD
if os.path.exists(name):
return True
# Paths with directories are not resolved using the PATH variable.
if os.path.dirname(name):
return False
# In path
for path in paths.split(os.pathsep):
full = os.path.join(path, name)
if os.path.exists(full):
return True
return False
def RemoveGypBuildDirectories():
# Remove all directories on all platforms. Overkill, but it allows for
# straight-line code.
# Windows
RemoveDirectory('build/Debug')
RemoveDirectory('build/Release')
RemoveDirectory('build/Debug-Win32')
RemoveDirectory('build/Release-Win32')
RemoveDirectory('build/Debug-x64')
RemoveDirectory('build/Release-x64')
# Linux and Mac
RemoveDirectory('../xcodebuild')
RemoveDirectory('../out')
RemoveDirectory('src/third_party/nacl_sdk/arm-newlib')
def RemoveSconsBuildDirectories():
RemoveDirectory('scons-out')
RemoveDirectory('breakpad-out')
# Execute a command using Python's subprocess module.
def Command(context, cmd, cwd=None):
print 'Running command: %s' % ' '.join(cmd)
# Python's subprocess has a quirk. A subprocess can execute with an
# arbitrary, user-defined environment. The first argument of the command,
# however, is located using the PATH variable of the Python script that is
# launching the subprocess. Modifying the PATH in the environment passed to
# the subprocess does not affect Python's search for the first argument of
# the command (the executable file.) This is a little counter intuitive,
# so we're forcing the search to use the same PATH variable as is seen by
# the subprocess.
env = context.MakeCommandEnv()
script_path = os.environ['PATH']
os.environ['PATH'] = env['PATH']
try:
if FileCanBeFound(cmd[0], env['PATH']) or context['dry_run']:
# Make sure that print statements before the subprocess call have been
# flushed, otherwise the output of the subprocess call may appear before
# the print statements.
sys.stdout.flush()
if context['dry_run']:
retcode = 0
else:
retcode = subprocess.call(cmd, cwd=cwd, env=env)
else:
# Provide a nicer failure message.
# If subprocess cannot find the executable, it will throw a cryptic
# exception.
print 'Executable %r cannot be found.' % cmd[0]
retcode = 1
finally:
os.environ['PATH'] = script_path
print 'Command return code: %d' % retcode
if retcode != 0:
raise StepFailed()
return retcode
# A specialized version of CommandStep.
def SCons(context, mode=None, platform=None, parallel=False, browser_test=False,
args=(), cwd=None):
python = sys.executable
if mode is None: mode = context['default_scons_mode']
if platform is None: platform = context['default_scons_platform']
if parallel:
jobs = context['max_jobs']
else:
jobs = 1
cmd = []
if browser_test and context.Linux():
# Although we could use the "browser_headless=1" Scons option, it runs
# xvfb-run once per Chromium invocation. This is good for isolating
# the tests, but xvfb-run has a stupid fixed-period sleep, which would
# slow down the tests unnecessarily.
cmd.extend(['xvfb-run', '--auto-servernum'])
cmd.extend([
python, 'scons.py',
'--verbose',
'-k',
'-j%d' % jobs,
'--mode='+','.join(mode),
'platform='+platform,
])
cmd.extend(context['scons_args'])
if context['clang']: cmd.append('--clang')
if context['asan']: cmd.append('--asan')
if context['use_glibc']: cmd.append('--nacl_glibc')
if context['pnacl']: cmd.append('bitcode=1')
if context['nacl_clang']: cmd.append('nacl_clang=1')
if context['use_breakpad_tools']:
cmd.append('breakpad_tools_dir=breakpad-out')
if context['android']:
cmd.append('android=1')
  # Append user-specified arguments.
cmd.extend(args)
Command(context, cmd, cwd)
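
# For example, on a 64-bit opt bot, SCons(context, parallel=True) expands to
# roughly (hypothetical): python scons.py --verbose -k -j8
# --mode=nacl,opt-host platform=x86-64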
class StepFailed(Exception):
"""
Thrown when the step has failed.
"""
class StopBuild(Exception):
"""
Thrown when the entire build should stop. This does not indicate a failure,
in of itself.
"""
class Step(object):
"""
This class is used in conjunction with a Python "with" statement to ensure
that the preamble and postamble of each build step gets printed and failures
get logged. This class also ensures that exceptions thrown inside a "with"
statement don't take down the entire build.
"""
def __init__(self, name, status, halt_on_fail=True):
self.status = status
if 'step_suffix' in status.context:
suffix = status.context['step_suffix']
else:
suffix = ''
self.name = name + suffix
self.halt_on_fail = halt_on_fail
self.step_failed = False
# Called on entry to a 'with' block.
def __enter__(self):
sys.stdout.flush()
print
print '@@@BUILD_STEP %s@@@' % self.name
self.status.ReportBegin(self.name)
# The method is called on exit from a 'with' block - even for non-local
# control flow, i.e. exceptions, breaks, continues, returns, etc.
# If an exception is thrown inside a block wrapped with a 'with' statement,
# the __exit__ handler can suppress the exception by returning True. This is
# used to isolate each step in the build - if an exception occurs in a given
# step, the step is treated as a failure. This allows the postamble for each
  # step to be printed and also allows the build to continue if the failure of
# a given step doesn't halt the build.
def __exit__(self, type, exception, trace):
sys.stdout.flush()
if exception is None:
# If exception is None, no exception occurred.
step_failed = False
elif isinstance(exception, StepFailed):
step_failed = True
print
print 'Halting build step because of failure.'
print
else:
step_failed = True
print
print 'The build step threw an exception...'
print
traceback.print_exception(type, exception, trace, file=sys.stdout)
print
if step_failed:
self.status.ReportFail(self.name)
print '@@@STEP_FAILURE@@@'
if self.halt_on_fail:
print
print 'Entire build halted because %s failed.' % self.name
sys.stdout.flush()
raise StopBuild()
else:
self.status.ReportPass(self.name)
sys.stdout.flush()
# Suppress any exception that occurred.
return True
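
# A minimal usage sketch (hypothetical step name and command): each build
# phase runs inside a "with Step(...)" block so its pass/fail status is
# reported per step without taking down the whole build:
#
#   status = BuildStatus(context)
#   with Step('partial_sdk', status, halt_on_fail=False):
#       Command(context, cmd=['ninja', '-C', 'out/Release'])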
# Adds an arbitrary link inside the build stage on the waterfall.
def StepLink(text, link):
print '@@@STEP_LINK@%s@%s@@@' % (text, link)
# Adds arbitrary text inside the build stage on the waterfall.
def StepText(text):
print '@@@STEP_TEXT@%s@@@' % (text)
class BuildStatus(object):
"""
Keeps track of the overall status of the build.
"""
def __init__(self, context):
self.context = context
self.ever_failed = False
self.steps = []
def ReportBegin(self, name):
pass
def ReportPass(self, name):
self.steps.append((name, 'passed'))
def ReportFail(self, name):
self.steps.append((name, 'failed'))
self.ever_failed = True
# Handy info when this script is run outside of the buildbot.
def DisplayBuildStatus(self):
print
for step, status in self.steps:
print '%-40s[%s]' % (step, status)
print
if self.ever_failed:
print 'Build failed.'
else:
print 'Build succeeded.'
def ReturnValue(self):
return int(self.ever_failed)
class BuildContext(object):
"""
Encapsulates the information needed for running a build command. This
includes environment variables and default arguments for SCons invocations.
"""
# Only allow these attributes on objects of this type.
__slots__ = ['status', 'global_env', 'config']
def __init__(self):
# The contents of global_env override os.environ for any commands run via
# self.Command(...)
self.global_env = {}
# PATH is a special case. See: Command.
self.global_env['PATH'] = os.environ.get('PATH', '')
self.config = {}
self['dry_run'] = False
# Emulate dictionary subscripting.
def __getitem__(self, key):
return self.config[key]
# Emulate dictionary subscripting.
def __setitem__(self, key, value):
self.config[key] = value
# Emulate dictionary membership test
def __contains__(self, key):
return key in self.config
def Windows(self):
return self.config['platform'] == 'win'
def Linux(self):
return self.config['platform'] == 'linux'
def Mac(self):
return self.config['platform'] == 'mac'
def GetEnv(self, name, default=None):
return self.global_env.get(name, default)
def SetEnv(self, name, value):
self.global_env[name] = str(value)
def MakeCommandEnv(self):
# The external environment is not sanitized.
e = dict(os.environ)
# Arbitrary variables can be overridden.
e.update(self.global_env)
return e
def RunBuild(script, status):
try:
script(status, status.context)
except StopBuild:
pass
# Emit a summary step for three reasons:
# - The annotator will attribute non-zero exit status to the last build step.
# This can misattribute failures to the last build step.
# - runtest.py wraps the builds to scrape perf data. It emits an annotator
# tag on exit which misattributes perf results to the last build step.
# - Provide a label step in which to show summary result.
# Otherwise these go back to the preamble.
with Step('summary', status):
if status.ever_failed:
print 'There were failed stages.'
else:
print 'Success.'
# Display a summary of the build.
status.DisplayBuildStatus()
sys.exit(status.ReturnValue())
supriyantomaftuh/syzygy | third_party/numpy/files/numpy/polynomial/legendre.py | 16 | 33731
"""
Objects for dealing with Legendre series.
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `legdomain` -- Legendre series default domain, [-1,1].
- `legzero` -- Legendre series that evaluates identically to 0.
- `legone` -- Legendre series that evaluates identically to 1.
- `legx` -- Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `legmulx` -- multiply a Legendre series in ``P_i(x)`` by ``x``.
- `legadd` -- add two Legendre series.
- `legsub` -- subtract one Legendre series from another.
- `legmul` -- multiply two Legendre series.
- `legdiv` -- divide one Legendre series by another.
- `legpow` -- raise a Legendre series to a positive integer power
- `legval` -- evaluate a Legendre series at given points.
Calculus
--------
- `legder` -- differentiate a Legendre series.
- `legint` -- integrate a Legendre series.
Misc Functions
--------------
- `legfromroots` -- create a Legendre series with specified roots.
- `legroots` -- find the roots of a Legendre series.
- `legvander` -- Vandermonde-like matrix for Legendre polynomials.
- `legfit` -- least-squares fit returning a Legendre series.
- `legtrim` -- trim leading coefficients from a Legendre series.
- `legline` -- Legendre series representing given straight line.
- `leg2poly` -- convert a Legendre series to a polynomial.
- `poly2leg` -- convert a polynomial to a Legendre series.
Classes
-------
- `Legendre` -- A Legendre series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division
__all__ = ['legzero', 'legone', 'legx', 'legdomain', 'legline',
'legadd', 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow',
'legval', 'legder', 'legint', 'leg2poly', 'poly2leg',
'legfromroots', 'legvander', 'legfit', 'legtrim', 'legroots',
'Legendre']
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
legtrim = pu.trimcoef
def poly2leg(pol) :
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-d array containing the polynomial coefficients
Returns
-------
cs : ndarray
1-d array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Legendre(P.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1) :
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(cs) :
"""
Convert a Legendre series to a polynomial.
Convert an array representing the coefficients of a Legendre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
cs : array_like
1-d array containing the Legendre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-d array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2leg
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
    >>> from numpy import polynomial as P
    >>> c = P.Legendre(range(4))
>>> c
Legendre([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
"""
from polynomial import polyadd, polysub, polymulx
[cs] = pu.as_series([cs])
n = len(cs)
if n < 3:
return cs
else:
c0 = cs[-2]
c1 = cs[-1]
# i is the current degree of c1
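        # The loop below unwinds Bonnet's recursion,
        # i*P_i(x) = (2*i - 1)*x*P_{i-1}(x) - (i - 1)*P_{i-2}(x),
        # one degree at a time.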
for i in range(n - 1, 1, -1) :
tmp = c0
c0 = polysub(cs[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre
legdomain = np.array([-1,1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0,1])
def legline(off, scl) :
"""
Legendre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Legendre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legline(3,2)
array([3, 2])
>>> L.legval(-3, L.legline(3,2)) # should be -3
-3.0
"""
if scl != 0 :
return np.array([off,scl])
else :
return np.array([off])
def legfromroots(roots) :
"""
Generate a Legendre series with the given roots.
Return the array of coefficients for the P-series whose roots (a.k.a.
"zeros") are given by *roots*. The returned array of coefficients is
ordered from lowest order "term" to highest, and zeros of multiplicity
greater than one must be included in *roots* a number of times equal
to their multiplicity (e.g., if `2` is a root of multiplicity three,
then [2,2,2] must be in *roots*).
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-d array of the Legendre series coefficients, ordered from low to
high. If all roots are real, ``out.dtype`` is a float type;
otherwise, ``out.dtype`` is a complex type, even if all the
coefficients in the result are real (see Examples below).
See Also
--------
polyfromroots, chebfromroots
Notes
-----
What is returned are the :math:`c_i` such that:
.. math::
\\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i])
where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Legendre
(basis) polynomial over the domain `[-1,1]`. Note that, unlike
`polyfromroots`, due to the nature of the Legendre basis set, the
above identity *does not* imply :math:`c_n = 1` identically (see
Examples).
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
prd = np.array([1], dtype=roots.dtype)
for r in roots:
prd = legsub(legmulx(prd), r*prd)
return prd
def legadd(c1, c2):
"""
Add one Legendre series to another.
Returns the sum of two Legendre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Legendre series of their sum.
See Also
--------
legsub, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Legendre series
is a Legendre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
    >>> L.legsub(c2,c1) # -L.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(cs):
"""Multiply a Legendre series by x.
Multiply the Legendre series `cs` by x, where x is the independent
variable.
Parameters
----------
cs : array_like
1-d array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
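    Examples
    --------
    By the recursion above, ``x*(P_0 + 2*P_1 + 3*P_2)`` has the
    coefficients [2/3, 11/5, 4/3, 9/5]:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> np.allclose(L.legmulx([1, 2, 3]), [2/3., 11/5., 4/3., 9/5.])
    True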
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
# The zero series needs special treatment
if len(cs) == 1 and cs[0] == 0:
return cs
prd = np.empty(len(cs) + 1, dtype=cs.dtype)
prd[0] = cs[0]*0
prd[1] = cs[0]
for i in range(1, len(cs)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (cs[i]*j)/s
prd[k] += (cs[i]*i)/s
return prd
def legmul(c1, c2):
"""
Multiply one Legendre series by another.
Returns the product of two Legendre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their product.
See Also
--------
legadd, legsub, legdiv, legpow
Notes
-----
    In general, the (polynomial) product of two Legendre series results in terms
that are not in the Legendre polynomial basis set. Thus, to express
the product as a Legendre series, it is necessary to "re-project" the
product onto said basis set, which may produce "un-intuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
    >>> L.legmul(c1,c2) # multiplication requires "reprojection"
array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
cs = c2
xs = c1
else:
cs = c1
xs = c2
if len(cs) == 1:
c0 = cs[0]*xs
c1 = 0
elif len(cs) == 2:
c0 = cs[0]*xs
c1 = cs[1]*xs
else :
        # Clenshaw-like reduction: fold the coefficients in from highest
        # order down, using the Legendre recurrence so intermediate results
        # stay expressed in the Legendre basis.
        nd = len(cs)
c0 = cs[-2]*xs
c1 = cs[-1]*xs
for i in range(3, len(cs) + 1) :
tmp = c0
nd = nd - 1
c0 = legsub(cs[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
"""
Divide one Legendre series by another.
Returns the quotient-with-remainder of two Legendre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
quo, rem : ndarrays
Of Legendre series coefficients representing the quotient and
remainder.
See Also
--------
legadd, legsub, legmul, legpow
Notes
-----
In general, the (polynomial) division of one Legendre series by another
results in quotient and remainder terms that are not in the Legendre
polynomial basis set. Thus, to express these results as a Legendre
series, it is necessary to "re-project" the results onto the Legendre
basis set, which may produce "un-intuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
(array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(cs, pow, maxpower=16) :
"""Raise a Legendre series to a power.
    Returns the Legendre series `cs` raised to the power `pow`. The
    argument `cs` is a sequence of coefficients ordered from low to high,
    i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
cs : array_like
1d array of Legendre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
        Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16.
Returns
-------
coef : ndarray
        Legendre series of `cs` raised to the power `pow`.
See Also
--------
legadd, legsub, legmul, legdiv
Examples
--------
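    Squaring a series agrees with multiplying it by itself:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> np.allclose(L.legpow((1, 2, 3), 2), L.legmul((1, 2, 3), (1, 2, 3)))
    True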
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=cs.dtype)
elif power == 1 :
return cs
else :
# This can be made more efficient by using powers of two
# in the usual way.
prd = cs
for i in range(2, power + 1) :
prd = legmul(prd, cs)
return prd
def legder(cs, m=1, scl=1) :
"""
Differentiate a Legendre series.
Returns the series `cs` differentiated `m` times. At each iteration the
result is multiplied by `scl` (the scaling factor is for use in a linear
change of variable). The argument `cs` is the sequence of coefficients
from lowest order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
cs : array_like
1-D array of Legendre series coefficients ordered from low to high.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
Returns
-------
der : ndarray
Legendre series of the derivative.
See Also
--------
legint
Notes
-----
In general, the result of differentiating a Legendre series does not
resemble the same operation on a power series. Thus the result of this
function may be "un-intuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> cs = (1,2,3,4)
>>> L.legder(cs)
array([ 6., 9., 20.])
>>> L.legder(cs,3)
array([ 60.])
>>> L.legder(cs,scl=-1)
array([ -6., -9., -20.])
>>> L.legder(cs,2,-1)
array([ 9., 60.])
"""
cnt = int(m)
    if cnt != m:
        raise ValueError("The order of derivation must be an integer")
    if cnt < 0 :
        raise ValueError("The order of derivation must be non-negative")
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
elif cnt >= len(cs):
return cs[:1]*0
else :
for i in range(cnt):
n = len(cs) - 1
cs *= scl
der = np.empty(n, dtype=cs.dtype)
for j in range(n, 0, -1):
der[j - 1] = (2*j - 1)*cs[j]
                # Note: at j = 1 this writes to cs[-1] via wraparound; that
                # element was consumed on the first pass, so it is harmless.
                cs[j - 2] += cs[j]
cs = der
return cs
def legint(cs, m=1, k=[], lbnd=0, scl=1):
"""
Integrate a Legendre series.
Returns a Legendre series that is the Legendre series `cs`, integrated
`m` times from `lbnd` to `x`. At each iteration the resulting series
is **multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `cs` is a sequence of
coefficients, from lowest order Legendre series "term" to highest,
e.g., [1,2,3] represents the series :math:`P_0(x) + 2P_1(x) + 3P_2(x)`.
Parameters
----------
cs : array_like
1-d array of Legendre series coefficients, ordered from low to high.
m : int, optional
        Order of integration, must be non-negative. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
Returns
-------
S : ndarray
Legendre series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
legder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a`
- perhaps not what one would have first thought.
    Also note that, in general, the result of integrating a Legendre series
    needs to be "re-projected" onto the Legendre basis set. Thus, typically,
the result of this function is "un-intuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> cs = (1,2,3)
>>> L.legint(cs)
array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(cs,3)
array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
-1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(cs, k=3)
array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(cs, lbnd=-2)
array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(cs, scl=2)
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
"""
cnt = int(m)
if np.isscalar(k) :
k = [k]
    if cnt != m:
        raise ValueError("The order of integration must be an integer")
    if cnt < 0 :
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt :
        raise ValueError("Too many integration constants")
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
n = len(cs)
cs *= scl
if n == 1 and cs[0] == 0:
cs[0] += k[i]
else:
tmp = np.empty(n + 1, dtype=cs.dtype)
tmp[0] = cs[0]*0
tmp[1] = cs[0]
            for j in range(1, n):
                # The integral of P_j is (P_{j+1} - P_{j-1})/(2*j + 1).
                t = cs[j]/(2*j + 1)
                tmp[j + 1] = t
                tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
cs = tmp
return cs
def legval(x, cs):
"""Evaluate a Legendre series.
    If `cs` is of length `n`, this function returns:
``p(x) = cs[0]*P_0(x) + cs[1]*P_1(x) + ... + cs[n-1]*P_{n-1}(x)``
If x is a sequence or array then p(x) will have the same shape as x.
If r is a ring_like object that supports multiplication and addition
by the values in `cs`, then an object of the same type is returned.
Parameters
----------
x : array_like, ring_like
Array of numbers or objects that support multiplication and
addition with themselves and with the elements of `cs`.
cs : array_like
1-d array of Legendre coefficients ordered from low to high.
Returns
-------
values : ndarray, ring_like
If the return is an ndarray then it has the same shape as `x`.
See Also
--------
legfit
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
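    Since ``P_2(0.5) = -0.125``, the series ``P_0 + 2*P_1 + 3*P_2``
    evaluates at 0.5 to ``1 + 2*0.5 + 3*(-0.125)``:
    >>> from numpy.polynomial import legendre as L
    >>> L.legval(0.5, (1, 2, 3))
    1.625
    >>> L.legval(-1, (1, 2, 3))  # P_i(-1) == (-1)**i
    2.0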
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if isinstance(x, tuple) or isinstance(x, list) :
x = np.asarray(x)
if len(cs) == 1 :
c0 = cs[0]
c1 = 0
elif len(cs) == 2 :
c0 = cs[0]
c1 = cs[1]
else :
nd = len(cs)
c0 = cs[-2]
c1 = cs[-1]
for i in range(3, len(cs) + 1) :
tmp = c0
nd = nd - 1
c0 = cs[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legvander(x, deg) :
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points `x`.
This isn't a true Vandermonde matrix because `x` can be an arbitrary
ndarray and the Legendre polynomials aren't powers. If ``V`` is the
returned matrix and `x` is a 2d array, then the elements of ``V`` are
``V[i,j,k] = P_k(x[i,j])``, where ``P_k`` is the Legendre polynomial
of degree ``k``.
Parameters
----------
x : array_like
Array of points. The values are converted to double or complex
doubles. If x is scalar it is converted to a 1D array.
deg : integer
Degree of the resulting matrix.
Returns
-------
    vander : ndarray
        The Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg+1,)``. The last index is the degree.
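    Examples
    --------
    Column ``k`` of the result holds ``P_k`` evaluated at `x`; for
    instance, column 2 is ``(3*x**2 - 1)/2``:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x = np.array([-1., 0., 1.])
    >>> np.allclose(L.legvander(x, 2)[..., 2], (3*x**2 - 1)/2)
    True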
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0 :
v[1] = x
for i in range(2, ideg + 1) :
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def legfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Legendre series to data.
Fit a Legendre series ``p(x) = p[0] * P_{0}(x) + ... + p[deg] *
P_{deg}(x)`` of degree `deg` to points `(x, y)`. Returns a vector of
coefficients `p` that minimises the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, K)
Legendre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : present when `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
polyfit : least squares fit using polynomials.
chebfit : least squares fit using Chebyshev series.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
    The solution is the coefficients ``c[i]`` of the Legendre series
``P(x)`` that minimizes the squared error
``E = \\sum_j |y_j - P(x_j)|^2``.
This problem is solved by setting up as the overdetermined matrix
equation
``V(x)*c = y``,
where ``V`` is the Vandermonde matrix of `x`, the elements of ``c`` are
the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of ``V``.
If some of the singular values of ``V`` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Legendre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
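    A noise-free fit recovers the generating coefficients:
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x = np.linspace(-1, 1, 51)
    >>> y = L.legval(x, [1, 2, 3])
    >>> np.allclose(L.legfit(x, y, 2), [1, 2, 3])
    True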
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
    if deg < 0 :
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2 :
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")
# set up the least squares matrices
lhs = legvander(x, deg)
rhs = y
if w is not None:
w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
# apply weights
if rhs.ndim == 2:
lhs *= w[:, np.newaxis]
rhs *= w[:, np.newaxis]
else:
lhs *= w[:, np.newaxis]
rhs *= w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# scale the design matrix and solve the least squares equation
scl = np.sqrt((lhs*lhs).sum(0))
c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
def legroots(cs):
"""
Compute the roots of a Legendre series.
    Return the roots (a.k.a. "zeros") of the Legendre series represented by
    `cs`, which is the sequence of coefficients from lowest order "term"
    to highest, e.g., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
cs : array_like
1-d array of Legendre series coefficients ordered from low to high.
Returns
-------
out : ndarray
Array of the roots. If all the roots are real, then so is the
dtype of ``out``; otherwise, ``out``'s dtype is complex.
See Also
--------
polyroots
chebroots
Notes
-----
    The roots are computed as the eigenvalues of a companion-like matrix
    built from the three-term recurrence for the Legendre polynomials.
Remember: because the Legendre series basis set is different from the
"standard" basis set, the results of this function *may* not be what
one is expecting.
Examples
--------
>>> import numpy.polynomial as P
>>> P.polyroots((1, 2, 3, 4)) # 4x^3 + 3x^2 + 2x + 1 has two complex roots
array([-0.60582959+0.j , -0.07208521-0.63832674j,
-0.07208521+0.63832674j])
    >>> P.legroots((1, 2, 3, 4)) # 4P_3 + 3P_2 + 2P_1 + P_0 has only real roots
array([-0.85099543, -0.11407192, 0.51506735])
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if len(cs) <= 1 :
return np.array([], dtype=cs.dtype)
if len(cs) == 2 :
return np.array([-cs[0]/cs[1]])
n = len(cs) - 1
cs /= cs[-1]
    # Build the companion-like matrix from the three-term recurrence; its
    # eigenvalues are the roots of the series.
    cmat = np.zeros((n,n), dtype=cs.dtype)
cmat[1, 0] = 1
for i in range(1, n):
tmp = 2*i + 1
cmat[i - 1, i] = i/tmp
if i != n - 1:
cmat[i + 1, i] = (i + 1)/tmp
else:
cmat[:, i] -= cs[:-1]*(i + 1)/tmp
roots = la.eigvals(cmat)
roots.sort()
return roots
#
# Legendre series class
#
exec polytemplate.substitute(name='Legendre', nick='leg', domain='[-1,1]')
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT train function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _squared_loss(label, unused_weights, predictions):
"""Unweighted loss implementation."""
loss = math_ops.reduce_sum(
math_ops.squared_difference(predictions, label), 1, keepdims=True)
return loss
def _append_to_leaf(leaf, c_id, w):
"""Helper method for building tree leaves.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_id: class Id for the weight update.
w: weight contribution value.
"""
leaf.sparse_vector.index.append(c_id)
leaf.sparse_vector.value.append(w)
def _set_float_split(split, feat_col, thresh, l_id, r_id):
"""Helper method for building tree float splits.
Sets split feature column, threshold and children.
Args:
split: split node to update.
feat_col: feature column for the split.
thresh: threshold to split on forming rule x <= thresh.
l_id: left child Id.
r_id: right child Id.
"""
split.feature_column = feat_col
split.threshold = thresh
split.left_id = l_id
split.right_id = r_id
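

# For illustration only (a sketch, assuming the message layout defined in
# tree_config_pb2): a depth-1 tree with one dense float split and two leaves
# can be assembled with the helpers above like so:
#
#   ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
#   tree = ensemble.trees.add()
#   _set_float_split(tree.nodes.add().dense_float_binary_split, 0, 1.0, 1, 2)
#   _append_to_leaf(tree.nodes.add().leaf, 0, 0.25)
#   _append_to_leaf(tree.nodes.add().leaf, 0, 0.0)
#
# Note the helpers write sparse_vector leaves, whereas the expected_tree text
# protos in the tests below use dense `vector` leaves.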
class GbdtTest(test_util.TensorFlowTestCase):
def setUp(self):
super(GbdtTest, self).setUp()
def testExtractFeatures(self):
"""Tests feature extraction."""
with self.cached_session():
features = {}
features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
features["sparse_float"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.float32),
array_ops.zeros([2], dtypes.int64))
features["sparse_int"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.int64), array_ops.zeros([2],
dtypes.int64))
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes) = (
gbdt_batch.extract_features(features, None, use_core_columns=False))
self.assertEqual(len(fc_names), 3)
self.assertAllEqual(fc_names,
["dense_float", "sparse_float", "sparse_int"])
self.assertEqual(len(dense_floats), 1)
self.assertEqual(len(sparse_float_indices), 1)
self.assertEqual(len(sparse_float_values), 1)
self.assertEqual(len(sparse_float_shapes), 1)
self.assertEqual(len(sparse_int_indices), 1)
self.assertEqual(len(sparse_int_values), 1)
self.assertEqual(len(sparse_int_shapes), 1)
self.assertAllEqual(dense_floats[0].eval(),
features["dense_float"].eval())
self.assertAllEqual(sparse_float_indices[0].eval(),
features["sparse_float"].indices.eval())
self.assertAllEqual(sparse_float_values[0].eval(),
features["sparse_float"].values.eval())
self.assertAllEqual(sparse_float_shapes[0].eval(),
features["sparse_float"].dense_shape.eval())
self.assertAllEqual(sparse_int_indices[0].eval(),
features["sparse_int"].indices.eval())
self.assertAllEqual(sparse_int_values[0].eval(),
features["sparse_int"].values.eval())
self.assertAllEqual(sparse_int_shapes[0].eval(),
features["sparse_int"].dense_shape.eval())
def testExtractFeaturesWithTransformation(self):
"""Tests feature extraction."""
with self.cached_session():
features = {}
features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
features["sparse_float"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.float32),
array_ops.zeros([2], dtypes.int64))
features["sparse_categorical"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.string), array_ops.zeros([2],
dtypes.int64))
feature_columns = set()
feature_columns.add(layers.real_valued_column("dense_float"))
feature_columns.add(
layers.feature_column._real_valued_var_len_column(
"sparse_float", is_sparse=True))
feature_columns.add(
feature_column_lib.sparse_column_with_hash_bucket(
"sparse_categorical", hash_bucket_size=1000000))
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes) = (
gbdt_batch.extract_features(
features, feature_columns, use_core_columns=False))
self.assertEqual(len(fc_names), 3)
self.assertAllEqual(fc_names,
["dense_float", "sparse_float", "sparse_categorical"])
self.assertEqual(len(dense_floats), 1)
self.assertEqual(len(sparse_float_indices), 1)
self.assertEqual(len(sparse_float_values), 1)
self.assertEqual(len(sparse_float_shapes), 1)
self.assertEqual(len(sparse_int_indices), 1)
self.assertEqual(len(sparse_int_values), 1)
self.assertEqual(len(sparse_int_shapes), 1)
self.assertAllEqual(dense_floats[0].eval(),
features["dense_float"].eval())
self.assertAllEqual(sparse_float_indices[0].eval(),
features["sparse_float"].indices.eval())
self.assertAllEqual(sparse_float_values[0].eval(),
features["sparse_float"].values.eval())
self.assertAllEqual(sparse_float_shapes[0].eval(),
features["sparse_float"].dense_shape.eval())
self.assertAllEqual(sparse_int_indices[0].eval(),
features["sparse_categorical"].indices.eval())
self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
self.assertAllEqual(sparse_int_shapes[0].eval(),
features["sparse_categorical"].dense_shape.eval())
def testExtractFeaturesFromCoreFeatureColumns(self):
"""Tests feature extraction when using core columns."""
with self.cached_session():
features = {}
# Sparse float column does not exist in core, so only dense numeric and
# categorical.
features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
features["sparse_categorical"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.string), array_ops.zeros([2],
dtypes.int64))
feature_columns = set()
feature_columns.add(core_feature_column.numeric_column("dense_float"))
feature_columns.add(
core_feature_column.categorical_column_with_hash_bucket(
"sparse_categorical", hash_bucket_size=1000000))
(fc_names, dense_floats, _, _, _, sparse_int_indices, sparse_int_values,
sparse_int_shapes) = (
gbdt_batch.extract_features(
features, feature_columns, use_core_columns=True))
self.assertEqual(len(fc_names), 2)
self.assertAllEqual(fc_names, ["dense_float", "sparse_categorical"])
self.assertEqual(len(dense_floats), 1)
self.assertEqual(len(sparse_int_indices), 1)
self.assertEqual(len(sparse_int_values), 1)
self.assertEqual(len(sparse_int_shapes), 1)
self.assertAllEqual(dense_floats[0].eval(),
features["dense_float"].eval())
self.assertAllEqual(sparse_int_indices[0].eval(),
features["sparse_categorical"].indices.eval())
self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
self.assertAllEqual(sparse_int_shapes[0].eval(),
features["sparse_categorical"].dense_shape.eval())
def testTrainFnChiefNoBiasCentering(self):
"""Tests the train function running on chief without bias centering."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testObliviousDecisionTreeAsWeakLearner(self):
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.learning_rate_tuner.fixed.learning_rate = 1
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 2
learner_config.constraints.min_node_weight = 0
learner_config.weak_learner_type = (
learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
learner_config.pruning_mode = learner_pb2.LearnerConfig.PRE_PRUNE
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
features = {}
features["dense_float"] = array_ops.constant([[-2], [-1], [1], [2]],
dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions_dict = gbdt_model.predict(learn.ModeKeys.TRAIN)
predictions = predictions_dict["predictions"]
labels = array_ops.constant([[-2], [-1], [1], [2]], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Second run.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
oblivious_dense_float_binary_split {
threshold: -1.0
}
node_metadata {
gain: 4.5
original_oblivious_leaves {
}
}
}
nodes {
leaf {
vector {
value: -1.5
}
}
}
nodes {
leaf {
vector {
value: 1.5
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
# Third run.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 3)
expected_tree = """
nodes {
oblivious_dense_float_binary_split {
threshold: -1.0
}
node_metadata {
gain: 4.5
original_oblivious_leaves {
}
}
}
nodes {
oblivious_dense_float_binary_split {
threshold: -2.0
}
node_metadata {
gain: 0.25
original_oblivious_leaves {
vector {
value: -1.5
}
}
original_oblivious_leaves {
vector {
value: 1.5
}
}
}
}
nodes {
leaf {
vector {
value: -2.0
}
}
}
nodes {
leaf {
vector {
value: -1.0
}
}
}
nodes {
leaf {
vector {
value: 1.5
}
}
}
nodes {
leaf {
vector {
value: 1.5
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefSparseAndDense(self):
"""Tests the train function with sparse and dense features."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
features["sparse_float"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.float32),
array_ops.constant([4, 1], dtypes.int64))
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
sparse_float_binary_split_default_right {
          split {
left_id: 1
right_id: 2
}
}
node_metadata {
gain: 1.125
}
}
nodes {
leaf {
vector {
value: 1.0
}
}
}
nodes {
leaf {
vector {
value: -0.5
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefScalingNumberOfExamples(self):
"""Tests the train function running on chief without bias centering."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
num_examples_fn = (
lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1)
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=num_examples_fn,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefWithBiasCentering(self):
"""Tests the train function running on chief with bias centering."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect bias to be centered.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
expected_tree = """
nodes {
leaf {
vector {
value: 0.25
}
}
}"""
self.assertEquals(len(output.trees), 1)
self.assertAllEqual(output.tree_weights, [1.0])
self.assertProtoEquals(expected_tree, output.trees[0])
self.assertEquals(stamp_token.eval(), 1)
def testTrainFnNonChiefNoBiasCentering(self):
"""Tests the train function running on worker without bias centering."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# Regardless of how many times the train op is run, a non-chief worker
# can only accumulate stats so the tree ensemble never changes.
for _ in range(5):
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 0)
def testTrainFnNonChiefWithCentering(self):
"""Tests the train function running on worker with bias centering."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# Regardless of how many times the train op is run, a non-chief worker
# can only accumulate stats so the tree ensemble never changes.
for _ in range(5):
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 0)
def testPredictFn(self):
"""Tests the predict function."""
with self.cached_session() as sess:
# Create ensemble with one bias node.
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
leaf {
vector {
value: 0.25
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}""", ensemble_config)
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=3,
tree_ensemble_config=ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
# Create predict op.
mode = model_fn.ModeKeys.EVAL
predictions_dict = sess.run(gbdt_model.predict(mode))
self.assertEquals(predictions_dict["ensemble_stamp"], 3)
self.assertAllClose(predictions_dict["predictions"],
[[0.25], [0.25], [0.25], [0.25]])
self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0])
def testPredictFnWithLeafIndexAdvancedLeft(self):
"""Tests the predict function with output leaf ids."""
with self.cached_session() as sess:
# Create ensemble with one bias node.
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.15
}
}
}
}
trees {
nodes {
dense_float_binary_split {
threshold: 0.99
left_id: 1
right_id: 2
}
node_metadata {
            gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.23
}
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}""", ensemble_config)
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=3,
tree_ensemble_config=ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.constant(
[[0.0], [1.0], [1.1], [2.0]], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features,
output_leaf_index=True)
# Create predict op.
mode = model_fn.ModeKeys.INFER
predictions_dict = sess.run(gbdt_model.predict(mode))
self.assertEquals(predictions_dict["ensemble_stamp"], 3)
      # Here is how the numbers in the expected results are calculated:
# 0.5 = 0.25 + 0.25
# 0.48 = 0.25 + 0.23
# 0.38 = 0.15 + 0.23
# 0.38 = 0.15 + 0.23
self.assertAllClose(predictions_dict["predictions"],
[[0.5], [0.48], [0.38], [0.38]])
self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0])
self.assertAllClose(predictions_dict["leaf_index"],
[[1, 1], [1, 2], [2, 2], [2, 2]])
def testTrainFnMulticlassFullHessian(self):
"""Tests the GBDT train for multiclass full hessian."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
      # To make the matrix invertible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
batch_size = 3
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5,
features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
# We got 3 nodes: one parent and 2 leafs.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 2)
# Leafs should have a dense vector of size 5.
expected_leaf_1 = [-3.4480, -3.4429, 13.8490, -3.45, -3.4508]
expected_leaf_2 = [-1.2547, -1.3145, 1.52, 2.3875, -1.3264]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 7e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 7e-3)
def testTrainFnMulticlassDiagonalHessian(self):
"""Tests the GBDT train for multiclass diagonal hessian."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use diagonal hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
      # To make the matrix invertible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
batch_size = 3
features = {}
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5,
features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
      # We got 3 nodes: one parent and 2 leaves.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
      # Leaves should have a dense vector of size 5.
expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]
expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
def testTrainFnMulticlassDiagonalHessianOblivious(self):
"""Tests the GBDT train for multiclass diagonal hessian."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use diagonal hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
      # To make the matrix invertible.
learner_config.regularization.l2 = 1e-5
learner_config.weak_learner_type = (
learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
learner_config.pruning_mode = learner_pb2.LearnerConfig.PRE_PRUNE
learner_config.constraints.max_tree_depth = 5
learner_config.constraints.min_node_weight = 0
batch_size = 3
features = {}
features["sparse_int"] = sparse_tensor.SparseTensor(
array_ops.constant([[0, 0], [1, 0]], dtypes.int64),
array_ops.constant([1, 2], dtypes.int64),
array_ops.constant([3, 1], dtypes.int64))
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5,
features=features)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
predictions_dict = gbdt_model.predict(learn.ModeKeys.TRAIN)
predictions = predictions_dict["predictions"]
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
# Grow 2 layers.
train_op.run()
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
      # We got 6 nodes: two level splits and 4 leaves.
self.assertEqual(len(output.trees[0].nodes), 6)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
print(output.trees[0])
      # Leaves should have a dense vector of size 5.
expected_leaf_1 = [-1.2497, -1.24976, 4.999, -1.24976, -1.2497]
expected_leaf_2 = [-2.2362, -2.2362, 6.0028, -2.2362, -2.2362]
expected_leaf_3 = [-2.2694, -2.2694, 4.0064, -0.0084, -2.2694]
expected_leaf_4 = [-2.2694, -2.2694, -0.0084, 4.0064, -2.2694]
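      # In the oblivious-tree proto the two level splits occupy nodes[0] and
      # nodes[1], and the four leaves follow as nodes[2..5], hence the node
      # indices used in the assertions below.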
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[3].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_3,
output.trees[0].nodes[4].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_4,
output.trees[0].nodes[5].leaf.vector.value, 1e-3)
def testTrainFnMulticlassTreePerClass(self):
"""Tests the GBDT train for multiclass tree per class strategy."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use tree-per-class multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
      # To make the matrix invertible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {
"dense_float":
array_ops.constant([[1.0], [1.5], [2.0]], dtypes.float32),
}
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5,
features=features)
batch_size = 3
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 2.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
          # With num_trees=13 and 5 classes this results in a tree built for
          # class 3 (13 % 5), matching the assertions below.
"num_trees": 13,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
# One node for a split, two children nodes.
self.assertEqual(3, len(output.trees[0].nodes))
      # Leaves will have a sparse vector for class 3.
self.assertEqual(1,
len(output.trees[0].nodes[1].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
self.assertAlmostEqual(
-1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0])
self.assertEqual(1,
len(output.trees[0].nodes[2].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
self.assertAllClose(
0.893284678459,
output.trees[0].nodes[2].leaf.sparse_vector.value[0],
atol=1e-4,
rtol=1e-4)
def testTrainFnChiefFeatureSelectionReachedLimitNoGoodSplit(self):
"""Tests the train function running on chief with feature selection."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.max_number_of_unique_feature_columns = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32)
      # Feature 1 is predictive, but it won't be used because we have reached
      # the limit of num_used_handlers >= max_number_of_unique_feature_columns.
features["dense_float_1"] = array_ops.constant([0, 0, 1, 1],
dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions":
predictions,
"predictions_no_dropout":
predictions,
"partition_ids":
partition_ids,
"ensemble_stamp":
ensemble_stamp,
"num_trees":
12,
"num_used_handlers":
array_ops.constant(1, dtype=dtypes.int64),
"used_handlers_mask":
array_ops.constant([True, False], dtype=dtypes.bool),
}
labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEqual(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
feature_column: 0
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: -0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefFeatureSelectionWithGoodSplits(self):
"""Tests the train function running on chief with feature selection."""
with self.cached_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.max_number_of_unique_feature_columns = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32)
      # Feature 1 is predictive and is among the selected features, so it will
      # be used even though we are at the limit.
features["dense_float_1"] = array_ops.constant([0, 0, 1, 1],
dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions":
predictions,
"predictions_no_dropout":
predictions,
"partition_ids":
partition_ids,
"ensemble_stamp":
ensemble_stamp,
"num_trees":
12,
"num_used_handlers":
array_ops.constant(1, dtype=dtypes.int64),
"used_handlers_mask":
array_ops.constant([False, True], dtype=dtypes.bool),
}
labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEqual(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
feature_column: 1
left_id: 1
right_id: 2
}
node_metadata {
gain: 0.5
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}
nodes {
leaf {
vector {
value: -0.5
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefFeatureSelectionReachedLimitIncrementAttemptedLayer(self):
"""Tests the train function running on chief with feature selection."""
with self.cached_session() as sess:
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
_set_float_split(
tree.nodes.add().sparse_float_binary_split_default_right.split, 2,
4.0, 1, 2)
_append_to_leaf(tree.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree.nodes.add().leaf, 1, 1.2)
tree_ensemble_config.tree_weights.append(1.0)
metadata = tree_ensemble_config.tree_metadata.add()
metadata.is_finalized = False
metadata.num_layers_grown = 1
tree_ensemble_config = tree_ensemble_config.SerializeToString()
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config,
name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.max_number_of_unique_feature_columns = 1
learner_config.constraints.min_node_weight = 0
features = {}
# Both features will be disabled since the feature selection limit is
# already reached.
features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32)
features["dense_float_1"] = array_ops.constant([0, 0, 1, 1],
dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.VariableV1(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions":
predictions,
"predictions_no_dropout":
predictions,
"partition_ids":
partition_ids,
"ensemble_stamp":
ensemble_stamp,
"num_trees":
12,
          # We have already reached our limit of 1 used handler, so both
          # handlers will be disabled.
"num_used_handlers":
array_ops.constant(1, dtype=dtypes.int64),
"used_handlers_mask":
array_ops.constant([False, False], dtype=dtypes.bool),
}
labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertEqual(output.growing_metadata.num_layers_attempted, 1)
      self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
# Make sure the trees are not modified, but the num_layers_attempted is
# incremented so that eventually the training stops.
      self.assertEqual(len(output.trees), 1)
      self.assertEqual(len(output.trees[0].nodes), 3)
      self.assertEqual(output.growing_metadata.num_layers_attempted, 2)
def testResetModelBeforeAndAfterSplit(self):
"""Tests whether resetting works."""
with self.cached_session():
# First build a small tree and train it to verify training works.
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
"max_tree_depth": 4,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
# Create train op.
update_op, reset_op, training_state = gbdt_model.update_stats(
loss, predictions_dict)
with ops.control_dependencies(update_op):
train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
original_stamp = ensemble_stamp.eval()
expected_tree = """
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
def _train_once_and_check(expect_split):
stamp = ensemble_stamp.eval()
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
        self.assertEqual(stamp_token.eval(), stamp + 1)
if expect_split:
# State of the ensemble after a split occurs.
          self.assertEqual(len(output.trees), 1)
self.assertProtoEquals(expected_tree, output.trees[0])
else:
          # State of the ensemble after a single accumulation but before any
          # splitting occurs.
          self.assertEqual(len(output.trees), 0)
self.assertProtoEquals("""
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}""", output)
def _run_reset():
stamp_before_reset = ensemble_stamp.eval()
reset_op.run()
stamp_after_reset = ensemble_stamp.eval()
        self.assertNotEqual(stamp_after_reset, stamp_before_reset)
_, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertProtoEquals("", output)
return stamp_after_reset
      # Exit after one train_op, so no new layers are created, but the handlers
      # contain enough information to split on the next call to train.
_train_once_and_check(expect_split=False)
      self.assertEqual(ensemble_stamp.eval(), original_stamp + 1)
# Reset the handlers so it still requires two training calls to split.
stamp_after_reset = _run_reset()
_train_once_and_check(expect_split=False)
_train_once_and_check(expect_split=True)
      self.assertEqual(ensemble_stamp.eval(), stamp_after_reset + 2)
# This time, test that the reset_op works right after splitting.
stamp_after_reset = _run_reset()
# Test that after resetting, the tree can be trained as normal.
_train_once_and_check(expect_split=False)
_train_once_and_check(expect_split=True)
      self.assertEqual(ensemble_stamp.eval(), stamp_after_reset + 2)
def testResetModelNonChief(self):
"""Tests the reset function on a non-chief worker."""
with self.cached_session():
# Create ensemble with one bias node.
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
trees {
nodes {
leaf {
vector {
value: 0.25
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: false
}""", ensemble_config)
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=ensemble_config.SerializeToString(),
name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
# Create reset op.
_, reset_op, _ = gbdt_model.update_stats(
loss, predictions_dict)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# Reset op doesn't do anything because this is a non-chief worker.
reset_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertEqual(len(output.tree_weights), 1)
      self.assertEqual(stamp_token.eval(), 0)
def testResetModelWithCenterBias(self):
"""Tests the reset function running on chief with bias centering."""
with self.cached_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1,
features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = model_ops.tree_ensemble_stamp_token(ensemble_handle)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
loss = math_ops.reduce_mean(_squared_loss(labels, weights, predictions))
# Create train op.
update_op, reset_op, training_state = gbdt_model.update_stats(
loss, predictions_dict)
with ops.control_dependencies(update_op):
train_op = gbdt_model.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect bias to be centered.
def train_and_check():
train_op.run()
_, serialized = model_ops.tree_ensemble_serialize(ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
expected_tree = """
nodes {
leaf {
vector {
value: 0.25
}
}
}"""
        self.assertEqual(len(output.trees), 1)
self.assertAllEqual(output.tree_weights, [1.0])
self.assertProtoEquals(expected_tree, output.trees[0])
train_and_check()
      self.assertEqual(ensemble_stamp.eval(), 1)
reset_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 2)
train_and_check()
      self.assertEqual(ensemble_stamp.eval(), 3)
if __name__ == "__main__":
googletest.main()
| apache-2.0 | 6,053,921,406,122,958,000 | 37.307875 | 88 | 0.600027 | false |
PhilippeTillet/DSHF-ICA | python/examples/infomax_.py | 2 | 14364 | # Authors: Lukas Breuer <[email protected]>
# Juergen Dammers <[email protected]>
# Denis A. Engeman <[email protected]>
#
# License: BSD (3-clause)
import math
import logging
import numpy as np
logger = logging.getLogger('mne') # one selection here used across mne-python
logger.propagate = False # don't propagate (in case of multiple imports)
def random_permutation(n_samples, random_state=None):
"""Helper to emulate the randperm matlab function.
It returns a vector containing a random permutation of the
integers between 0 and n_samples-1. It returns the same random numbers
    as the matlab randperm function whenever the random_state is the same
    as matlab's random seed.
This function is useful for comparing against matlab scripts
which use the randperm function.
Note: the randperm(n_samples) matlab function generates a random
sequence between 1 and n_samples, whereas
random_permutation(n_samples, random_state) function generates
a random sequence between 0 and n_samples-1, that is:
randperm(n_samples) = random_permutation(n_samples, random_state) - 1
Parameters
----------
n_samples : int
End point of the sequence to be permuted (excluded, i.e., the end point
is equal to n_samples-1)
random_state : int | None
Random seed for initializing the pseudo-random number generator.
Returns
-------
randperm : ndarray, int
Randomly permuted sequence between 0 and n-1.
"""
rng = check_random_state(random_state)
idx = rng.rand(n_samples)
randperm = np.argsort(idx)
return randperm
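# Illustrative usage (an added sketch, not part of the original module): with
# a fixed seed the permutation is deterministic, and adding 1 reproduces
# matlab's 1-based randperm sequence for the same seed.
#   perm = random_permutation(5, random_state=0)
#   matlab_style = perm + 1  # equals matlab's randperm(5) for that seed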
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
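# For example (illustrative): check_random_state(42) returns a RandomState
# seeded with 42, while check_random_state(None) hands back numpy's global
# generator; passing an int is what makes downstream sampling reproducible.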
def infomax(raw_data, weights=None, l_rate=None, block=None, w_change=1e-12,
anneal_deg=60., anneal_step=0.9, extended=True, n_subgauss=0,
kurt_size=6000, ext_blocks=1, max_iter=200, random_state=None,
blowup=1e4, blowup_fac=0.5, n_small_angle=20, use_bias=False,
verbose=None):
"""Run (extended) Infomax ICA decomposition on raw data.
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The whitened data to unmix.
weights : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix.
Defaults to None, which means the identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
.. note:: Smaller learning rates will slow down the ICA procedure.
Defaults to 0.01 / log(n_features ** 2).
block : int
The block size of randomly chosen data segments.
Defaults to floor(sqrt(n_times / 3.)).
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle (in degrees) at which the learning rate will be reduced.
Defaults to 60.0.
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9.
extended : bool
Whether to use the extended Infomax algorithm or not.
Defaults to True.
n_subgauss : int
The number of subgaussian components. Only considered for extended
        Infomax. Defaults to 0.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax. Defaults to 6000.
ext_blocks : int
Only considered for extended Infomax. If positive, denotes the number
of blocks after which to recompute the kurtosis, which is used to
estimate the signs of the sources. In this case, the number of
sub-gaussian sources is automatically determined.
If negative, the number of sub-gaussian sources to be used is fixed
and equal to n_subgauss. In this case, the kurtosis is not estimated.
Defaults to 1.
max_iter : int
The maximum number of iterations. Defaults to 200.
random_state : int | np.random.RandomState
If random_state is an int, use random_state to seed the random number
generator. If random_state is already a np.random.RandomState instance,
use random_state as random number generator.
blowup : float
The maximum difference allowed between two successive estimations of
the unmixing matrix. Defaults to 10000.
blowup_fac : float
The factor by which the learning rate will be reduced if the difference
        between two successive estimations of the unmixing matrix exceeds
``blowup``:
l_rate *= blowup_fac
Defaults to 0.5.
n_small_angle : int | None
The maximum number of allowed steps in which the angle between two
successive estimations of the unmixing matrix is less than
``anneal_deg``. If None, this parameter is not taken into account to
stop the iterations.
Defaults to 20.
use_bias : bool
This quantity indicates if the bias should be computed.
        Defaults to False.
verbose : bool, str, int, or None
If not None, override default verbosity level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray, shape (n_features, n_features)
The linear unmixing operator.
References
----------
[1] A. J. Bell, T. J. Sejnowski. An information-maximization approach to
blind separation and blind deconvolution. Neural Computation, 7(6),
1129-1159, 1995.
[2] T. W. Lee, M. Girolami, T. J. Sejnowski. Independent component analysis
using an extended infomax algorithm for mixed subgaussian and
supergaussian sources. Neural Computation, 11(2), 417-441, 1999.
"""
from scipy.stats import kurtosis
from scipy.linalg import sqrtm
rng = check_random_state(random_state)
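    # Center the data, then sphere (decorrelate) it before unmixing. The
    # factor of 2 in the sphering matrix follows the EEGLAB-style Infomax
    # convention (an explanatory note added here, not an original comment).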
data = raw_data - np.mean(raw_data, 0, keepdims=True)
sphere = 2*np.linalg.inv(sqrtm(np.cov(data.T)))
data = np.dot(data, sphere)
# define some default parameters
max_weight = 1e8
restart_fac = 0.9
min_l_rate = 1e-10
degconst = 180.0 / np.pi
# for extended Infomax
extmomentum = 0.5
signsbias = 0.02
signcount_threshold = 25
signcount_step = 2
# check data shape
n_samples, n_features = data.shape
n_features_square = n_features ** 2
# check input parameters
# heuristic default - may need adjustment for large or tiny data sets
if l_rate is None:
l_rate = 0.01 / math.log(n_features ** 2.0)
if block is None:
block = int(math.floor(math.sqrt(n_samples / 3.0)))
    logger.info('computing%sInfomax ICA' % (' Extended ' if extended else ' '))
# collect parameters
nblock = n_samples // block
lastt = (nblock - 1) * block + 1
# initialize training
if weights is None:
weights = np.identity(n_features, dtype=np.float64)
BI = block * np.identity(n_features, dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
step = 0
count_small_angle = 0
wts_blowup = False
blockno = 0
signcount = 0
initial_ext_blocks = ext_blocks # save the initial value in case of reset
# for extended Infomax
if extended:
signs = np.ones(n_features)
for k in range(n_subgauss):
signs[k] = -1
kurt_size = min(kurt_size, n_samples)
old_kurt = np.zeros(n_features, dtype=np.float64)
oldsigns = np.zeros(n_features)
# trainings loop
olddelta, oldchange = 1., 0.
while step < max_iter:
Z = np.dot(data, weights)
# shuffle data at each step
permute = random_permutation(n_samples, rng)
# ICA training block
# loop across block samples
for t in range(0, lastt, block):
u = np.dot(data[permute[t:t + block], :], weights)
u += np.dot(bias, onesrow).T
if extended:
# extended ICA update
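                # Natural-gradient extended-Infomax step (descriptive note):
                # weights += l_rate * weights.dot(block*I
                #     - u.T.dot(tanh(u)) * signs - u.T.dot(u)),
                # where signs is +1 for super- and -1 for sub-gaussian
                # components.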
y = np.tanh(u)
weights += l_rate * np.dot(weights,
BI -
signs[None, :] * np.dot(u.T, y) -
np.dot(u.T, u))
if use_bias:
bias += l_rate * np.reshape(np.sum(y, axis=0,
dtype=np.float64) * -2.0,
(n_features, 1))
else:
# logistic ICA weights update
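                # Natural-gradient Bell-Sejnowski step (descriptive note):
                # weights += l_rate * weights.dot(block*I
                #     + u.T.dot(1 - 2*sigmoid(u))).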
y = 1.0 / (1.0 + np.exp(-u))
weights += l_rate * np.dot(weights,
BI + np.dot(u.T, (1.0 - 2.0 * y)))
if use_bias:
bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
dtype=np.float64),
(n_features, 1))
# check change limit
max_weight_val = np.max(np.abs(weights))
if max_weight_val > max_weight:
wts_blowup = True
blockno += 1
if wts_blowup:
break
# ICA kurtosis estimation
if extended:
if ext_blocks > 0 and blockno % ext_blocks == 0:
if kurt_size < n_samples:
rp = np.floor(rng.uniform(0, 1, kurt_size) *
(n_samples - 1))
tpartact = np.dot(data[rp.astype(int), :], weights).T
else:
tpartact = np.dot(data, weights).T
# estimate kurtosis
kurt = kurtosis(tpartact, axis=1, fisher=True)
if extmomentum != 0:
kurt = (extmomentum * old_kurt +
(1.0 - extmomentum) * kurt)
old_kurt = kurt
# estimate weighted signs
signs = np.sign(kurt + signsbias)
ndiff = (signs - oldsigns != 0).sum()
if ndiff == 0:
signcount += 1
else:
signcount = 0
oldsigns = signs
if signcount >= signcount_threshold:
ext_blocks = np.fix(ext_blocks * signcount_step)
signcount = 0
# here we continue after the for loop over the ICA training blocks
# if weights in bounds:
if not wts_blowup:
oldwtchange = weights - oldweights
step += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, n_features_square)
change = np.sum(delta * delta, dtype=np.float64)
if step > 2:
angledelta = math.acos(np.sum(delta * olddelta) /
math.sqrt(change * oldchange))
angledelta *= degconst
if verbose:
logger.info(
'step %d - lrate %5f, wchange %8.8f, angledelta %4.1f deg'
% (step, l_rate, change, angledelta))
# anneal learning rate
oldweights = weights.copy()
if angledelta > anneal_deg:
l_rate *= anneal_step # anneal learning rate
                # remember this step's delta/change for the next angle comparison
olddelta = delta
oldchange = change
count_small_angle = 0 # reset count when angledelta is large
else:
if step == 1: # on first step only
olddelta = delta # initialize
oldchange = change
if n_small_angle is not None:
count_small_angle += 1
if count_small_angle > n_small_angle:
max_iter = step
# apply stopping rule
if step > 2 and change < w_change:
step = max_iter
elif change > blowup:
l_rate *= blowup_fac
# restart if weights blow up (for lowering l_rate)
else:
step = 0 # start again
wts_blowup = 0 # re-initialize variables
blockno = 1
l_rate *= restart_fac # with lower learning rate
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, n_features_square), dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
ext_blocks = initial_ext_blocks
# for extended Infomax
if extended:
signs = np.ones(n_features)
for k in range(n_subgauss):
signs[k] = -1
oldsigns = np.zeros(n_features)
if l_rate > min_l_rate:
if verbose:
logger.info('... lowering learning rate to %g'
'\n... re-starting...' % l_rate)
else:
raise ValueError('Error in Infomax ICA: unmixing_matrix matrix'
'might not be invertible!')
if verbose:
cost = -(np.linalg.slogdet(weights)[1] - np.sum(np.mean(2*np.log(1 + np.exp(Z)) - Z, 0)))
print 'step {}: cost = {:.4f}'.format(step, cost)
# prepare return values
return np.dot(weights.T, sphere)
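# Usage sketch (illustrative; the mixing matrix and shapes below are
# assumptions, not part of the original module). The returned matrix already
# folds in the sphering step, so sources are recovered from the centered
# input with a single product:
#   rng = np.random.RandomState(0)
#   sources = rng.laplace(size=(10000, 3))
#   mixed = np.dot(sources, rng.randn(3, 3))  # arbitrary mixing matrix
#   unmixing = infomax(mixed, extended=True, random_state=0)
#   recovered = np.dot(mixed - mixed.mean(0), unmixing.T)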
| mit | -9,035,385,017,390,951,000 | 37.716981 | 101 | 0.563492 | false |
otype/myip | setup.py | 1 | 8799 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
## Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
if 'check_output' not in dir(subprocess):
def check_output(cmd_args, *args, **kwargs):
proc = subprocess.Popen(
cmd_args, *args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
out, err = proc.communicate()
if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, cmd_args)
return out
subprocess.check_output = check_output
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
try:
import colorama
colorama.init() # Initialize colorama on Windows
except ImportError:
# Don't require colorama just for running paver tasks. This allows us to
# run `paver install' without requiring the user to first have colorama
# installed.
pass
# Add the current directory to the module search path.
sys.path.append('.')
## Constants
CODE_DIRECTORY = 'myip'
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']
# Import metadata. Normally this would just be:
#
# from myip import metadata
#
# However, when we do this, we also import `myip/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
## Miscellaneous helper functions
def get_project_files():
"""Retrieve a list of project files, ignoring hidden files.
:return: sorted list of project files
:rtype: :class:`list`
"""
if is_git_project():
return get_git_project_files()
project_files = []
for top, subdirs, files in os.walk('.'):
for subdir in subdirs:
if subdir.startswith('.'):
subdirs.remove(subdir)
for f in files:
if f.startswith('.'):
continue
project_files.append(os.path.join(top, f))
return project_files
def is_git_project():
return os.path.isdir('.git')
def get_git_project_files():
"""Retrieve a list of all non-ignored files, including untracked files,
excluding deleted files.
:return: sorted list of git project files
:rtype: :class:`list`
"""
cached_and_untracked_files = git_ls_files(
'--cached', # All files cached in the index
'--others', # Untracked files
# Exclude untracked files that would be excluded by .gitignore, etc.
'--exclude-standard')
uncommitted_deleted_files = git_ls_files('--deleted')
    # Since iteration order over a set is arbitrary, return a sorted list to
# provide a well-defined order to tools like flake8, etc.
return sorted(cached_and_untracked_files - uncommitted_deleted_files)
def git_ls_files(*cmd_args):
"""Run ``git ls-files`` in the top-level project directory. Arguments go
directly to execution call.
:return: set of file names
:rtype: :class:`set`
"""
cmd = ['git', 'ls-files']
cmd.extend(cmd_args)
return set(subprocess.check_output(cmd).splitlines())
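# Note: subprocess.check_output returns bytes on Python 3, so the names in
# this set are byte strings; that is why _lint below filters them with
# endswith(b'.py').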
def print_success_message(message):
"""Print a message indicating success in green color to STDOUT.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
except ImportError:
print(message)
def print_failure_message(message):
"""Print a message indicating failure in red color to STDERR.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.RED + message + colorama.Fore.RESET,
file=sys.stderr)
except ImportError:
print(message, file=sys.stderr)
def read(filename):
"""Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str`
"""
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def _lint():
"""Run lint and return an exit code."""
# Flake8 doesn't have an easy way to run checks using a Python function, so
# just fork off another process to do it.
# Python 3 compat:
# - The result of subprocess call outputs are byte strings, meaning we need
# to pass a byte string to endswith.
project_python_files = [filename for filename in get_project_files()
if filename.endswith(b'.py')]
retcode = subprocess.call(
['flake8', '--max-complexity=10'] + project_python_files)
if retcode == 0:
print_success_message('No style errors')
return retcode
def _test():
"""Run the unit tests.
:return: exit code
"""
# Make sure to import pytest in this function. For the reason, see here:
# <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
import pytest
# This runs the unit tests.
# It also runs doctest, but only on the modules in TESTS_DIRECTORY.
return pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY])
def _test_all():
"""Run lint and tests.
:return: exit code
"""
return _lint() + _test()
# The following code is to allow tests to be run with `python setup.py test'.
# The main reason to make this possible is to allow tests to be run as part of
# Setuptools' automatic run of 2to3 on the source code. The recommended way to
# run tests is still `paver test_all'.
# See <http://pythonhosted.org/setuptools/python3.html>
# Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
class TestAllCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
# These are fake, and just set to appease distutils and setuptools.
self.test_suite = True
self.test_args = []
def run_tests(self):
raise SystemExit(_test_all())
# define install_requires for specific Python versions
python_version_specific_requires = []
# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
python_version_specific_requires.append('argparse')
# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=read('README.rst'),
# Find a list of classifiers here:
# <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Software Distribution',
],
packages=find_packages(exclude=(TESTS_DIRECTORY,)),
install_requires=[
# your module dependencies
] + python_version_specific_requires,
# Allow tests to be run with `python setup.py test'.
tests_require=[
'pytest==2.5.1',
'mock==1.0.1',
'flake8==2.1.0',
],
cmdclass={'test': TestAllCommand},
zip_safe=False, # don't use eggs
entry_points={
'console_scripts': [
'myip_cli = myip.main:entry_point'
],
# if you have a gui, use this
# 'gui_scripts': [
# 'myip_gui = myip.gui:entry_point'
# ]
}
)
def main():
setup(**setup_dict)
if __name__ == '__main__':
main()
| mit | -8,029,327,746,469,997,000 | 30.537634 | 113 | 0.652233 | false |
pyokagan/gyp | buildbot/buildbot_run.py | 10 | 8342 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import filecmp
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
  CallSubProcess(['make', 'cmake'], cwd=CMAKE_DIR)
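# Shell fragment that initializes the Android build environment. It must be
# sourced in the same `bash -c` invocation as any command that depends on it,
# because the resulting environment only lives for that single shell.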
_ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng'
def PrepareAndroidTree():
"""Prepare an Android tree to run 'android' format tests."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber Android checkout@@@'
shutil.rmtree(ANDROID_DIR)
# (Re)create the directory so that the following steps will succeed.
if not os.path.isdir(ANDROID_DIR):
os.mkdir(ANDROID_DIR)
# We use a manifest from the gyp project listing pinned revisions of AOSP to
# use, to ensure that we test against a stable target. This needs to be
# updated to pick up new build system changes sometimes, so we must test if
# it has changed.
manifest_filename = 'aosp_manifest.xml'
gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename)
android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests',
manifest_filename)
manifest_is_current = (os.path.isfile(android_manifest) and
filecmp.cmp(gyp_manifest, android_manifest))
if not manifest_is_current:
# It's safe to repeat these steps, so just do them again to make sure we are
# in a good state.
print '@@@BUILD_STEP Initialize Android checkout@@@'
CallSubProcess(
['repo', 'init',
'-u', 'https://android.googlesource.com/platform/manifest',
'-b', 'master',
'-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
cwd=ANDROID_DIR)
shutil.copy(gyp_manifest, android_manifest)
print '@@@BUILD_STEP Sync Android@@@'
CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename],
cwd=ANDROID_DIR)
# If we already built the system image successfully and didn't sync to a new
# version of the source, skip running the build again as it's expensive even
# when there's nothing to do.
system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic',
'system.img')
if manifest_is_current and os.path.isfile(system_img):
return
print '@@@BUILD_STEP Build Android@@@'
CallSubProcess(
['/bin/bash',
'-c', '%s && make -j4' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StartAndroidEmulator():
"""Start an android emulator from the built android tree."""
print '@@@BUILD_STEP Start Android emulator@@@'
CallSubProcess(['/bin/bash', '-c',
'%s && adb kill-server ' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
# If taskset is available, use it to force adbd to run only on one core, as,
# sadly, it improves its reliability (see crbug.com/268450).
adbd_wrapper = ''
with open(os.devnull, 'w') as devnull_fd:
if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0:
adbd_wrapper = 'taskset -c 0'
CallSubProcess(['/bin/bash', '-c',
'%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)],
cwd=ANDROID_DIR)
subprocess.Popen(
['/bin/bash', '-c',
'%s && emulator -no-window' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
CallSubProcess(
['/bin/bash', '-c',
'%s && adb wait-for-device' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StopAndroidEmulator():
"""Stop all android emulators."""
print '@@@BUILD_STEP Stop Android emulator@@@'
# If this fails, it's because there is no emulator running.
subprocess.call(['pkill', 'emulator.*'])
def GypTestFormat(title, format=None, msvs_version=None, tests=None):
  """Run the gyp tests for a given format, emitting annotator tags.
  See annotator docs at:
  https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
  Args:
    format: gyp format to test.
  Returns:
    0 for success, 1 for failure.
  """
  tests = tests or []  # avoid the mutable default-argument pitfall
  if not format:
    format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'trunk/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'trunk'] + tests)
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
# The Android gyp bot runs on linux so this must be tested first.
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
PrepareAndroidTree()
StartAndroidEmulator()
try:
retcode += GypTestFormat('android')
finally:
StopAndroidEmulator()
elif sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| bsd-3-clause | 8,599,390,430,592,484,000 | 32.23506 | 117 | 0.630904 | false |
Dioptas/Dioptas | dioptas/model/MaskModel.py | 1 | 11647 | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import deque
import numpy as np
import skimage.draw
from PIL import Image
from qtpy import QtCore
from math import sqrt, atan2, cos, sin
from .util.cosmics import cosmicsimage
class MaskModel(object):
def __init__(self, mask_dimension=(2048, 2048)):
self.mask_dimension = mask_dimension
self.reset_dimension()
self.filename = ''
self.mode = True
self.roi = None
self._mask_data = np.zeros(self.mask_dimension, dtype=bool)
self._undo_deque = deque(maxlen=50)
self._redo_deque = deque(maxlen=50)
def set_dimension(self, mask_dimension):
if not np.array_equal(mask_dimension, self.mask_dimension):
self.mask_dimension = mask_dimension
self.reset_dimension()
def reset_dimension(self):
if self.mask_dimension is not None:
self._mask_data = np.zeros(self.mask_dimension, dtype=bool)
self._undo_deque = deque(maxlen=50)
self._redo_deque = deque(maxlen=50)
@property
def roi_mask(self):
if self.roi is not None:
roi_mask = np.ones(self.mask_dimension)
x1, x2, y1, y2 = self.roi
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
roi_mask[int(x1):int(x2), int(y1):int(y2)] = 0
return roi_mask
else:
return None
def get_mask(self):
if self.roi is None:
return self._mask_data
elif self.roi is not None:
return np.logical_or(self._mask_data, self.roi_mask)
def get_img(self):
return self._mask_data
def update_deque(self):
"""
Saves the current mask data into a deque, which can be popped later
to provide an undo/redo feature.
        When performing a new action, the old redo steps will be cleared.
"""
self._undo_deque.append(np.copy(self._mask_data))
self._redo_deque.clear()
def undo(self):
try:
old_data = self._undo_deque.pop()
self._redo_deque.append(np.copy(self._mask_data))
self._mask_data = old_data
except IndexError:
pass
def redo(self):
try:
new_data = self._redo_deque.pop()
self._undo_deque.append(np.copy(self._mask_data))
self._mask_data = new_data
except IndexError:
pass
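    # Illustrative sketch (not part of the original source): the deques
    # above give a bounded snapshot history, e.g.
    #
    #   >>> m = MaskModel((4, 4))
    #   >>> m.mask_rect(0, 0, 2, 2)   # update_deque() saves the empty mask
    #   >>> m.undo()                  # empty mask restored
    #   >>> m.redo()                  # rectangle mask re-applied
    #
    # With maxlen=50, only the 50 most recent states survive; anything
    # older is discarded silently.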
def mask_below_threshold(self, img_data, threshold):
self.update_deque()
self._mask_data += (img_data < threshold)
def mask_above_threshold(self, img_data, threshold):
self.update_deque()
self._mask_data += (img_data > threshold)
def mask_QGraphicsRectItem(self, QGraphicsRectItem):
rect = QGraphicsRectItem.rect()
self.mask_rect(rect.top(), rect.left(), rect.height(), rect.width())
def mask_QGraphicsPolygonItem(self, QGraphicsPolygonItem):
"""
Masks a polygon given by a QGraphicsPolygonItem from the QtWidgets Library.
        Uses the skimage.draw.polygon function.
"""
# get polygon points
poly_list = list(QGraphicsPolygonItem.vertices)
x = np.zeros(len(poly_list))
y = np.zeros(len(poly_list))
for i, point in enumerate(poly_list):
x[i] = point.x()
y[i] = point.y()
self.mask_polygon(x, y)
def mask_QGraphicsEllipseItem(self, QGraphicsEllipseItem):
"""
        Masks an ellipse given by a QGraphicsEllipseItem from the QtWidgets
Library. Uses the skimage.draw.ellipse function.
"""
bounding_rect = QGraphicsEllipseItem.rect()
cx = bounding_rect.center().x()
cy = bounding_rect.center().y()
x_radius = bounding_rect.width() * 0.5
y_radius = bounding_rect.height() * 0.5
self.mask_ellipse(int(cx), int(cy), int(x_radius), int(y_radius))
def mask_rect(self, x, y, width, height):
"""
Masks a rectangle. x and y parameters are the upper left corner
of the rectangle.
"""
self.update_deque()
if width > 0:
x_ind1 = np.round(x)
x_ind2 = np.round(x + width)
else:
x_ind1 = np.round(x + width)
x_ind2 = np.round(x)
if height > 0:
y_ind1 = np.round(y)
y_ind2 = np.round(y + height)
else:
y_ind1 = np.round(y + height)
y_ind2 = np.round(y)
if x_ind1 < 0:
x_ind1 = 0
if y_ind1 < 0:
y_ind1 = 0
x_ind1, x_ind2, y_ind1, y_ind2 = int(x_ind1), int(x_ind2), int(y_ind1), int(y_ind2)
self._mask_data[x_ind1:x_ind2, y_ind1:y_ind2] = self.mode
def mask_polygon(self, x, y):
"""
        Masks a polygon with the given vertices. x and y are lists of
the polygon vertices. Uses the draw.polygon implementation of
the skimage library.
"""
self.update_deque()
rr, cc = skimage.draw.polygon(y, x, self._mask_data.shape)
self._mask_data[rr, cc] = self.mode
def mask_ellipse(self, cx, cy, x_radius, y_radius):
"""
Masks an ellipse with center coordinates (cx, cy) and the radii
given. Uses the draw.ellipse implementation of
the skimage library.
"""
self.update_deque()
rr, cc = skimage.draw.ellipse(
cy, cx, y_radius, x_radius, shape=self._mask_data.shape)
self._mask_data[rr, cc] = self.mode
def grow(self):
self.update_deque()
self._mask_data[1:, :] = np.logical_or(self._mask_data[1:, :], self._mask_data[:-1, :])
self._mask_data[:-1, :] = np.logical_or(self._mask_data[:-1, :], self._mask_data[1:, :])
self._mask_data[:, 1:] = np.logical_or(self._mask_data[:, 1:], self._mask_data[:, :-1])
self._mask_data[:, :-1] = np.logical_or(self._mask_data[:, :-1], self._mask_data[:, 1:])
def shrink(self):
self.update_deque()
self._mask_data[1:, :] = np.logical_and(self._mask_data[1:, :], self._mask_data[:-1, :])
self._mask_data[:-1, :] = np.logical_and(self._mask_data[:-1, :], self._mask_data[1:, :])
self._mask_data[:, 1:] = np.logical_and(self._mask_data[:, 1:], self._mask_data[:, :-1])
self._mask_data[:, :-1] = np.logical_and(self._mask_data[:, :-1], self._mask_data[:, 1:])
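    # Illustrative sketch (not part of the original source): grow() dilates
    # the mask by one pixel along each axis using only shifted logical ORs,
    # and shrink() is the matching one-pixel erosion. For a pattern that is
    # constant along the other axis, a single row evolves as
    #
    #   before grow():  [False, True, False, False]
    #   after  grow():  [True,  True, True,  False]
    #
    # Note that shrink() effectively treats out-of-range neighbors as
    # masked, so grow()/shrink() round trips can leave extra pixels at the
    # array border.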
def invert_mask(self):
self.update_deque()
self._mask_data = np.logical_not(self._mask_data)
def clear_mask(self):
self.update_deque()
self._mask_data[:, :] = False
def remove_cosmic(self, img):
self.update_deque()
test = cosmicsimage(img, sigclip=3.0, objlim=3.0)
num = 2
for i in range(num):
test.lacosmiciteration(True)
test.clean()
self._mask_data = np.logical_or(self._mask_data, np.array(test.mask, dtype='bool'))
def set_mode(self, mode):
"""
sets the mode to unmask or mask which equals mode = False or True
"""
self.mode = mode
def set_mask(self, mask_data):
self.update_deque()
self._mask_data = mask_data
def save_mask(self, filename):
im_array = np.int8(self.get_img())
im = Image.fromarray(im_array)
try:
im.save(filename, "tiff", compression="tiff_deflate")
except OSError:
try:
im.save(filename, "tiff", compression="tiff_adobe_deflate")
except IOError:
im.save(filename, "tiff")
self.filename = filename
def load_mask(self, filename):
try:
data = np.array(Image.open(filename))
except IOError:
data = np.loadtxt(filename)
if self.mask_dimension == data.shape:
self.filename = filename
self.mask_dimension = data.shape
self.reset_dimension()
self.set_mask(data)
return True
return False
def add_mask(self, filename):
try:
data = np.array(Image.open(filename))
except IOError:
data = np.loadtxt(filename)
if self.get_mask().shape == data.shape:
self._add_mask(data)
return True
return False
def _add_mask(self, mask_data):
self.update_deque()
self._mask_data = np.logical_or(self._mask_data,
np.array(mask_data, dtype='bool'))
def find_center_of_circle_from_three_points(self, a, b, c):
xa, ya = a.x(), a.y()
xb, yb = b.x(), b.y()
xc, yc = c.x(), c.y()
# if (xa == xb and ya == yb) or (xa == xc and ya == yc) or (xb == xc and yb == yc):
# return None
mid_ab_x = (xa + xb) / 2.0
mid_ab_y = (ya + yb) / 2.0
mid_bc_x = (xb + xc) / 2.0
mid_bc_y = (yb + yc) / 2.0
slope_ab = (yb - ya) / (xb - xa)
slope_bc = (yc - yb) / (xc - xb)
slope_p_ab = -1.0 / slope_ab
slope_p_bc = -1.0 / slope_bc
b_p_ab = mid_ab_y - slope_p_ab * mid_ab_x
b_p_bc = mid_bc_y - slope_p_bc * mid_bc_x
x0 = (b_p_bc - b_p_ab) / (slope_p_ab - slope_p_bc)
y0 = slope_p_ab * x0 + b_p_ab
self.center_for_arc = QtCore.QPointF(x0, y0)
return self.center_for_arc
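    # Worked example (illustrative, not part of the original source): the
    # method intersects the perpendicular bisectors of segments ab and bc.
    # For a = (0, 0), b = (4, 2), c = (2, 6):
    #
    #   bisector of ab: through (2, 1) with slope -2   ->  y = -2x + 5
    #   bisector of bc: through (3, 4) with slope 1/2  ->  y = x/2 + 5/2
    #
    # giving the circumcenter (1, 3), with all three points at distance
    # sqrt(10) from it. The divisions by (xb - xa), (xc - xb) and the
    # slopes mean vertical or horizontal segments (or coincident points,
    # see the commented-out guard above) raise ZeroDivisionError.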
@staticmethod
def find_radius_of_circle_from_center_and_point(p0, a):
r = sqrt((a.x() - p0.x()) ** 2 + (a.y() - p0.y()) ** 2)
return r
def find_n_angles_on_arc_from_three_points_around_p0(self, p0, pa, pb, pc, n):
phi_a = self.calc_angle_from_center_and_point(p0, pa)
phi_b = self.calc_angle_from_center_and_point(p0, pb)
phi_c = self.calc_angle_from_center_and_point(p0, pc)
if phi_c < phi_a < phi_b or phi_b < phi_c < phi_a:
phi_range = np.linspace(phi_a, phi_c + 2 * np.pi, n)
elif phi_a < phi_b < phi_c or phi_c < phi_b < phi_a:
phi_range = np.linspace(phi_a, phi_c, n)
elif phi_a < phi_c < phi_b or phi_b < phi_a < phi_c:
phi_range = np.linspace(phi_a + 2 * np.pi, phi_c, n)
else:
return None
return phi_range
@staticmethod
def calc_angle_from_center_and_point(p0, pa):
phi = atan2(pa.y() - p0.y(), pa.x() - p0.x())
return phi
@staticmethod
def calc_arc_points_from_angles(p0, r, width, phi_range):
p = []
for phi in phi_range:
xn = p0.x() + (r - width) * cos(phi)
yn = p0.y() + (r - width) * sin(phi)
p.append(QtCore.QPointF(xn, yn))
return p
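# Illustrative pipeline (not part of the original source): the arc helpers
# above compose as, e.g.,
#
#   p0  = model.find_center_of_circle_from_three_points(pa, pb, pc)
#   r   = MaskModel.find_radius_of_circle_from_center_and_point(p0, pa)
#   phi = model.find_n_angles_on_arc_from_three_points_around_p0(
#             p0, pa, pb, pc, n=50)
#   pts = MaskModel.calc_arc_points_from_angles(p0, r, width, phi)
#
# where pa, pb, pc are QPointF picks on the arc and `width` the desired
# arc thickness; splitting `pts` into x/y arrays then feeds mask_polygon().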
| gpl-3.0 | -1,486,751,670,052,096,800 | 34.401216 | 97 | 0.560488 | false |
craigcitro/apitools | apitools/base/py/compression_test.py | 8 | 5319 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for compression."""
from apitools.base.py import compression
from apitools.base.py import gzip
import six
import unittest2
class CompressionTest(unittest2.TestCase):
def setUp(self):
# Sample highly compressible data (~50MB).
self.sample_data = b'abc' * 16777216
# Stream of the sample data.
self.stream = six.BytesIO()
self.stream.write(self.sample_data)
self.length = self.stream.tell()
self.stream.seek(0)
def testCompressionExhausted(self):
"""Test full compression.
        Test that highly compressible data is actually compressed in its entirety.
"""
output, read, exhausted = compression.CompressStream(
self.stream,
self.length,
9)
# Ensure the compressed buffer is smaller than the input buffer.
self.assertLess(output.length, self.length)
# Ensure we read the entire input stream.
self.assertEqual(read, self.length)
# Ensure the input stream was exhausted.
self.assertTrue(exhausted)
def testCompressionUnbounded(self):
"""Test unbounded compression.
Test that the input stream is exhausted when length is none.
"""
output, read, exhausted = compression.CompressStream(
self.stream,
None,
9)
# Ensure the compressed buffer is smaller than the input buffer.
self.assertLess(output.length, self.length)
# Ensure we read the entire input stream.
self.assertEqual(read, self.length)
# Ensure the input stream was exhausted.
self.assertTrue(exhausted)
def testCompressionPartial(self):
"""Test partial compression.
Test that the length parameter works correctly. The amount of data
that's compressed can be greater than or equal to the requested length.
"""
output_length = 40
output, _, exhausted = compression.CompressStream(
self.stream,
output_length,
9)
# Ensure the requested read size is <= the compressed buffer size.
self.assertLessEqual(output_length, output.length)
# Ensure the input stream was not exhausted.
self.assertFalse(exhausted)
def testCompressionIntegrity(self):
"""Test that compressed data can be decompressed."""
output, read, exhausted = compression.CompressStream(
self.stream,
self.length,
9)
# Ensure uncompressed data matches the sample data.
with gzip.GzipFile(fileobj=output) as f:
original = f.read()
self.assertEqual(original, self.sample_data)
# Ensure we read the entire input stream.
self.assertEqual(read, self.length)
# Ensure the input stream was exhausted.
self.assertTrue(exhausted)
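# Illustrative usage sketch (not part of the original tests), showing the
# contract exercised above: CompressStream reads up to `length` bytes from
# `stream` (all of it when length is None), gzip-compresses them at the
# given level, and reports how much input was consumed.
#
#   >>> src = six.BytesIO(b'abc' * 1000)
#   >>> out, read, exhausted = compression.CompressStream(src, None, 9)
#   >>> read, exhausted
#   (3000, True)
#
# `out` is a StreamingBuffer whose contents can be handed to gzip.GzipFile
# for decompression, as testCompressionIntegrity does above.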
class StreamingBufferTest(unittest2.TestCase):
def setUp(self):
self.stream = compression.StreamingBuffer()
def testSimpleStream(self):
"""Test simple stream operations.
Test that the stream can be written to and read from. Also test that
reading from the stream consumes the bytes.
"""
# Ensure the stream is empty.
self.assertEqual(self.stream.length, 0)
# Ensure data is correctly written.
self.stream.write(b'Sample data')
self.assertEqual(self.stream.length, 11)
# Ensure data can be read and the read data is purged from the stream.
data = self.stream.read(11)
self.assertEqual(data, b'Sample data')
self.assertEqual(self.stream.length, 0)
def testPartialReads(self):
"""Test partial stream reads.
        Test that the stream can be read in chunks while preserving the
consumption mechanics.
"""
self.stream.write(b'Sample data')
# Ensure data can be read and the read data is purged from the stream.
data = self.stream.read(6)
self.assertEqual(data, b'Sample')
self.assertEqual(self.stream.length, 5)
# Ensure the remaining data can be read.
data = self.stream.read(5)
self.assertEqual(data, b' data')
self.assertEqual(self.stream.length, 0)
def testTooShort(self):
"""Test excessive stream reads.
Test that more data can be requested from the stream than available
without raising an exception.
"""
self.stream.write(b'Sample')
# Ensure requesting more data than available does not raise an
# exception.
data = self.stream.read(100)
self.assertEqual(data, b'Sample')
self.assertEqual(self.stream.length, 0)
| apache-2.0 | 8,986,278,953,602,595,000 | 34.697987 | 79 | 0.650498 | false |
seslattery/django-sample-app | bin/fabfile.py | 1 | 3028 | from fabric.api import cd, run, env, local, sudo, require
from fabric.operations import _prefix_commands, _prefix_env_vars
from lib.fabric_helpers import *
import os
import string
env.hosts = ['djtut2.example.com']
env.code_dir = '/srv/www/djtut2'
env.virtualenv = '/srv/www/djtut2/.virtualenv'
env.code_repo = '[email protected]:user/djtut2.git'
env.django_settings_module = 'djtut2.settings'
def run_tests():
""" Runs the Django test suite as is. """
local("./manage.py test")
def deploy_static():
with cd(env.code_dir):
run('./manage.py collectstatic -v0 --noinput')
def uname():
""" Prints information about the host. """
run("uname -a")
def push():
""" Push new code and pull on all hosts """
local('git push origin master')
with cd(env.code_dir):
run('git pull origin master')
def update_requirements():
""" Update requirements in the virtualenv. """
run("%s/bin/pip install -r %s/requirements/prod.txt" % (env.virtualenv, env.code_dir))
def migrate(app=None):
"""
Run the migrate task
Usage: fab migrate:app_name
"""
if app:
run("source %s/bin/activate; django-admin.py migrate %s --settings=%s" % (env.virtualenv, app, env.django_settings_module))
else:
run("source %s/bin/activate; django-admin.py migrate --settings=%s" % (env.virtualenv, env.django_settings_module))
def version():
""" Show last commit to the deployed repo. """
with cd(env.code_dir):
run('git log -1')
def restart():
""" Restart the wsgi process """
with cd(env.code_dir):
run("touch %s/djtut2/wsgi.py" % env.code_dir)
def ve_run(cmd):
"""
Helper function.
Runs a command using the virtualenv environment
"""
require('root')
return sshagent_run('source %s/bin/activate; %s' % (env.virtualenv, cmd))
def sshagent_run(cmd):
"""
Helper function.
Runs a command with SSH agent forwarding enabled.
Note:: Fabric (and paramiko) can't forward your SSH agent.
This helper uses your system's ssh to do so.
"""
# Handle context manager modifications
wrapped_cmd = _prefix_commands(_prefix_env_vars(cmd), 'remote')
try:
host, port = env.host_string.split(':')
return local(
"ssh -p %s -A %s@%s '%s'" % (port, env.user, host, wrapped_cmd)
)
except ValueError:
return local(
"ssh -A %s@%s '%s'" % (env.user, env.host_string, wrapped_cmd)
)
def deploy():
""" Update the remote deployment, update the virtualenv, perform any
pending migrations, then restart the wsgi process """
push()
update_requirements()
migrate()
restart()
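# Illustrative invocation (not part of the original file): with this
# fabfile, a full release to the hosts in env.hosts is
#
#   fab deploy
#
# which chains push, update_requirements, migrate and restart; a fresh
# host is prepared once with `fab bootstrap` (defined below).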
def clone():
""" Clone the repository for the first time """
with cd(env.code_dir):
run('git clone %s .' % (env.code_repo))
def bootstrap():
""" Bootstrap the initial deploy environment, then deploy """
run('mkdir %s' % (env.code_dir))
run('virtualenv %s' % (env.virtualenv))
clone()
deploy()
| bsd-3-clause | -6,400,100,869,345,203,000 | 25.330435 | 131 | 0.624174 | false |
Ademan/NumPy-GSoC | numpy/core/tests/test_umath.py | 3 | 38130 | import sys
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
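# Note (illustrative, not in the original source): the overflow checks
# above would fail with the textbook formula a*conj(b)/|b|**2, because
# |1e110|**2 overflows double precision; numpy scales by the larger
# component (Smith's method), so e.g. (1e110+0j)**2 / (1e110+0j) stays
# finite.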
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
err = np.seterr(invalid="ignore")
z = np.array([z], dtype=np.complex_)
try:
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
finally:
np.seterr(**err)
class TestLog2(TestCase):
def test_log2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
class TestExp2(TestCase):
def test_exp2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(object):
# Need test for intermediate precisions
def test_logaddexp2_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec)
def test_logaddexp2_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert np.isnan(np.logaddexp2(np.nan, np.inf))
assert np.isnan(np.logaddexp2(np.inf, np.nan))
assert np.isnan(np.logaddexp2(np.nan, 0))
assert np.isnan(np.logaddexp2(0, np.nan))
assert np.isnan(np.logaddexp2(np.nan, np.nan))
class TestLog(TestCase):
def test_log_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
class TestExp(TestCase):
def test_exp_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
class TestLogAddExp(object):
def test_logaddexp_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec)
def test_logaddexp_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert np.isnan(np.logaddexp(np.nan, np.inf))
assert np.isnan(np.logaddexp(np.inf, np.nan))
assert np.isnan(np.logaddexp(np.nan, 0))
assert np.isnan(np.logaddexp(0, np.nan))
assert np.isnan(np.logaddexp(np.nan, np.nan))
class TestLog1p(TestCase):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
class TestExpm1(TestCase):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
class TestHypot(TestCase, object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def assert_hypot_isnan(x, y):
err = np.seterr(invalid='ignore')
try:
assert np.isnan(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))
finally:
np.seterr(**err)
def assert_hypot_isinf(x, y):
err = np.seterr(invalid='ignore')
try:
assert np.isinf(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))
finally:
np.seterr(**err)
class TestHypotSpecialValues(TestCase):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
    def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
def assert_arctan2_isnan(x, y):
assert np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))
def assert_arctan2_ispinf(x, y):
assert (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))
def assert_arctan2_isninf(x, y):
assert (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))
def assert_arctan2_ispzero(x, y):
assert (ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))
def assert_arctan2_isnzero(x, y):
assert (ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))
class TestArctan2SpecialValues(TestCase):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp(TestCase):
def test_ldexp(self):
assert_almost_equal(ncu.ldexp(2., 3), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int16)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int32)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int16)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int32)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), np.array(3, np.int16)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), np.array(3, np.int32)), 16.)
class TestMaximum(TestCase):
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1,2j]),1)
assert_equal(np.maximum.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [nan, nan*1j, nan + nan*1j] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.maximum(arg1, arg2), out)
class TestMinimum(TestCase):
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1,2j]),2j)
assert_equal(np.minimum.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [nan, nan*1j, nan + nan*1j] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.minimum(arg1, arg2), out)
class TestFmax(TestCase):
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1,2j]),1)
assert_equal(np.fmax.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [nan, nan*1j, nan + nan*1j] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(TestCase):
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1,2j]),2j)
assert_equal(np.fmin.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [nan, nan*1j, nan + nan*1j] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmin(arg1, arg2), out)
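# Illustrative contrast (not part of the original suite): the NaN tests
# above pin down what separates the two ufunc families:
#
#   >>> np.maximum(np.nan, 1.0)   # propagates NaN
#   nan
#   >>> np.fmax(np.nan, 1.0)      # ignores NaN if one operand is valid
#   1.0
#
# fmin/fmax return NaN only when *both* operands are NaN, matching the
# `out = np.array([0, 0, nan])` expectations in TestFmax and TestFmin.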
class TestFloatingPoint(TestCase):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees(TestCase):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians(TestCase):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestSign(TestCase):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
olderr = np.seterr(invalid='ignore')
try:
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
finally:
np.seterr(**olderr)
class TestSpecialMethods(TestCase):
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
self.assertTrue(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
self.assertTrue(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
self.assertTrue(type(f(x,x)) is np.ndarray)
self.assertTrue(type(f(x,a)) is A)
self.assertTrue(type(f(x,b)) is B)
self.assertTrue(type(f(x,c)) is C)
self.assertTrue(type(f(a,x)) is A)
self.assertTrue(type(f(b,x)) is B)
self.assertTrue(type(f(c,x)) is C)
self.assertTrue(type(f(a,a)) is A)
self.assertTrue(type(f(a,b)) is B)
self.assertTrue(type(f(b,a)) is B)
self.assertTrue(type(f(b,b)) is B)
self.assertTrue(type(f(b,c)) is C)
self.assertTrue(type(f(c,b)) is C)
self.assertTrue(type(f(c,c)) is C)
        self.assertTrue(type(ncu.exp(a)) is A)
        self.assertTrue(type(ncu.exp(b)) is B)
        self.assertTrue(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_default_prepare(self):
class with_wrap(object):
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_prepare(self):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
# make sure we can return a new
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
self.args = args
self.i = i
return np.zeros(1)
class B(object):
def __array__(self, dtype=None):
return np.zeros(1, dtype)
class C(object):
def __array__(self):
return np.zeros(1)
a = A()
ncu.maximum(np.zeros(1), a)
self.assertTrue(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
self.assertTrue(a.args[1] is a)
self.assertTrue(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
class TestChoose(TestCase):
def test_mixed(self):
c = np.array([True,True])
a = np.array([True,True])
assert_equal(np.choose(c, (a, 1)), np.array([1,1]))
def is_longdouble_finfo_bogus():
info = np.finfo(np.longcomplex)
return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh :
x = 1.5
else :
x = .5
fr = f(x)
fz = f(np.complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s'%f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f)
def test_precisions_consistent(self) :
z = 1 + 1j
for f in self.funcs :
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f)
assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1
yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1
yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1
@dec.knownfailureif(True, "These branch cuts are known to fail")
def test_branch_cuts_failing(self):
# XXX: signed zero not OK with ICC on 64-bit platform for log, see
# http://permalink.gmane.org/gmane.comp.python.numeric.general/25335
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
# XXX: signed zeros are not OK for sqrt or for the arc* functions
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True
def test_against_cmath(self):
import cmath, sys
# cmath.asinh is broken in some versions of Python, see
# http://bugs.python.org/issue1381
broken_cmath_asinh = False
if sys.version_info < (2,6):
broken_cmath_asinh = True
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(np.complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
if cname == 'asinh' and broken_cmath_asinh:
continue
assert abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b)
def check_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsinh')
z = (1j*x).astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsin')
z = x.astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctanh')
z = (1j*x).astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
assert np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctan')
# The switchover was chosen as 1e-3; hence there can be up to
# ~eps/1e-3 of relative cancellation error before it
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
if dtype is np.longcomplex:
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
check(x_series, 50*eps)
else:
check(x_series, 2*eps)
check(x_basic, 2*eps/1e-3)
# Check a few points
z = np.array([1e-5*(1+1j)], dtype=dtype)
p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
d = np.absolute(1-np.arctanh(z)/p)
assert np.all(d < 1e-15)
p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
d = np.absolute(1-np.arcsinh(z)/p)
assert np.all(d < 1e-15)
p = 9.999999999333333333e-6j + 1.000000000066666666e-5
d = np.absolute(1-np.arctan(z)/p)
assert np.all(d < 1e-15)
p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
d = np.absolute(1-np.arcsin(z)/p)
assert np.all(d < 1e-15)
# Check continuity across switchover points
def check(func, z0, d=1):
z0 = np.asarray(z0, dtype=dtype)
zp = z0 + abs(z0) * d * eps * 2
zm = z0 - abs(z0) * d * eps * 2
assert np.all(zp != zm), (zp, zm)
# NB: the cancellation error at the switchover is at least eps
good = (abs(func(zp) - func(zm)) < 2*eps)
assert np.all(good), (func, z0[~good])
for func in (np.arcsinh,np.arcsinh,np.arcsin,np.arctanh,np.arctan):
pts = [rp+1j*ip for rp in (-1e-3,0,1e-3) for ip in(-1e-3,0,1e-3)
if rp != 0 or ip != 0]
check(func, pts, 1)
check(func, pts, 1j)
check(func, pts, 1+1j)
def test_loss_of_precision(self):
for dtype in [np.complex64, np.complex_]:
yield self.check_loss_of_precision, dtype
@dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
def test_loss_of_precision_longcomplex(self):
self.check_loss_of_precision(np.longcomplex)
class TestAttributes(TestCase):
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
assert add.__doc__.startswith('add(x1, x2[, out])\n\n')
self.assertTrue(add.ntypes >= 18) # don't fail if types added
self.assertTrue('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
class TestSubclass(TestCase):
def test_subclass_op(self):
class simple(np.ndarray):
def __new__(subtype, shape):
self = np.ndarray.__new__(subtype, shape, dtype=object)
self.fill(0)
return self
a = simple((3,4))
assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=np.complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
assert np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)
assert np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)
assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)
assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
x = -x0
x.real[jr] = 0.*dx.real
x.imag[ji] = 0.*dx.imag
x = -x
ym = f(x)
ym = ym[jr | ji]
y0 = y0[jr | ji]
assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)
assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)
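# Illustrative call (not part of the original suite): asserting that sqrt
# has a branch cut along the negative real axis looks like
#
#   _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1)
#
# i.e. x0 = -0.5 sits on the cut, dx = 1j approaches from the upper half
# plane, and (re_sign, im_sign) = (1, -1) says crossing the cut keeps the
# real part and flips the imaginary part -- matching the `yield` entries
# in TestComplexFunctions.test_branch_cuts above.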
def test_copysign():
assert np.copysign(1, -1) == -1
old_err = np.seterr(divide="ignore")
try:
assert 1 / np.copysign(0, -1) < 0
assert 1 / np.copysign(0, 1) > 0
finally:
np.seterr(**old_err)
assert np.signbit(np.copysign(np.nan, -1))
assert not np.signbit(np.copysign(np.nan, 1))
def _test_nextafter(t):
one = t(1)
two = t(2)
zero = t(0)
eps = np.finfo(t).eps
assert np.nextafter(one, two) - one == eps
assert np.nextafter(one, zero) - one < 0
assert np.isnan(np.nextafter(np.nan, one))
assert np.isnan(np.nextafter(one, np.nan))
assert np.nextafter(one, one) == one
def test_nextafter():
return _test_nextafter(np.float64)
def test_nextafterf():
return _test_nextafter(np.float32)
@dec.knownfailureif(sys.platform == 'win32', "Long double support buggy on win32")
def test_nextafterl():
return _test_nextafter(np.longdouble)
def _test_spacing(t):
err = np.seterr(invalid='ignore')
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
try:
assert np.spacing(one) == eps
assert np.isnan(np.spacing(nan))
assert np.isnan(np.spacing(inf))
assert np.isnan(np.spacing(-inf))
assert np.spacing(t(1e30)) != 0
finally:
np.seterr(**err)
def test_spacing():
return _test_spacing(np.float64)
def test_spacingf():
return _test_spacing(np.float32)
@dec.knownfailureif(sys.platform == 'win32', "Long double support buggy on win32")
def test_spacingl():
return _test_spacing(np.longdouble)
def test_spacing_gfortran():
# Reference from this fortran file, built with gfortran 4.3.3 on linux
# 32bits:
# PROGRAM test_spacing
# INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
# INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
#
# WRITE(*,*) spacing(0.00001_DBL)
# WRITE(*,*) spacing(1.0_DBL)
# WRITE(*,*) spacing(1000._DBL)
# WRITE(*,*) spacing(10500._DBL)
#
# WRITE(*,*) spacing(0.00001_SGL)
# WRITE(*,*) spacing(1.0_SGL)
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
ref = {}
ref[np.float64] = [1.69406589450860068E-021,
2.22044604925031308E-016,
1.13686837721616030E-013,
1.81898940354585648E-012]
ref[np.float32] = [
9.09494702E-13,
1.19209290E-07,
6.10351563E-05,
9.76562500E-04]
for dt, dec in zip([np.float32, np.float64], (10, 20)):
x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec)
def test_nextafter_vs_spacing():
# XXX: spacing does not handle long double yet
for t in [np.float32, np.float64]:
for _f in [1, 1e-5, 1000]:
f = t(_f)
f1 = t(_f + 1)
assert np.nextafter(f, f1) - f == np.spacing(f)
def test_pos_nan():
"""Check np.nan is a positive nan."""
assert np.signbit(np.nan) == 0
def test_reduceat():
"""Test bug in reduceat when structured arrays are not copied."""
db = np.dtype([('name', 'S11'),('time', np.int64), ('value', np.float32)])
a = np.empty([100], dtype=db)
a['name'] = 'Simple'
a['time'] = 10
a['value'] = 100
indx = [0,7,15,25]
h2 = []
val1 = indx[0]
for val2 in indx[1:]:
h2.append(np.add.reduce(a['value'][val1:val2]))
val1 = val2
h2.append(np.add.reduce(a['value'][val1:]))
h2 = np.array(h2)
# test buffered -- this should work
h1 = np.add.reduceat(a['value'], indx)
assert_array_almost_equal(h1, h2)
# This is when the error occurs.
# test no buffer
res = np.setbufsize(32)
h1 = np.add.reduceat(a['value'], indx)
np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
assert_array_almost_equal(h1, h2)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -7,408,767,103,966,507,000 | 35.176471 | 133 | 0.534645 | false |
eligoenergy/git-repo | subcmds/overview.py | 83 | 2727 | #
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from color import Coloring
from command import PagedCommand
class Overview(PagedCommand):
common = True
helpSummary = "Display overview of unmerged project branches"
helpUsage = """
%prog [--current-branch] [<project>...]
"""
helpDescription = """
The '%prog' command is used to display an overview of the projects' branches,
and list any local commits that have not yet been merged into the project.
The -b/--current-branch option can be used to restrict the output to only
branches currently checked out in each project. By default, all branches
are displayed.
"""
def _Options(self, p):
p.add_option('-b', '--current-branch',
dest="current_branch", action="store_true",
help="Consider only checked out branches")
def Execute(self, opt, args):
all_branches = []
for project in self.GetProjects(args):
br = [project.GetUploadableBranch(x)
for x in project.GetBranches()]
br = [x for x in br if x]
if opt.current_branch:
br = [x for x in br if x.name == project.CurrentBranch]
all_branches.extend(br)
if not all_branches:
return
class Report(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'status')
self.project = self.printer('header', attr='bold')
self.text = self.printer('text')
out = Report(all_branches[0].project.config)
out.text("Deprecated. See repo info -o.")
out.nl()
out.project('Projects Overview')
out.nl()
project = None
for branch in all_branches:
if project != branch.project:
project = branch.project
out.nl()
out.project('project %s/' % project.relpath)
out.nl()
commits = branch.commits
date = branch.date
print('%s %-33s (%2d commit%s, %s)' % (
branch.name == project.CurrentBranch and '*' or ' ',
branch.name,
len(commits),
len(commits) != 1 and 's' or ' ',
date))
for commit in commits:
print('%-35s - %s' % ('', commit))
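# Illustrative output (layout assumed from the print format strings above):
#
#   Projects Overview
#
#   project tools/example/
#   * my-feature                        ( 2 commits, Mar 14)
#                                       - deadbee Fix frobnicator startup
#                                       - cafef00 Add frobnicator tests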
| apache-2.0 | -4,360,187,061,002,202,000 | 31.464286 | 76 | 0.643198 | false |
cfei18/incubator-airflow | tests/www/api/experimental/test_endpoints.py | 15 | 11876 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import timedelta
import json
import unittest
from urllib.parse import quote_plus
from airflow import configuration
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagModel, DagRun, Pool, TaskInstance
from airflow.settings import Session
from airflow.utils.timezone import datetime, utcnow
from airflow.www import app as application
class TestApiExperimental(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestApiExperimental, cls).setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
def setUp(self):
super(TestApiExperimental, self).setUp()
configuration.load_test_config()
app = application.create_app(testing=True)
self.app = app.test_client()
def tearDown(self):
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
super(TestApiExperimental, self).tearDown()
def test_task_info(self):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.app.get(
url_template.format('example_bash_operator', 'runme_0')
)
self.assertIn('"email"', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.app.get(
url_template.format('example_bash_operator', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
response = self.app.get(
url_template.format('DNE', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
def test_trigger_dag(self):
url_template = '/api/experimental/dags/{}/dag_runs'
response = self.app.post(
url_template.format('example_bash_operator'),
data=json.dumps({'run_id': 'my_run' + utcnow().isoformat()}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response = self.app.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
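    # Illustrative equivalent request (not part of the original tests):
    # against a running webserver, the same endpoint is exercised with e.g.
    #
    #   curl -X POST \
    #     http://localhost:8080/api/experimental/dags/example_bash_operator/dag_runs \
    #     -H 'Content-Type: application/json' -d '{"run_id": "my_run"}'
    #
    # (host and port are assumptions; any server exposing the experimental
    # API works).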
def test_delete_dag(self):
url_template = '/api/experimental/dags/{}'
from airflow import settings
session = settings.Session()
key = "my_dag_id"
session.add(DagModel(dag_id=key))
session.commit()
response = self.app.delete(
url_template.format(key),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response = self.app.delete(
url_template.format('does_not_exist_dag'),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
def test_trigger_dag_for_date(self):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
hour_from_now = utcnow() + timedelta(hours=1)
execution_date = datetime(hour_from_now.year,
hour_from_now.month,
hour_from_now.day,
hour_from_now.hour)
datetime_string = execution_date.isoformat()
# Test Correct execution
response = self.app.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test error for nonexistent dag
response = self.app.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({'execution_date': execution_date.isoformat()}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
# Test error for bad datetime format
response = self.app.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': 'not_a_datetime'}),
content_type="application/json"
)
self.assertEqual(400, response.status_code)
def test_task_instance_info(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.app.get(
url_template.format(dag_id, datetime_string, task_id)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.app.get(
url_template.format('does_not_exist_dag', datetime_string,
task_id),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent task
response = self.app.get(
url_template.format(dag_id, datetime_string, 'does_not_exist_task')
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.app.get(
url_template.format(dag_id, wrong_datetime_string, task_id)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.app.get(
url_template.format(dag_id, 'not_a_datetime', task_id)
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
class TestPoolApiExperimental(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestPoolApiExperimental, cls).setUpClass()
session = Session()
session.query(Pool).delete()
session.commit()
session.close()
def setUp(self):
super(TestPoolApiExperimental, self).setUp()
configuration.load_test_config()
app = application.create_app(testing=True)
self.app = app.test_client()
self.session = Session()
self.pools = []
for i in range(2):
name = 'experimental_%s' % (i + 1)
pool = Pool(
pool=name,
slots=i,
description=name,
)
self.session.add(pool)
self.pools.append(pool)
self.session.commit()
self.pool = self.pools[0]
def tearDown(self):
self.session.query(Pool).delete()
self.session.commit()
self.session.close()
super(TestPoolApiExperimental, self).tearDown()
def _get_pool_count(self):
response = self.app.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
return len(json.loads(response.data.decode('utf-8')))
def test_get_pool(self):
response = self.app.get(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
def test_get_pool_non_existing(self):
response = self.app.get('/api/experimental/pools/foo')
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
def test_get_pools(self):
response = self.app.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
pools = json.loads(response.data.decode('utf-8'))
self.assertEqual(len(pools), 2)
for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])):
self.assertDictEqual(pool, self.pools[i].to_json())
def test_create_pool(self):
response = self.app.post(
'/api/experimental/pools',
data=json.dumps({
'name': 'foo',
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
pool = json.loads(response.data.decode('utf-8'))
self.assertEqual(pool['pool'], 'foo')
self.assertEqual(pool['slots'], 1)
self.assertEqual(pool['description'], '')
self.assertEqual(self._get_pool_count(), 3)
def test_create_pool_with_bad_name(self):
for name in ('', ' '):
response = self.app.post(
'/api/experimental/pools',
data=json.dumps({
'name': name,
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.data.decode('utf-8'))['error'],
"Pool name shouldn't be empty",
)
self.assertEqual(self._get_pool_count(), 2)
def test_delete_pool(self):
response = self.app.delete(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
self.assertEqual(self._get_pool_count(), 1)
def test_delete_pool_non_existing(self):
response = self.app.delete(
'/api/experimental/pools/foo',
)
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 62,575,648,720,219,300 | 35.318043 | 79 | 0.591192 | false |
JakeBrand/CMPUT410-E3 | lab4/lib/python2.7/site-packages/jinja2/testsuite/debug.py | 415 | 1935 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.debug
~~~~~~~~~~~~~~~~~~~~~~
Tests the debug system.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase, filesystem_loader
from jinja2 import Environment, TemplateSyntaxError
env = Environment(loader=filesystem_loader)
class DebugTestCase(JinjaTestCase):
def test_runtime_error(self):
def test():
tmpl.render(fail=lambda: 1 / 0)
tmpl = env.get_template('broken.html')
self.assert_traceback_matches(test, r'''
File ".*?broken.html", line 2, in (top-level template code|<module>)
\{\{ fail\(\) \}\}
File ".*?debug.pyc?", line \d+, in <lambda>
tmpl\.render\(fail=lambda: 1 / 0\)
ZeroDivisionError: (int(eger)? )?division (or modulo )?by zero
''')
def test_syntax_error(self):
# XXX: the .*? is necessary for python3 which does not hide
# some of the stack frames we don't want to show. Not sure
# what's up with that, but that is not that critical. Should
# be fixed though.
self.assert_traceback_matches(lambda: env.get_template('syntaxerror.html'), r'''(?sm)
File ".*?syntaxerror.html", line 4, in (template|<module>)
\{% endif %\}.*?
(jinja2\.exceptions\.)?TemplateSyntaxError: Encountered unknown tag 'endif'. Jinja was looking for the following tags: 'endfor' or 'else'. The innermost block that needs to be closed is 'for'.
''')
def test_regular_syntax_error(self):
def test():
raise TemplateSyntaxError('wtf', 42)
self.assert_traceback_matches(test, r'''
File ".*debug.pyc?", line \d+, in test
raise TemplateSyntaxError\('wtf', 42\)
(jinja2\.exceptions\.)?TemplateSyntaxError: wtf
line 42''')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DebugTestCase))
return suite
| apache-2.0 | -7,357,588,250,277,985,000 | 32.362069 | 192 | 0.643411 | false |
halberom/ansible-modules-extras | cloud/cloudstack/cs_loadbalancer_rule.py | 31 | 11531 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Darren Worrall <[email protected]>
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_loadbalancer_rule
short_description: Manages load balancer rules on Apache CloudStack based clouds.
description:
- Add, update and remove load balancer rules.
version_added: '2.0'
author:
- "Darren Worrall (@dazworrall)"
- "René Moser (@resmo)"
options:
name:
description:
- The name of the load balancer rule.
required: true
description:
description:
- The description of the load balancer rule.
required: false
default: null
algorithm:
description:
- Load balancer algorithm
- Required when using C(state=present).
required: false
choices: [ 'source', 'roundrobin', 'leastconn' ]
default: 'source'
private_port:
description:
- The private port of the private ip address/virtual machine where the network traffic will be load balanced to.
- Required when using C(state=present).
      - Can not be changed once the rule exists due to an API limitation.
required: false
default: null
public_port:
description:
- The public port from where the network traffic will be load balanced from.
- Required when using C(state=present).
      - Can not be changed once the rule exists due to an API limitation.
required: true
default: null
ip_address:
description:
- Public IP address from where the network traffic will be load balanced from.
required: true
aliases: [ 'public_ip' ]
open_firewall:
description:
- Whether the firewall rule for public port should be created, while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
required: false
default: false
cidr:
description:
- CIDR (full notation) to be used for firewall rule if required.
required: false
default: null
protocol:
description:
- The protocol to be used on the load balancer
required: false
default: null
project:
description:
- Name of the project the load balancer IP address is related to.
required: false
default: null
state:
description:
- State of the rule.
required: true
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the rule is related to.
required: false
default: null
account:
description:
- Account the rule is related to.
required: false
default: null
zone:
description:
      - Name of the zone in which the rule should be created.
- If not set, default zone is used.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a load balancer rule
- local_action:
module: cs_loadbalancer_rule
name: balance_http
public_ip: 1.2.3.4
algorithm: leastconn
public_port: 80
private_port: 8080
# update algorithm of an existing load balancer rule
- local_action:
module: cs_loadbalancer_rule
name: balance_http
public_ip: 1.2.3.4
algorithm: roundrobin
public_port: 80
private_port: 8080
# Delete a load balancer rule
- local_action:
module: cs_loadbalancer_rule
name: balance_http
public_ip: 1.2.3.4
state: absent
'''
RETURN = '''
---
id:
description: UUID of the rule.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
zone:
description: Name of zone the rule is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the rule is related to.
returned: success
type: string
sample: Production
account:
description: Account the rule is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the rule is related to.
returned: success
type: string
sample: example domain
algorithm:
description: Load balancer algorithm used.
returned: success
type: string
sample: "source"
cidr:
description: CIDR to forward traffic from.
returned: success
type: string
sample: ""
name:
description: Name of the rule.
returned: success
type: string
sample: "http-lb"
description:
description: Description of the rule.
returned: success
type: string
sample: "http load balancer rule"
protocol:
description: Protocol of the rule.
returned: success
type: string
sample: "tcp"
public_port:
description: Public port.
returned: success
type: string
sample: 80
private_port:
description: Private IP address.
returned: success
type: string
sample: 80
public_ip:
description: Public IP address.
returned: success
type: string
sample: "1.2.3.4"
tags:
description: List of resource tags associated with the rule.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
state:
description: State of the rule.
returned: success
type: string
sample: "Add"
'''
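# Hedged sketch, not part of the original module: one way the RETURN fields
# documented above might be consumed from a playbook. The registered variable
# name `lb` is an assumption for illustration only.
#
#   - local_action:
#       module: cs_loadbalancer_rule
#       name: balance_http
#       public_ip: 1.2.3.4
#       public_port: 80
#       private_port: 8080
#     register: lb
#   - debug: msg="rule {{ lb.name }} balances with {{ lb.algorithm }}"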
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackLBRule(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackLBRule, self).__init__(module)
self.returns = {
'publicip': 'public_ip',
'algorithm': 'algorithm',
'cidrlist': 'cidr',
'protocol': 'protocol',
}
# these values will be casted to int
self.returns_to_int = {
'publicport': 'public_port',
'privateport': 'private_port',
}
def get_rule(self, **kwargs):
rules = self.cs.listLoadBalancerRules(**kwargs)
if rules:
return rules['loadbalancerrule'][0]
def _get_common_args(self):
return {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'publicipid': self.get_ip_address(key='id'),
'name': self.module.params.get('name'),
}
def present_lb_rule(self):
missing_params = []
for required_params in [
'algorithm',
'private_port',
'public_port',
]:
if not self.module.params.get(required_params):
missing_params.append(required_params)
if missing_params:
self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
args = self._get_common_args()
rule = self.get_rule(**args)
if rule:
rule = self._update_lb_rule(rule)
else:
rule = self._create_lb_rule(rule)
if rule:
rule = self.ensure_tags(resource=rule, resource_type='LoadBalancer')
return rule
def _create_lb_rule(self, rule):
self.result['changed'] = True
if not self.module.check_mode:
args = self._get_common_args()
args['algorithm'] = self.module.params.get('algorithm')
args['privateport'] = self.module.params.get('private_port')
args['publicport'] = self.module.params.get('public_port')
args['cidrlist'] = self.module.params.get('cidr')
args['description'] = self.module.params.get('description')
args['protocol'] = self.module.params.get('protocol')
res = self.cs.createLoadBalancerRule(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
rule = self.poll_job(res, 'loadbalancer')
return rule
def _update_lb_rule(self, rule):
args = {}
args['id'] = rule['id']
args['algorithm'] = self.module.params.get('algorithm')
args['description'] = self.module.params.get('description')
if self.has_changed(args, rule):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.updateLoadBalancerRule(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
rule = self.poll_job(res, 'loadbalancer')
return rule
def absent_lb_rule(self):
args = self._get_common_args()
rule = self.get_rule(**args)
if rule:
self.result['changed'] = True
if rule and not self.module.check_mode:
res = self.cs.deleteLoadBalancerRule(id=rule['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
                res = self.poll_job(res, 'loadbalancer')
return rule
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
description = dict(default=None),
algorithm = dict(choices=['source', 'roundrobin', 'leastconn'], default='source'),
private_port = dict(type='int', default=None),
public_port = dict(type='int', default=None),
protocol = dict(default=None),
state = dict(choices=['present', 'absent'], default='present'),
ip_address = dict(required=True, aliases=['public_ip']),
cidr = dict(default=None),
project = dict(default=None),
open_firewall = dict(type='bool', default=False),
tags = dict(type='list', aliases=['tag'], default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_lb_rule = AnsibleCloudStackLBRule(module)
state = module.params.get('state')
if state in ['absent']:
rule = acs_lb_rule.absent_lb_rule()
else:
rule = acs_lb_rule.present_lb_rule()
result = acs_lb_rule.get_result(rule)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 5,792,683,900,132,157,000 | 28.637532 | 118 | 0.6259 | false |
sujeet4github/MyLangUtils | LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/sqlalchemy/ext/serializer.py | 32 | 5586 | # ext/serializer.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
allowing "contextual" deserialization.
Any SQLAlchemy query structure, either based on sqlalchemy.sql.*
or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session
etc. which are referenced by the structure are not persisted in serialized
form, but are instead re-associated with the query structure
when it is deserialized.
Usage is nearly the same as that of the standard Python pickle module::
from sqlalchemy.ext.serializer import loads, dumps
metadata = MetaData(bind=some_engine)
Session = scoped_session(sessionmaker())
# ... define mappers
query = Session.query(MyClass).
filter(MyClass.somedata=='foo').order_by(MyClass.sortkey)
# pickle the query
serialized = dumps(query)
# unpickle. Pass in metadata + scoped_session
query2 = loads(serialized, metadata, Session)
print query2.all()
Similar restrictions as when using raw pickle apply; mapped classes must
themselves be pickleable, meaning they are importable from a module-level
namespace.
The serializer module is only appropriate for query structures. It is not
needed for:
* instances of user-defined classes. These contain no references to engines,
sessions or expression constructs in the typical case and can be serialized
directly.
* Table metadata that is to be loaded entirely from the serialized structure
(i.e. is not already declared in the application). Regular
pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object,
typically one which was reflected from an existing database at some previous
point in time. The serializer module is specifically for the opposite case,
where the Table metadata is already present in memory.
"""
from ..orm import class_mapper
from ..orm.session import Session
from ..orm.mapper import Mapper
from ..orm.interfaces import MapperProperty
from ..orm.attributes import QueryableAttribute
from .. import Table, Column
from ..engine import Engine
from ..util import pickle, byte_buffer, b64encode, b64decode, text_type
import re
__all__ = ['Serializer', 'Deserializer', 'dumps', 'loads']
def Serializer(*args, **kw):
pickler = pickle.Pickler(*args, **kw)
def persistent_id(obj):
# print "serializing:", repr(obj)
if isinstance(obj, QueryableAttribute):
cls = obj.impl.class_
key = obj.impl.key
id = "attribute:" + key + ":" + b64encode(pickle.dumps(cls))
elif isinstance(obj, Mapper) and not obj.non_primary:
id = "mapper:" + b64encode(pickle.dumps(obj.class_))
elif isinstance(obj, MapperProperty) and not obj.parent.non_primary:
id = "mapperprop:" + b64encode(pickle.dumps(obj.parent.class_)) + \
":" + obj.key
elif isinstance(obj, Table):
id = "table:" + text_type(obj.key)
elif isinstance(obj, Column) and isinstance(obj.table, Table):
id = "column:" + \
text_type(obj.table.key) + ":" + text_type(obj.key)
elif isinstance(obj, Session):
id = "session:"
elif isinstance(obj, Engine):
id = "engine:"
else:
return None
return id
pickler.persistent_id = persistent_id
return pickler
our_ids = re.compile(
r'(mapperprop|mapper|table|column|session|attribute|engine):(.*)')
def Deserializer(file, metadata=None, scoped_session=None, engine=None):
unpickler = pickle.Unpickler(file)
def get_engine():
if engine:
return engine
elif scoped_session and scoped_session().bind:
return scoped_session().bind
elif metadata and metadata.bind:
return metadata.bind
else:
return None
def persistent_load(id):
m = our_ids.match(text_type(id))
if not m:
return None
else:
type_, args = m.group(1, 2)
if type_ == 'attribute':
key, clsarg = args.split(":")
cls = pickle.loads(b64decode(clsarg))
return getattr(cls, key)
elif type_ == "mapper":
cls = pickle.loads(b64decode(args))
return class_mapper(cls)
elif type_ == "mapperprop":
mapper, keyname = args.split(':')
cls = pickle.loads(b64decode(mapper))
return class_mapper(cls).attrs[keyname]
elif type_ == "table":
return metadata.tables[args]
elif type_ == "column":
table, colname = args.split(':')
return metadata.tables[table].c[colname]
elif type_ == "session":
return scoped_session()
elif type_ == "engine":
return get_engine()
else:
raise Exception("Unknown token: %s" % type_)
unpickler.persistent_load = persistent_load
return unpickler
def dumps(obj, protocol=0):
buf = byte_buffer()
pickler = Serializer(buf, protocol)
pickler.dump(obj)
return buf.getvalue()
def loads(data, metadata=None, scoped_session=None, engine=None):
buf = byte_buffer(data)
unpickler = Deserializer(buf, metadata, scoped_session, engine)
return unpickler.load()
| gpl-3.0 | 6,848,698,411,109,407,000 | 34.132075 | 79 | 0.642678 | false |
bobintetley/asm3 | src/asm3/locales/locale_th.py | 1 | 126059 | # th.po
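# Hedged sketch, not part of the original file: dictionaries like `val` below
# map English source strings to Thai translations and are typically consumed
# by a lookup helper along these lines, falling back to the English string
# when a translation is empty or missing. The helper name `translate` is
# hypothetical; `val` is resolved at call time, after the module has loaded.
def translate(s):
    t = val.get(s, "")
    return t if t else s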
val = {" days." : "",
"(all)" : "",
"(any)" : "",
"(anyone)" : "(ไม่มี)",
"(available)" : "",
"(blank)" : "",
"(both)" : "",
"(everyone)" : "",
"(master user, not editable)" : "",
"(no change)" : "",
"(no deduction)" : "",
"(none)" : "(ไม่มี)",
"(unknown)" : "",
"(use system)" : "",
"({0} given, {1} remaining)" : "",
"1 treatment" : "",
"1 week" : "1 สัปดาห์",
"1 year" : "",
"2 weeks" : "2 สัปดาห์",
"3 months" : "เดือน",
"4 weeks" : "4 สัปดาห์",
"5 Year" : "",
"6 months" : "เดือน",
"6 weeks" : "6 สัปดาห์",
"8 weeks" : "8 สัปดาห์",
"9 months" : "เดือน",
"A (Stray Dog)" : "",
"A description or other information about the animal" : "",
"A list of areas this person will homecheck - eg: S60 S61" : "",
"A movement must have a reservation date or type." : "",
"A person is required for this movement type." : "",
"A publish job is already running." : "",
"A short version of the reference number" : "",
"A task is already running." : "",
"A unique number to identify this movement" : "",
"A unique reference for this litter" : "",
"A4" : "",
"ACO" : "",
"AM" : "",
"ASM" : "",
"ASM 3 is compatible with your iPad and other tablets." : "",
"ASM News" : "",
"ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "",
"ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "",
"ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "",
"Abandoned" : "",
"Abuse" : "",
"Abyssinian" : "",
"Access System Menu" : "",
"Account" : "",
"Account Types" : "",
"Account code '{0}' has already been used." : "",
"Account code '{0}' is not valid." : "",
"Account code cannot be blank." : "",
"Account disabled." : "",
"Accountant" : "",
"Accounts" : "",
"Accounts need a code." : "",
"Active" : "",
"Active Incidents" : "",
"Active Trap Loans" : "",
"Active users: {0}" : "",
"Add" : "",
"Add Accounts" : "",
"Add Animal" : "เพิ่มสัตว์",
"Add Animals" : "เพิ่มสัตว์",
"Add Appointment" : "",
"Add Call" : "",
"Add Citations" : "",
"Add Clinic Appointment" : "",
"Add Cost" : "",
"Add Diary" : "",
"Add Diets" : "",
"Add Document to Repository" : "",
"Add Flag" : "",
"Add Found Animal" : "สัตว์ที่พบเจอ",
"Add Incidents" : "",
"Add Investigation" : "",
"Add Invoice Item" : "",
"Add Licenses" : "",
"Add Litter" : "",
"Add Log" : "",
"Add Log to Animal" : "สัตว์ที่พบเจอ",
"Add Lost Animal" : "สัตว์ที่พบเจอ",
"Add Media" : "",
"Add Medical Records" : "",
"Add Message" : "",
"Add Movement" : "",
"Add Payments" : "",
"Add Person" : "",
"Add Report" : "",
"Add Rota" : "",
"Add Stock" : "",
"Add Tests" : "",
"Add Transport" : "",
"Add Trap Loans" : "",
"Add Users" : "",
"Add Vaccinations" : "",
"Add Vouchers" : "",
"Add Waiting List" : "ดูรายชื่อที่กำลังรอ",
"Add a diary note" : "",
"Add a found animal" : "สัตว์ที่พบเจอ",
"Add a log entry" : "",
"Add a lost animal" : "สัตว์ที่พบเจอ",
"Add a medical regimen" : "",
"Add a new animal" : "สัตว์ที่พบเจอ",
"Add a new log" : "",
"Add a new person" : "",
"Add a person" : "",
"Add a photo" : "",
"Add a test" : "",
"Add a vaccination" : "",
"Add account" : "",
"Add additional field" : "",
"Add an animal to the waiting list" : "",
"Add citation" : "",
"Add cost" : "",
"Add details of this email to the log after sending" : "",
"Add diary" : "",
"Add diary task" : "",
"Add diet" : "",
"Add extra images for use in reports and documents" : "",
"Add form field" : "",
"Add found animal" : "สัตว์ที่พบเจอ",
"Add investigation" : "",
"Add license" : "",
"Add litter" : "",
"Add log" : "",
"Add lost animal" : "สัตว์ที่พบเจอ",
"Add medical profile" : "",
"Add medical regimen" : "",
"Add message" : "",
"Add movement" : "",
"Add online form" : "",
"Add payment" : "",
"Add person" : "",
"Add report" : "",
"Add role" : "",
"Add rota item" : "",
"Add stock" : "",
"Add template" : "",
"Add test" : "",
"Add this text to all animal descriptions" : "",
"Add to log" : "",
"Add transport" : "",
"Add trap loan" : "",
"Add user" : "",
"Add vaccination" : "",
"Add voucher" : "",
"Add waiting list" : "ดูรายชื่อที่กำลังรอ",
"Add {0}" : "",
"Added" : "",
"Added by {0} on {1}" : "",
"Additional" : "เพิ่มเติม",
"Additional Fields" : "",
"Additional date field '{0}' contains an invalid date." : "",
"Additional fields" : "",
"Additional fields need a name, label and type." : "",
"Address" : "ที่อยู่",
"Address Contains" : "",
"Address contains" : "",
"Administered" : "",
"Administering Vet" : "",
"Adopt" : "",
"Adopt an animal" : "",
"Adoptable" : "",
"Adoptable Animal" : "",
"Adoptable and published for the first time" : "",
"Adopted" : "",
"Adopted Animals" : "เพิ่มสัตว์",
"Adopted Transferred In {0}" : "",
"Adoption" : "",
"Adoption Coordinator" : "",
"Adoption Coordinator and Fosterer" : "",
"Adoption Event" : "",
"Adoption Fee" : "",
"Adoption Number" : "",
"Adoption fee donations" : "",
"Adoption movements must have a valid adoption date." : "",
"Adoption successfully created." : "",
"Adoptions {0}" : "",
"Adult" : "",
"Advanced" : "อย่างขั้นสูง",
"Advanced find animal screen defaults to on shelter" : "",
"Affenpinscher" : "",
"Afghan Hound" : "",
"African Grey" : "",
"After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "",
"Age" : "อายุ",
"Age Group" : "",
"Age Group 1" : "",
"Age Group 2" : "",
"Age Group 3" : "",
"Age Group 4" : "",
"Age Group 5" : "",
"Age Group 6" : "",
"Age Group 7" : "",
"Age Group 8" : "",
"Age Groups" : "",
"Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "",
"Aged Between" : "",
"Aged From" : "",
"Aged To" : "",
"Aggression" : "",
"Airedale Terrier" : "",
"Akbash" : "",
"Akita" : "",
"Alaskan Malamute" : "",
"Alerts" : "",
"All Animals" : "เพิ่มสัตว์",
"All On-Shelter Animals" : "ลบสัตว์",
"All Publishers" : "",
"All accounts" : "",
"All animal care officers on file." : "",
"All animal shelters on file." : "",
"All animals matching current publishing options." : "",
"All animals on the shelter." : "",
"All animals where the hold ends today." : "",
"All animals who are currently held in case of reclaim." : "",
"All animals who are currently quarantined." : "",
"All animals who are flagged as not for adoption." : "",
"All animals who have been on the shelter longer than {0} months." : "",
"All animals who have not been microchipped" : "",
"All banned owners on file." : "",
"All diary notes" : "",
"All donors on file." : "",
"All drivers on file." : "",
"All existing data in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "",
"All fields should be completed." : "",
"All fosterers on file." : "",
"All homechecked owners on file." : "",
"All homecheckers on file." : "",
"All members on file." : "",
"All notes upto today" : "",
"All people on file." : "",
"All retailers on file." : "",
"All staff on file." : "",
"All time" : "",
"All vets on file." : "",
"All volunteers on file." : "",
"Allergies" : "",
"Allow a fosterer to be selected" : "",
"Allow an adoption coordinator to be selected" : "",
"Allow creation of payments on the Move-Reserve screen" : "",
"Allow drag and drop to move animals between locations" : "",
"Allow duplicate license numbers" : "",
"Allow duplicate microchip numbers" : "",
"Allow overriding of the movement number on the Move menu screens" : "",
"Allow use of OpenOffice document templates" : "",
"Alphabetically A-Z" : "",
"Alphabetically Z-A" : "",
"Already Signed" : "",
"Already fostered to this person." : "",
"Altered" : "",
"Altered Date" : "",
"Altered Dog - 1 year" : "",
"Altered Dog - 3 year" : "",
"Altering Vet" : "",
"Always show an emblem to indicate the current location" : "",
"Amazon" : "",
"Amber" : "",
"American" : "อเมริกัน",
"American Bulldog" : "",
"American Curl" : "",
"American Eskimo Dog" : "",
"American Fuzzy Lop" : "",
"American Sable" : "",
"American Shorthair" : "",
"American Staffordshire Terrier" : "",
"American Water Spaniel" : "",
"American Wirehair" : "",
"Amount" : "",
"An age in years, eg: 1, 0.5" : "",
"An animal cannot have multiple open movements." : "",
"An optional comma separated list of email addresses to send the output of this report to" : "",
"Anatolian Shepherd" : "",
"Angora Rabbit" : "",
"Animal" : "สัตว์",
"Animal '{0}' created with code {1}" : "",
"Animal '{0}' successfully marked deceased." : "",
"Animal (optional)" : "",
"Animal (via animalname field)" : "",
"Animal - Additional" : "",
"Animal - Death" : "",
"Animal - Details" : "",
"Animal - Entry" : "",
"Animal - Health and Identification" : "",
"Animal - Notes" : "",
"Animal Codes" : "ประเภทของสัตว์",
"Animal Control" : "",
"Animal Control Caller" : "",
"Animal Control Incident" : "",
"Animal Control Officer" : "",
"Animal Control Victim" : "",
"Animal Emblems" : "",
"Animal Flags" : "",
"Animal Links" : "",
"Animal Name" : "ชื่อสัตว์",
"Animal Selection" : "",
"Animal Shelter Manager" : "",
"Animal Shelter Manager Login" : "",
"Animal Sponsorship" : "",
"Animal Type" : "ประเภทของสัตว์",
"Animal Types" : "ประเภทของสัตว์",
"Animal board costs" : "",
"Animal cannot be deceased before it was brought to the shelter" : "",
"Animal code format" : "",
"Animal comments MUST contain this phrase in order to match." : "",
"Animal control calendar" : "",
"Animal control incidents matching '{0}'." : "",
"Animal defecation" : "",
"Animal descriptions" : "",
"Animal destroyed" : "",
"Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "",
"Animal food costs" : "",
"Animal picked up" : "",
"Animal shortcode format" : "",
"Animals" : "สัตว์",
"Animals at large" : "",
"Animals left in vehicle" : "",
"Animals matching '{0}'." : "",
"Animals per page" : "",
"Annual" : "รายปี",
"Annually" : "รายปี",
"Anonymize" : "",
"Anonymize personal data after this many years" : "",
"Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "",
"Any health problems the animal has" : "",
"Any information about the animal" : "",
"Any markings or distinguishing features the animal has" : "",
"Appaloosa" : "",
"Appenzell Mountain Dog" : "",
"Applehead Siamese" : "",
"Appointment" : "",
"Appointment date must be a valid date" : "",
"Appointment {0}. {1} on {2} for {3}" : "",
"Appointments need a date and time." : "",
"Approved" : "",
"Apr" : "เม.ย.",
"April" : "เมษายน",
"Arabian" : "",
"Area" : "",
"Area Found" : "เขตที่พบ",
"Area Lost" : "",
"Area Postcode" : "",
"Area where the animal was found" : "",
"Area where the animal was lost" : "",
"Areas" : "",
"Arrived" : "",
"Asset" : "",
"Asset::Premises" : "",
"At least the last name should be completed." : "",
"Attach" : "",
"Attach File" : "",
"Attach Link" : "",
"Attach a file" : "",
"Attach a link to a web resource" : "",
"Attach link" : "",
"Audit Trail" : "",
"Aug" : "ส.ค.",
"August" : "สิงหาคม",
"Australian Cattle Dog/Blue Heeler" : "",
"Australian Kelpie" : "",
"Australian Shepherd" : "",
"Australian Terrier" : "",
"Auto log users out after this many minutes of inactivity" : "",
"Auto removed due to lack of owner contact." : "",
"Automatically cancel any outstanding reservations on an animal when it is adopted" : "",
"Automatically remove" : "",
"Automatically return any outstanding foster movements on an animal when it is adopted" : "",
"Automatically return any outstanding foster movements on an animal when it is transferred" : "",
"Available for adoption" : "",
"Available sheltermanager.com reports" : "",
"B (Boarding Animal)" : "",
"Baby" : "",
"Balance" : "",
"Balinese" : "",
"Bank" : "อันดับ",
"Bank account interest" : "",
"Bank current account" : "",
"Bank deposit account" : "",
"Bank savings account" : "",
"Bank::Current" : "",
"Bank::Deposit" : "",
"Bank::Savings" : "",
"Banned" : "",
"Base Color" : "",
"Basenji" : "",
"Basset Hound" : "",
"Batch" : "มีนาคม",
"Batch Number" : "",
"Beagle" : "",
"Bearded Collie" : "",
"Beauceron" : "",
"Bedlington Terrier" : "",
"Beginning of month" : "",
"Belgian Hare" : "",
"Belgian Shepherd Dog Sheepdog" : "",
"Belgian Shepherd Laekenois" : "",
"Belgian Shepherd Malinois" : "",
"Belgian Shepherd Tervuren" : "",
"Bengal" : "",
"Bernese Mountain Dog" : "",
"Beveren" : "",
"Bichon Frise" : "",
"Bird" : "นก",
"Birman" : "",
"Bite" : "ขนาด",
"Biting" : "",
"Black" : "ดำ",
"Black Labrador Retriever" : "",
"Black Mouth Cur" : "",
"Black Tortie" : "",
"Black and Brindle" : "",
"Black and Brown" : "ดำและน้ำตาล",
"Black and Tan" : "",
"Black and Tan Coonhound" : "",
"Black and White" : "ดำและขาว",
"Bloodhound" : "",
"Blue" : "น้ำเงิน",
"Blue Tortie" : "",
"Bluetick Coonhound" : "",
"Board and Food" : "",
"Boarding" : "",
"Boarding Cost" : "",
"Boarding cost type" : "",
"Bobtail" : "",
"Body" : "",
"Bombay" : "",
"Bonded" : "",
"Bonded With" : "",
"Books" : "",
"Border Collie" : "",
"Border Terrier" : "",
"Bordetella" : "",
"Born in Shelter" : "",
"Born on Foster {0}" : "",
"Born on Shelter {0}" : "",
"Borzoi" : "",
"Boston Terrier" : "",
"Both" : "",
"Bouvier des Flanders" : "",
"Boxer" : "",
"Boykin Spaniel" : "",
"Breed" : "สายพันธุ์",
"Breed to use when publishing to third party services and adoption sites" : "",
"Breeds" : "สายพันธุ์",
"Briard" : "",
"Brindle" : "",
"Brindle and Black" : "",
"Brindle and White" : "",
"Britannia Petite" : "",
"British Shorthair" : "",
"Brittany Spaniel" : "",
"Brotogeris" : "",
"Brought In" : "",
"Brought In By" : "",
"Brown" : "น้ำตาล",
"Brown and Black" : "น้ำตาลและดำ",
"Brown and White" : "น้ำตาลและขาว",
"Browse sheltermanager.com" : "",
"Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "",
"Brussels Griffon" : "",
"Budgie/Budgerigar" : "",
"Bulk Complete Diary" : "",
"Bulk Complete Medical Records" : "",
"Bulk Complete Vaccinations" : "",
"Bulk Complete Waiting List" : "",
"Bulk Regimen" : "",
"Bulk Test" : "",
"Bulk Transport" : "",
"Bulk Vaccination" : "",
"Bulk change animals" : "",
"Bull Terrier" : "",
"Bullmastiff" : "",
"Bunny Rabbit" : "",
"Burmese" : "",
"Burmilla" : "",
"By" : "",
"CC" : "",
"CSV of animal/adopter data" : "",
"CSV of animal/medical data" : "",
"CSV of incident data" : "",
"CSV of license data" : "",
"CSV of payment data" : "",
"CSV of person data" : "",
"Caique" : "",
"Cairn Terrier" : "",
"Calendar View" : "",
"Calendar view" : "",
"Calico" : "",
"Californian" : "",
"Call" : "",
"Call Date/Time" : "",
"Caller" : "",
"Caller Name" : "",
"Caller Phone" : "",
"Camel" : "",
"Can Login" : "เข้าสู่ระบบ",
"Can afford donation?" : "",
"Can't reserve an animal that has an active movement." : "",
"Canaan Dog" : "",
"Canadian Hairless" : "",
"Canary" : "",
"Cancel" : "ยกเลิก",
"Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "",
"Cancel unadopted reservations after" : "",
"Cancel unadopted reservations after this many days, or 0 to never cancel" : "",
"Cancelled" : "",
"Cancelled Reservation" : "",
"Cane Corso Mastiff" : "",
"Carolina Dog" : "",
"Cash" : "",
"Cat" : "แมว",
"Catahoula Leopard Dog" : "",
"Category" : "หมวดหมู่",
"Cats" : "แมว",
"Cattery" : "",
"Cattle Dog" : "",
"Cavalier King Charles Spaniel" : "",
"Cell" : "",
"Cell Phone" : "",
"Champagne D'Argent" : "",
"Change" : "",
"Change Accounts" : "",
"Change Animals" : "",
"Change Citations" : "",
"Change Clinic Apointment" : "",
"Change Cost" : "",
"Change Date Required" : "",
"Change Diets" : "",
"Change Found Animal" : "",
"Change Incidents" : "",
"Change Investigation" : "",
"Change Licenses" : "",
"Change Litter" : "",
"Change Log" : "",
"Change Lost Animal" : "",
"Change Media" : "",
"Change Medical Records" : "",
"Change Movement" : "",
"Change Password" : "",
"Change Payments" : "",
"Change Person" : "",
"Change Publishing Options" : "",
"Change Report" : "",
"Change Rota" : "",
"Change Stock" : "",
"Change System Options" : "",
"Change Tests" : "",
"Change Transactions" : "",
"Change Transport" : "",
"Change Trap Loans" : "",
"Change User Settings" : "",
"Change Vaccinations" : "",
"Change Vouchers" : "",
"Change Waiting List" : "",
"Change date required on selected treatments" : "",
"Changed Mind" : "",
"Chart" : "",
"Chart (Bar)" : "",
"Chart (Line)" : "",
"Chart (Pie)" : "",
"Chart (Point)" : "",
"Chart (Steps)" : "",
"Chartreux" : "",
"Check" : "",
"Check License" : "",
"Check No" : "",
"Checkbox" : "",
"Checked By" : "",
"Checkered Giant" : "",
"Cheque" : "",
"Chesapeake Bay Retriever" : "",
"Chicken" : "",
"Chihuahua" : "",
"Children" : "",
"Chinchilla" : "",
"Chinese Crested Dog" : "",
"Chinese Foo Dog" : "",
"Chlamydophila" : "",
"Chocolate" : "",
"Chocolate Labrador Retriever" : "",
"Chocolate Tortie" : "",
"Chow Chow" : "",
"Cinnamon" : "",
"Cinnamon Tortoiseshell" : "",
"Citation Type" : "",
"Citation Types" : "",
"Citations" : "",
"City" : "",
"City contains" : "",
"Class" : "",
"Clear" : "เคลียร์",
"Clear and sign again" : "",
"Clear tables before importing" : "",
"Clinic" : "",
"Clinic Calendar" : "",
"Clinic Invoice - {0}" : "",
"Clinic Statuses" : "",
"Clone" : "ปิด",
"Clone Animals" : "",
"Clone Rota" : "",
"Clone the rota this week to another week" : "",
"Cloning..." : "กำลังจัดเรียง...",
"Close" : "ปิด",
"Clumber Spaniel" : "",
"Clydesdale" : "",
"Coat" : "แมว",
"Coat Type" : "",
"Coat Types" : "",
"Cockapoo" : "",
"Cockatiel" : "",
"Cockatoo" : "",
"Cocker Spaniel" : "",
"Code" : "โค้ด",
"Code contains" : "",
"Code format tokens:" : "",
"Collie" : "",
"Color" : "",
"Color to use when publishing to third party services and adoption sites" : "",
"Colors" : "",
"Columns" : "",
"Columns displayed" : "",
"Comma separated list of units for this location, eg: 1,2,3,4,Isolation,Pen 5" : "",
"Comments" : "คอมเม้นต์",
"Comments Contain" : "",
"Comments contain" : "",
"Comments copied to web preferred media." : "",
"Complaint" : "",
"Complete" : "เสร็จแล้ว",
"Complete Tasks" : "",
"Completed" : "เสร็จแล้ว",
"Completed Between" : "",
"Completed Type" : "",
"Completed notes upto today" : "",
"Completion Date" : "",
"Completion Type" : "",
"Configuration" : "",
"Confirm" : "",
"Confirm Password" : "",
"Confirmation message" : "",
"Confirmed" : "",
"Consulting Room" : "",
"Consulting Room - {0}" : "",
"Consumed" : "",
"Contact" : "ติดต่อ",
"Contact Contains" : "",
"Conure" : "",
"Convert this reservation to an adoption" : "",
"Coonhound" : "",
"Copy animal comments to the notes field of the web preferred media for this animal" : "",
"Copy from animal comments" : "",
"Copy of {0}" : "สำเนาของ {0}",
"Corded" : "",
"Corgi" : "",
"Cornish Rex" : "",
"Cost" : "แมว",
"Cost For" : "",
"Cost Type" : "",
"Cost Types" : "",
"Cost date must be a valid date" : "",
"Cost record" : "",
"Costs" : "แมว",
"Costs need a date and amount." : "",
"Coton de Tulear" : "",
"Could not find animal with name '{0}'" : "",
"Country" : "ประเทศ:",
"Courtesy Listing" : "",
"Cow" : "",
"Cream" : "ครีม",
"Create" : "",
"Create Animal" : "",
"Create Log" : "",
"Create Payment" : "สร้างโดย",
"Create Waiting List" : "",
"Create a cost record" : "",
"Create a due or received payment record from this appointment" : "",
"Create a new animal by copying this one" : "",
"Create a new animal from this found animal record" : "",
"Create a new animal from this incident" : "",
"Create a new animal from this waiting list entry" : "",
"Create a new document" : "",
"Create a new template" : "",
"Create a new template by copying the selected template" : "",
"Create a new waiting list entry from this found animal record" : "",
"Create and edit" : "",
"Create boarding cost record when animal is adopted" : "",
"Create diary notes from a task" : "",
"Create missing lookup values" : "",
"Create note this many days from today, or 9999 to ask" : "",
"Create this message" : "",
"Create this person" : "",
"Created By" : "สร้างโดย",
"Creating cost and cost types creates matching accounts and transactions" : "",
"Creating payments and payments types creates matching accounts and transactions" : "",
"Creating..." : "",
"Credit Card" : "",
"Creme D'Argent" : "",
"Criteria:" : "",
"Crossbreed" : "",
"Cruelty Case" : "",
"Culling" : "",
"Curly" : "",
"Current" : "",
"Current Vet" : "",
"Cymric" : "",
"D (Dog)" : "",
"DD = current day" : "",
"DDL dump (DB2)" : "",
"DDL dump (MySQL)" : "",
"DDL dump (PostgreSQL)" : "",
"DHLPP" : "",
"DO NOT use this field to store notes about what the person is looking for." : "",
"DOA {0}" : "",
"DOB" : "",
"Dachshund" : "",
"Daily Boarding Cost" : "",
"Dalmatian" : "",
"Dandi Dinmont Terrier" : "",
"Data" : "วันที่",
"Data Protection" : "",
"Database" : "ฐานข้อมูล",
"Date" : "วันที่",
"Date '{0}' is not valid." : "",
"Date Brought In" : "",
"Date Found" : "วันที่พบ",
"Date Lost" : "",
"Date Of Birth" : "",
"Date Put On" : "",
"Date Removed" : "วันที่เอาออก",
"Date Reported" : "วันที่เอาออก",
"Date and notes are mandatory." : "",
"Date brought in cannot be blank" : "",
"Date brought in cannot be in the future." : "",
"Date brought in is not valid" : "",
"Date found cannot be blank" : "",
"Date found cannot be blank." : "",
"Date lost cannot be blank" : "",
"Date lost cannot be blank." : "",
"Date of Birth" : "",
"Date of birth cannot be blank" : "",
"Date of birth cannot be in the future." : "",
"Date of birth is not valid" : "",
"Date of last owner contact" : "",
"Date put on" : "",
"Date put on cannot be blank" : "",
"Date put on list" : "",
"Date removed" : "วันที่เอาออก",
"Date reported cannot be blank" : "",
"Date reported cannot be blank." : "",
"Date/Time" : "วันที่/เวลา",
"Day" : "",
"Day Pivot" : "",
"Days On Shelter" : "",
"Dead On Arrival" : "",
"Dead animal" : "",
"Dead on arrival" : "",
"Death" : "",
"Death Comments" : "",
"Death Reason" : "เหตุผลที่เสียชีวิต",
"Death Reasons" : "เหตุผลที่เสียชีวิต",
"Debit Card" : "",
"Dec" : "ธ.ค.",
"Deceased" : "",
"Deceased Date" : "",
"December" : "ธันวาคม",
"Declawed" : "",
"Declined" : "",
"Default Breed" : "",
"Default Brought In By" : "",
"Default Coat Type" : "",
"Default Color" : "",
"Default Cost" : "",
"Default Death Reason" : "",
"Default Diary Person" : "",
"Default Entry Reason" : "",
"Default Incident Type" : "",
"Default Location" : "",
"Default Log Filter" : "",
"Default Log Type" : "",
"Default Payment Method" : "",
"Default Payment Type" : "",
"Default Reservation Status" : "",
"Default Return Reason" : "",
"Default Rota Shift" : "",
"Default Size" : "",
"Default Species" : "",
"Default Test Type" : "",
"Default Type" : "",
"Default Vaccination Type" : "",
"Default Value" : "",
"Default daily boarding cost" : "",
"Default destination account for payments" : "",
"Default image for documents" : "",
"Default image for this record and the web" : "",
"Default source account for costs" : "",
"Default to advanced find animal screen" : "",
"Default to advanced find person screen" : "",
"Default transaction view" : "",
"Default urgency" : "",
"Default video for publishing" : "",
"Default view" : "",
"Defaults" : "ค่าปริยาย",
"Defaults formats for code and shortcode are TYYYYNNN and NNT" : "",
"Delete" : "ลบ",
"Delete Accounts" : "",
"Delete Animals" : "ลบสัตว์",
"Delete Citations" : "",
"Delete Clinic Appointment" : "",
"Delete Cost" : "",
"Delete Diary" : "",
"Delete Diets" : "",
"Delete Document from Repository" : "",
"Delete Found Animal" : "",
"Delete Incidents" : "",
"Delete Incoming Forms" : "",
"Delete Investigation" : "",
"Delete Licenses" : "",
"Delete Litter" : "",
"Delete Log" : "",
"Delete Lost Animal" : "",
"Delete Media" : "",
"Delete Medical Records" : "",
"Delete Movement" : "",
"Delete Payments" : "",
"Delete Person" : "",
"Delete Regimen" : "",
"Delete Report" : "",
"Delete Rota" : "",
"Delete Stock" : "",
"Delete Tests" : "",
"Delete Transport" : "",
"Delete Trap Loans" : "",
"Delete Treatments" : "",
"Delete Vaccinations" : "",
"Delete Vouchers" : "",
"Delete Waiting List" : "",
"Delete all rota entries for this week" : "",
"Delete this animal" : "",
"Delete this incident" : "",
"Delete this person" : "",
"Delete this record" : "",
"Delete this waiting list entry" : "",
"Denied" : "",
"Deposit" : "",
"Deposit Account" : "",
"Deposit Returned" : "",
"Description" : "คำอธิบาย",
"Description Contains" : "",
"Description cannot be blank" : "",
"Deselect" : "",
"Details" : "รายละเอียด",
"Devon Rex" : "",
"Dialog title" : "",
"Diary" : "",
"Diary Task" : "",
"Diary Task: {0}" : "",
"Diary Tasks" : "",
"Diary and Messages" : "",
"Diary calendar" : "",
"Diary date cannot be blank" : "",
"Diary date is not valid" : "",
"Diary for {0}" : "",
"Diary note cannot be blank" : "",
"Diary note {0} marked completed" : "",
"Diary note {0} rediarised for {1}" : "",
"Diary notes for: {0}" : "",
"Diary notes need a date and subject." : "",
"Diary subject cannot be blank" : "",
"Diary task items need a pivot, subject and note." : "",
"Diary tasks need a name." : "",
"Did not ask" : "",
"Did you know?" : "",
"Died" : "",
"Died off shelter" : "",
"Died {0}" : "",
"Diet" : "",
"Diets" : "",
"Diets need a start date." : "",
"Dispatch" : "",
"Dispatch Address" : "",
"Dispatch Between" : "",
"Dispatch Date/Time" : "",
"Dispatch {0}: {1}" : "",
"Dispatched ACO" : "",
"Display" : "",
"Display Index" : "",
"Display a search button at the right side of the search box" : "",
"Distemper" : "",
"Do Not Publish" : "เผยแพร่",
"Do Not Register Microchip" : "",
"Do not show" : "",
"Doberman Pinscher" : "",
"Document" : "",
"Document Link" : "",
"Document Repository" : "",
"Document Templates" : "",
"Document file" : "",
"Document signed" : "",
"Document signing request" : "",
"Document templates" : "",
"Documents" : "",
"Dog" : "สุนัข",
"Dogo Argentino" : "",
"Dogs" : "สุนัข",
"Dogue de Bordeaux" : "",
"Domestic Long Hair" : "",
"Domestic Medium Hair" : "",
"Domestic Short Hair" : "",
"Don't create a cost record" : "",
"Don't scale" : "",
"Donated" : "",
"Donation" : "บริจาค",
"Donation?" : "บริจาคหรือเปล่า?",
"Donations for animals entering the shelter" : "",
"Done" : "ไม่มี",
"Donkey" : "",
"Donkey/Mule" : "",
"Donor" : "",
"Dosage" : "",
"Dove" : "ไม่มี",
"Download" : "",
"Draft" : "",
"Driver" : "",
"Drop files here..." : "",
"Dropoff" : "",
"Duck" : "เป็ด",
"Due" : "",
"Due in next month" : "",
"Due in next week" : "",
"Due in next year" : "",
"Due today" : "",
"Duration" : "ช่วงระยะ",
"Dutch" : "",
"Dutch Shepherd" : "",
"Dwarf" : "",
"Dwarf Eared" : "",
"E = first letter of animal entry category" : "",
"EE = first and second letter of animal entry category" : "",
"Eclectus" : "",
"Edit" : "แก้ไข",
"Edit All Diary Notes" : "",
"Edit Appointment" : "",
"Edit Diary Tasks" : "",
"Edit HTML publishing templates" : "",
"Edit Header/Footer" : "",
"Edit Invoice Item" : "",
"Edit Lookups" : "",
"Edit My Diary Notes" : "",
"Edit Online Forms" : "",
"Edit Reports" : "",
"Edit Roles" : "",
"Edit Users" : "",
"Edit account" : "",
"Edit additional field" : "",
"Edit citation" : "",
"Edit cost" : "",
"Edit diary" : "",
"Edit diary notes" : "",
"Edit diary task" : "",
"Edit diary tasks" : "",
"Edit diet" : "",
"Edit document" : "",
"Edit form field" : "",
"Edit investigation" : "",
"Edit invoice" : "แก้ไขเจ้าของ",
"Edit license" : "",
"Edit litter" : "",
"Edit litters" : "",
"Edit log" : "",
"Edit media notes" : "",
"Edit medical profile" : "",
"Edit medical regimen" : "",
"Edit movement" : "",
"Edit my diary notes" : "",
"Edit my diary notes" : "",
"Edit notes" : "",
"Edit online form" : "",
"Edit online form HTML header/footer" : "",
"Edit payment" : "",
"Edit report" : "",
"Edit report template HTML header/footer" : "",
"Edit role" : "",
"Edit roles" : "",
"Edit rota item" : "",
"Edit stock" : "",
"Edit system users" : "",
"Edit template" : "",
"Edit test" : "",
"Edit the current waiting list" : "",
"Edit transaction" : "",
"Edit transport" : "",
"Edit trap loan" : "",
"Edit user" : "",
"Edit vaccination" : "",
"Edit voucher" : "",
"Edit {0}" : "",
"Egyptian Mau" : "",
"Electricity Bills" : "",
"Email" : "อีเมล์",
"Email Address" : "",
"Email PDF" : "",
"Email Person" : "",
"Email To" : "",
"Email a copy of the selected HTML documents as PDFs" : "",
"Email a copy of the selected media files" : "",
"Email address" : "",
"Email document for electronic signature" : "",
"Email incident notes to ACO" : "",
"Email incoming form submissions to this comma separated list of email addresses" : "",
"Email media" : "",
"Email person" : "",
"Email signature" : "",
"Email submissions to" : "",
"Email this message to all matching users" : "",
"Email this person" : "",
"Email users their diary notes each day" : "",
"Emu" : "อีมู",
"Enable FTP uploading" : "",
"Enable accounts functionality" : "",
"Enable location filters" : "",
"Enable lost and found functionality" : "",
"Enable multiple sites" : "",
"Enable the waiting list functionality" : "",
"Enable visual effects" : "",
"Enabled" : "",
"End Of Day" : "",
"End Time" : "",
"End at" : "",
"End of month" : "",
"End of year" : "",
"Ends" : "",
"Ends after" : "",
"English Bulldog" : "",
"English Cocker Spaniel" : "",
"English Coonhound" : "",
"English Lop" : "",
"English Pointer" : "",
"English Setter" : "",
"English Shepherd" : "",
"English Spot" : "",
"English Springer Spaniel" : "",
"English Toy Spaniel" : "",
"Entered (newest first)" : "",
"Entered (oldest first)" : "",
"Entered From" : "",
"Entered To" : "",
"Entered shelter" : "",
"Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "",
"Entering 'deceased' in the search box will show you recently deceased animals." : "",
"Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "",
"Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "",
"Entering 'os' in the search box will show you all shelter animals." : "",
"Entlebucher" : "",
"Entry" : "",
"Entry Category" : "",
"Entry Donation" : "",
"Entry Reason" : "",
"Entry Reason Category" : "",
"Entry Reasons" : "",
"Entry reason" : "",
"Error contacting server." : "",
"Escaped" : "",
"Escaped {0}" : "",
"Eskimo Dog" : "",
"Estimate" : "",
"Euthanized" : "",
"Euthanized {0}" : "",
"Every day" : "",
"Exclude animals who are aged under" : "",
"Exclude from bulk email" : "",
"Exclude new animal photos from publishing" : "",
"Exclude this image when publishing" : "",
"Execute" : "",
"Execute Script" : "",
"Execute the SQL in the box below" : "",
"Executing Task" : "",
"Executing..." : "",
"Exotic Shorthair" : "",
"Expense" : "",
"Expense::" : "",
"Expenses::Board" : "",
"Expenses::Electricity" : "",
"Expenses::Food" : "",
"Expenses::Gas" : "",
"Expenses::Phone" : "",
"Expenses::Postage" : "",
"Expenses::Stationary" : "",
"Expenses::Water" : "",
"Expire in next month" : "",
"Expired" : "",
"Expired in the last month" : "",
"Expired in the last week" : "",
"Expires" : "",
"Expiry" : "",
"Expiry date" : "",
"Export" : "",
"Export Animals as CSV" : "",
"Export Report" : "",
"Export Reports as CSV" : "",
"Export a CSV file of animal records that ASM can import into another database." : "",
"Export this database in various formats" : "",
"Exporting the complete database can take some time and generate a very large file, are you sure?" : "",
"Extra Images" : "",
"Extra images" : "",
"Extra-Toes Cat (Hemingway Polydactyl)" : "",
"F (Feral Cat)" : "",
"FECV/FeCoV" : "",
"FIPV" : "",
"FIV" : "",
"FIV Result" : "",
"FIV+" : "",
"FIV/L Test Date" : "",
"FIV/L Tested" : "",
"FLV" : "",
"FLV Result" : "",
"FLV+" : "",
"FTP hostname" : "",
"FTP password" : "",
"FTP username" : "",
"FVRCP" : "",
"Facebook" : "",
"Failed sending email" : "",
"Failed to create payment." : "",
"Failed to renew license." : "",
"Fawn" : "",
"Fawn Tortoiseshell" : "",
"FeLV" : "",
"Features" : "",
"Feb" : "ก.พ.",
"February" : "กุมภาพันธ์",
"Fee" : "",
"Female" : "เพศเมีย",
"Feral" : "",
"Ferret" : "",
"Field Spaniel" : "",
"Field names should not contain spaces." : "",
"Fila Brasileiro" : "",
"File" : "",
"Filter" : "",
"Financial" : "",
"Finch" : "",
"Find Animal" : "ค้นหาสัตว์",
"Find Animal/Person" : "",
"Find Found Animal" : "สัตว์ที่พบเจอ",
"Find Incident" : "",
"Find Lost Animal" : "สัตว์ที่พบเจอ",
"Find Person" : "",
"Find a found animal" : "สัตว์ที่พบเจอ",
"Find a lost animal" : "สัตว์ที่พบเจอ",
"Find aco" : "",
"Find an incident" : "",
"Find animal" : "ค้นหาสัตว์",
"Find animal columns" : "",
"Find animal control incidents returned {0} results." : "",
"Find animals matching the looking for criteria of this person" : "",
"Find donor" : "",
"Find driver" : "",
"Find fosterer" : "",
"Find found animal returned {0} results." : "",
"Find homechecked" : "",
"Find homechecker" : "",
"Find incident" : "",
"Find lost animal returned {0} results." : "",
"Find member" : "",
"Find person" : "",
"Find person columns" : "",
"Find retailer" : "",
"Find shelter" : "",
"Find staff" : "",
"Find staff/volunteer" : "",
"Find this address on a map" : "",
"Find vet" : "",
"Find volunteer" : "",
"Fine Amount" : "",
"Finnish Lapphund" : "",
"Finnish Spitz" : "",
"First Last" : "",
"First Names" : "",
"First name(s)" : "",
"First offence" : "",
"Fish" : "ปลา",
"Flag" : "",
"Flags" : "",
"Flat-coated Retriever" : "",
"Flemish Giant" : "",
"Florida White" : "",
"Followup" : "",
"Followup Between" : "",
"Followup Date/Time" : "",
"Footer" : "",
"For" : "",
"Forbidden" : "",
"Forenames" : "",
"Forget" : "",
"Form URL" : "",
"Forms need a name." : "",
"Foster" : "",
"Foster Book" : "",
"Foster Capacity" : "",
"Foster Transfer" : "วันที่โอนย้าย",
"Foster an animal" : "",
"Foster book" : "",
"Foster movements must have a valid foster date." : "",
"Foster successfully created." : "",
"Fostered" : "",
"Fostered Animals" : "สัตว์ที่สูญหาย",
"Fostered to {0} since {1}" : "",
"Fosterer" : "",
"Fosterer (Active Only)" : "",
"Fosterer Medical Report" : "",
"Found" : "",
"Found Animal" : "สัตว์ที่พบเจอ",
"Found Animal - Additional" : "",
"Found Animal - Details" : "",
"Found Animal Contact" : "",
"Found Animal {0}" : "สัตว์ที่พบเจอ: {0}",
"Found Animal: {0}" : "สัตว์ที่พบเจอ: {0}",
"Found animal - {0} {1} [{2}]" : "",
"Found animal entries matching '{0}'." : "",
"Found animals must have a contact" : "",
"Found animals reported in the last 30 days." : "",
"Found from" : "",
"Found to" : "",
"FoundLost animal entry {0} successfully created." : "",
"Fox Terrier" : "",
"Foxhound" : "",
"Fr" : "",
"French Bulldog" : "",
"French-Lop" : "",
"Frequency" : "ความถี่",
"Frequently Asked Questions" : "",
"Fri" : "",
"Friday" : "",
"From" : "",
"From Fostering" : "",
"From Other" : "",
"From retailer is only valid on adoption movements." : "",
"Future notes" : "",
"GDPR Contact Opt-In" : "",
"Gaited" : "",
"Gas Bills" : "",
"Gecko" : "",
"General" : "",
"Generate" : "",
"Generate Documents" : "",
"Generate HTML from this SQL" : "",
"Generate Report" : "",
"Generate a document from this animal" : "",
"Generate a document from this incident" : "",
"Generate a document from this movement" : "",
"Generate a document from this person" : "",
"Generate a document from this record" : "",
"Generate a javascript database for the search page" : "",
"Generate a new animal code" : "",
"Generate a random name for this animal" : "",
"Generate document from this appointment" : "",
"Generate document from this license" : "",
"Generate document from this payment" : "",
"Generate document from this transport" : "",
"Generate documentation" : "",
"Generate documents" : "",
"Generate image thumbnails as tn_$$IMAGE$$" : "",
"Generated document '{0}'" : "",
"Gerbil" : "",
"German Pinscher" : "",
"German Shepherd Dog" : "",
"German Shorthaired Pointer" : "",
"German Wirehaired Pointer" : "",
"Get more reports from sheltermanager.com" : "",
"Gift Aid" : "",
"GiftAid" : "",
"Giftaid" : "",
"Ginger" : "",
"Ginger and White" : "เทาและขาว",
"Give" : "",
"Give Treatments" : "",
"Give Vaccination" : "",
"Given" : "",
"Glen of Imaal Terrier" : "",
"Go" : "",
"Go the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "",
"Go the options screen and set your shelter's contact details and other settings." : "",
"Go the system users screen and add user accounts for your staff." : "",
"Goat" : "แพะ",
"Golden" : "",
"Golden Retriever" : "",
"Goldfish" : "ปลาทอง",
"Good With Cats" : "เป็นมิตรต่อแมว",
"Good With Children" : "เป็นมิตรต่อเด็ก",
"Good With Dogs" : "เป็นมิตรต่อสุนัข",
"Good with Cats" : "เป็นมิตรต่อแมว",
"Good with Children" : "เป็นมิตรต่อเด็ก",
"Good with Dogs" : "เป็นมิตรต่อสุนัข",
"Good with cats" : "เป็นมิตรต่อแมว",
"Good with children" : "เป็นมิตรต่อเด็ก",
"Good with dogs" : "เป็นมิตรต่อสุนัข",
"Good with kids" : "เป็นมิตรต่อแมว",
"Google+" : "",
"Goose" : "",
"Gordon Setter" : "",
"Grade" : "",
"Great Dane" : "",
"Great Pyrenees" : "",
"Greater Swiss Mountain Dog" : "",
"Green" : "เขียว",
"Grey" : "เทา",
"Grey and White" : "เทาและขาว",
"Greyhound" : "",
"Guinea Pig" : "",
"Guinea fowl" : "",
"HMRC Gift Aid Spreadsheet" : "",
"HTML" : "",
"HTML Publishing Templates" : "",
"HTML/FTP Publisher" : "",
"Hairless" : "",
"Half-Yearly" : "รายครึ่งปี",
"Hamster" : "",
"Harlequin" : "",
"Havana" : "",
"Havanese" : "",
"Header" : "",
"Health Problems" : "",
"Health and Identification" : "",
"Healthy" : "สมบูรณ์",
"Heartworm" : "",
"Heartworm Test Date" : "",
"Heartworm Test Result" : "",
"Heartworm Tested" : "",
"Heartworm+" : "",
"Hedgehog" : "",
"Held" : "",
"Help" : "",
"Hepatitis" : "",
"Here are some things you should do before you start adding animals and people to your database." : "",
"Hidden" : "",
"Hidden Comments" : "",
"Hidden comments about the animal" : "",
"Hide deceased animals from the home page" : "",
"High" : "",
"Highlight" : "",
"Himalayan" : "",
"History" : "ประวัติ",
"Hold" : "",
"Hold the animal until this date or blank to hold indefinitely" : "",
"Hold until" : "",
"Hold until {0}" : "",
"Holland Lop" : "",
"Home" : "",
"Home Phone" : "",
"Home page" : "",
"Homecheck Areas" : "",
"Homecheck Date" : "",
"Homecheck History" : "",
"Homecheck areas" : "",
"Homechecked" : "",
"Homechecked By" : "",
"Homechecked by" : "",
"Homechecker" : "",
"Horizontal Pitch" : "",
"Horse" : "ม้า",
"Hotot" : "",
"Hound" : "",
"Hours" : "",
"Housetrained" : "",
"Hovawart" : "",
"How urgent is it that we take this animal?" : "",
"Husky" : "",
"I've finished, Don't show me this popup again." : "",
"IP Restriction" : "",
"IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "",
"Ibizan Hound" : "",
"If the shelter provides initial insurance cover to new adopters, the policy number" : "",
"If this form has a populated emailaddress field during submission, send a confirmation email to it" : "",
"If this is the web preferred image, web publishers will use these notes as the animal description" : "",
"If this person is a fosterer, the maximum number of animals they can care for." : "",
"If this person is a member, the date that membership expires." : "",
"If this person is a member, their membership number" : "",
"If this person is a member, their membership number." : "",
"If this stock record is for a drug, the batch number from the container" : "",
"If this stock record is for a perishable good, the expiry date on the container" : "",
"If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "",
"If you don't select any locations, publishers will include animals in all locations." : "",
"Iguana" : "",
"Illyrian Sheepdog" : "",
"Image" : "",
"Image file" : "",
"Import" : "",
"Import a CSV file" : "",
"Import a PayPal CSV file" : "",
"Import from file" : "",
"Important" : "",
"In" : "",
"In SubTotal" : "",
"In the last month" : "",
"In the last quarter" : "",
"In the last week" : "",
"In the last year" : "",
"In-Kind Donation" : "บริจาค",
"Inactive" : "",
"Inactive - do not include" : "",
"Incident" : "",
"Incident - Additional" : "",
"Incident - Citation" : "",
"Incident - Details" : "",
"Incident - Dispatch" : "",
"Incident - Owner" : "",
"Incident Between" : "",
"Incident Completed Types" : "",
"Incident Date/Time" : "",
"Incident Type" : "",
"Incident Types" : "",
"Incident date cannot be blank" : "",
"Incident followup" : "",
"Incident {0} successfully created." : "",
"Incident {0}, {1}: {2}" : "",
"Incidents" : "",
"Incidents Requiring Followup" : "",
"Include CSV header line" : "",
"Include Removed" : "",
"Include animals in the following locations" : "",
"Include animals on trial adoption" : "",
"Include animals who don't have a description" : "รวมสัตว์ที่ไม่มีภาพถ่ายด้วย",
"Include animals who don't have a picture" : "",
"Include cruelty case animals" : "",
"Include deceased animals" : "",
"Include fostered animals" : "",
"Include found" : "",
"Include held animals" : "",
"Include incomplete medical records when generating document templates" : "",
"Include incomplete vaccination and test records when generating document templates" : "",
"Include non-shelter animals" : "",
"Include off-shelter animals in medical calendar and books" : "",
"Include preferred photo" : "",
"Include quarantined animals" : "",
"Include reserved animals" : "",
"Include retailer animals" : "",
"Include returned" : "",
"Include this image when publishing" : "",
"Include unaltered animals" : "รวมสัตว์ที่ไม่มีภาพถ่ายด้วย",
"Income" : "",
"Income from an on-site shop" : "",
"Income::" : "",
"Income::Adoption" : "",
"Income::Donation" : "",
"Income::EntryDonation" : "",
"Income::Interest" : "",
"Income::OpeningBalances" : "",
"Income::Shop" : "",
"Income::Sponsorship" : "",
"Income::WaitingList" : "",
"Incoming" : "",
"Incoming Forms" : "",
"Incoming donations (misc)" : "",
"Incoming forms are online forms that have been completed and submitted by people on the web." : "",
"Incomplete incidents" : "",
"Incomplete notes upto today" : "",
"Index" : "",
"Individual/Couple" : "",
"Induct a new animal" : "",
"Information" : "ข้อมูล",
"Initials" : "",
"Install" : "",
"Install the selected reports to your database" : "",
"Insurance" : "",
"Insurance No" : "",
"Intake" : "",
"Intakes {0}" : "",
"Internal Location" : "",
"Internal Locations" : "",
"Invalid email address" : "",
"Invalid email address '{0}'" : "",
"Invalid microchip number length" : "",
"Invalid time '{0}', times should be in 00:00 format" : "",
"Invalid time, times should be in HH:MM format" : "",
"Invalid username or password." : "",
"Investigation" : "",
"Investigations" : "",
"Investigator" : "",
"Invoice Only" : "",
"Invoice items need a description and amount." : "",
"Irish Setter" : "",
"Irish Terrier" : "",
"Irish Water Spaniel" : "",
"Irish Wolfhound" : "",
"Is this a permanent foster?" : "",
"Is this a trial adoption?" : "",
"Issue a new insurance number for this animal/adoption" : "",
"Issue date and expiry date must be valid dates." : "",
"Issued" : "",
"Issued in the last month" : "",
"Issued in the last week" : "",
"Italian Greyhound" : "",
"Italian Spinone" : "",
"Item" : "",
"Jack Russell Terrier" : "",
"Jan" : "ม.ค.",
"January" : "มกราคม",
"Japanese Bobtail" : "",
"Japanese Chin" : "",
"Javanese" : "",
"Jersey Wooly" : "",
"Jindo" : "",
"Jul" : "ก.ค.",
"July" : "กรกฎาคม",
"Jump to diary" : "",
"Jump to donations" : "",
"Jump to media" : "",
"Jump to movements" : "",
"Jun" : "มิ.ย.",
"June" : "มิถุนายน",
"Jurisdiction" : "ช่วงระยะ",
"Jurisdictions" : "",
"Kai Dog" : "",
"Kakariki" : "",
"Karelian Bear Dog" : "",
"Keep table headers visible when scrolling" : "",
"Keeshond" : "",
"Kennel" : "",
"Kerry Blue Terrier" : "",
"Kishu" : "",
"Kittens (under {0} months)" : "",
"Km" : "",
"Komondor" : "",
"Korat" : "",
"Kuvasz" : "",
"Kyi Leo" : "",
"Label" : "",
"Labrador Retriever" : "",
"Lakeland Terrier" : "",
"Lancashire Heeler" : "",
"Large" : "ใหญ่",
"Last First" : "",
"Last Location" : "",
"Last Month" : "",
"Last Name" : "",
"Last Week" : "",
"Last changed by {0} on {1}" : "",
"Last name" : "",
"Last, First" : "",
"Latency" : "",
"Latency Tester" : "",
"Least recently changed" : "",
"Leave" : "",
"Leave of absence" : "",
"Left Margin" : "",
"Left shelter" : "",
"Leonberger" : "",
"Leptospirosis" : "",
"Letter" : "",
"Lhasa Apso" : "",
"Liability" : "",
"Licence for {0} successfully renewed {1} - {2}" : "",
"License" : "",
"License Number" : "",
"License Types" : "",
"License number '{0}' has already been issued." : "",
"License numbers matching '{0}'." : "",
"License requires a number" : "",
"License requires a person" : "",
"License requires issued and expiry dates" : "",
"Licenses" : "",
"Licensing" : "",
"Lifetime" : "",
"Light Amber" : "",
"Lilac" : "",
"Lilac Tortie" : "",
"Limited to {0} matches" : "",
"Link" : "ลิงค์",
"Link an animal" : "",
"Link to an external web resource" : "",
"Link to this animal" : "",
"Links" : "ลิงค์",
"List" : "",
"Litter" : "",
"Litter Ref" : "",
"Litter Reference" : "",
"Littermates" : "",
"Litters" : "",
"Litters need at least a required date and number." : "",
"Live Releases {0}" : "วันที่ปล่อย",
"Liver" : "",
"Liver and White" : "เทาและขาว",
"Lizard" : "",
"Llama" : "ลามะ",
"Loading..." : "",
"Loan" : "",
"Local" : "ที่ตั้ง",
"Locale" : "",
"Location" : "ที่ตั้ง",
"Location Filter" : "",
"Location and Species" : "",
"Location and Type" : "",
"Location and Unit" : "",
"Locations" : "ที่ตั้ง",
"Log" : "ปูมบันทึก",
"Log Text" : "",
"Log Type" : "ประเภทของปูมบันทึก",
"Log Types" : "ประเภทของปูมบันทึก",
"Log date must be a valid date" : "",
"Log entries need a date and text." : "",
"Log requires a date." : "",
"Log requires a person." : "",
"Log requires an animal." : "",
"Log successfully added." : "",
"Login" : "เข้าสู่ระบบ",
"Logout" : "ออกจากระบบ",
"Long" : "ยาว",
"Long term" : "",
"Longest On Shelter" : "",
"Looking For" : "",
"Looking for" : "",
"Lookup" : "",
"Lookup (Multiple Select)" : "",
"Lookup Values" : "",
"Lookup data" : "",
"Lookups" : "",
"Lop Eared" : "",
"Lory/Lorikeet" : "",
"Lost" : "แมว",
"Lost Animal" : "สัตว์ที่สูญหาย",
"Lost Animal - Additional" : "",
"Lost Animal - Details" : "",
"Lost Animal Contact" : "",
"Lost Animal: {0}" : "สัตว์ที่สูญหาย: {0}",
"Lost and Found" : "",
"Lost and found entries must have a contact" : "",
"Lost animal - {0} {1} [{2}]" : "",
"Lost animal entries matching '{0}'." : "",
"Lost animal entry {0} successfully created." : "",
"Lost animals must have a contact" : "",
"Lost animals reported in the last 30 days." : "",
"Lost from" : "",
"Lost to" : "",
"Lost/Found" : "",
"Lots of reports installed? Clean up the Reports menu with Settings-Options- Display-Show report menu items in collapsed categories." : "",
"Lovebird" : "",
"Low" : "",
"Lowchen" : "",
"Lowest" : "",
"M (Miscellaneous)" : "",
"MM = current month" : "",
"Macaw" : "",
"Mail" : "",
"Mail Merge" : "",
"Mail Merge - {0}" : "",
"Maine Coon" : "",
"Make this the default image when creating documents" : "",
"Make this the default image when viewing this record and publishing to the web" : "",
"Make this the default video link when publishing to the web" : "",
"Male" : "เพศผู้",
"Maltese" : "",
"Manchester Terrier" : "",
"Mandatory" : "",
"Manual" : "",
"Manually enter codes (do not generate)" : "",
"Manufacturer" : "",
"Manx" : "",
"Map" : "",
"Map of active incidents" : "",
"Mar" : "มี.ค.",
"March" : "มีนาคม",
"Maremma Sheepdog" : "",
"Mark Deceased" : "",
"Mark an animal deceased" : "",
"Mark dispatched now" : "",
"Mark new animals as not for adoption" : "",
"Mark responded now" : "",
"Mark selected payments received" : "",
"Mark this owner homechecked" : "",
"Mark treatments given" : "",
"Marketer" : "",
"Markings" : "คำเตือน",
"Markup" : "",
"Marriage/Relationship split" : "",
"Mastiff" : "",
"Match" : "มีนาคม",
"Match Lost and Found" : "",
"Match against other lost/found animals" : "",
"Match lost and found animals" : "",
"Match this animal with the lost and found database" : "",
"Maternity" : "",
"May" : "พ.ค.",
"McNab" : "",
"Media" : "มีเดีย",
"Media Notes" : "",
"Media notes contain" : "",
"Medical" : "",
"Medical Book" : "",
"Medical Profiles" : "",
"Medical book" : "",
"Medical calendar" : "",
"Medical profiles" : "",
"Medical profiles need a profile name, treatment, dosage and frequencies." : "",
"Medical regimens need an animal, name, dosage, a start date and frequencies." : "",
"Medicate" : "",
"Medicate Animal" : "",
"Medium" : "กลาง",
"Member" : "สมาชิก",
"Membership Expiry" : "",
"Membership Number" : "",
"Merge" : "",
"Merge Person" : "",
"Merge another animal into this one" : "",
"Merge another person into this one" : "",
"Merge bonded animals into a single record" : "",
"Merge duplicate records" : "",
"Message" : "",
"Message Board" : "",
"Message from {0}" : "",
"Message successfully sent to {0}" : "",
"Messages" : "",
"Messages successfully sent" : "",
"Method" : "",
"Microchip" : "",
"Microchip Date" : "",
"Microchip Number" : "",
"Microchip number {0} has already been allocated to another animal." : "",
"Microchipped" : "",
"Miles" : "",
"Mini Rex" : "",
"Mini-Lop" : "",
"Miniature Pinscher" : "",
"Minutes" : "",
"Missouri Foxtrotter" : "",
"Mixed Breed" : "สายพันธุ์",
"Mo" : "",
"Mobile signing pad" : "",
"Modify Additional Fields" : "",
"Modify Document Templates" : "",
"Modify Lookups" : "",
"Mon" : "",
"Monday" : "",
"Money" : "เงิน",
"Month" : "",
"Monthly" : "รายเดือน",
"More Info Needed" : "",
"More Medications" : "",
"More Tests" : "",
"More Vaccinations" : "",
"More diary notes" : "",
"Morgan" : "",
"Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "",
"Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "",
"Most recently changed" : "",
"Most relevant" : "",
"Mother" : "",
"Mountain Cur" : "",
"Mountain Dog" : "",
"Mouse" : "",
"Move" : "ไม่มี",
"Move an animal to a retailer" : "",
"Moved to animal record {0}" : "",
"Movement" : "",
"Movement Date" : "",
"Movement Number" : "",
"Movement Type" : "",
"Movement Types" : "",
"Movement dates clash with an existing movement." : "",
"Movement numbers must be unique." : "",
"Movements" : "",
"Movements require an animal" : "",
"Movements require an animal." : "",
"Moving..." : "",
"Multi-Lookup" : "",
"Multiple Treatments" : "",
"Munchkin" : "",
"Munsterlander" : "",
"Mustang" : "",
"My Fosters" : "",
"My Incidents" : "",
"My Undispatched Incidents" : "",
"My diary notes" : "",
"My sheltermanager.com account" : "",
"Mynah" : "",
"N (Non-Shelter Animal)" : "",
"NNN or NN = number unique for this type of animal for this year" : "",
"Name" : "ชื่อ",
"Name Contains" : "",
"Name and Address" : "",
"Name cannot be blank" : "",
"Name contains" : "",
"Neapolitan Mastiff" : "",
"Negative" : "",
"Neglect" : "",
"Netherland Dwarf" : "",
"Neuter/Spay" : "",
"Neutered" : "",
"Neutered/Spayed Non-Shelter Animals In {0}" : "",
"Neutered/Spayed Shelter Animals In {0}" : "",
"New" : "ใหม่",
"New Account" : "",
"New Appointment" : "",
"New Citation" : "",
"New Cost" : "",
"New Diary" : "",
"New Diet" : "",
"New Document" : "",
"New Field" : "",
"New Fosterer" : "",
"New Guinea Singing Dog" : "",
"New Item" : "",
"New License" : "",
"New Litter" : "",
"New Log" : "ปูมบันทึกใหม่",
"New Movement" : "",
"New Owner" : "",
"New Password" : "",
"New Payment" : "",
"New Profile" : "",
"New Record" : "",
"New Regimen" : "",
"New Report" : "",
"New Role" : "",
"New Stock" : "",
"New Task" : "",
"New Template" : "",
"New Test" : "",
"New Transport" : "",
"New Trap Loan" : "",
"New User" : "",
"New Vaccination" : "",
"New Voucher" : "",
"New Waiting List Entry" : "",
"New Zealand" : "",
"New diary task" : "",
"New form field" : "",
"New name" : "",
"New online form" : "",
"New password and confirmation password don't match." : "",
"New task detail" : "",
"New template" : "",
"Newfoundland Dog" : "",
"Next" : "ข้อความ",
"No" : "ไม่",
"No adjustment" : "",
"No data to show on the report." : "",
"No data." : "",
"No description" : "คำอธิบาย",
"No longer retained" : "",
"No matches found." : "",
"No picture" : "",
"No publishers are running." : "",
"No results found." : "",
"No results." : "",
"No tasks are running." : "",
"No view permission for this report" : "",
"Noise" : "",
"Non-Shelter" : "",
"Non-Shelter Animal" : "",
"Non-Shelter Animals" : "ลบสัตว์",
"Non-shelter Animals" : "ลบสัตว์",
"None" : "ไม่มี",
"Norfolk Terrier" : "",
"Normal user" : "",
"Norwegian Buhund" : "",
"Norwegian Elkhound" : "",
"Norwegian Forest Cat" : "",
"Norwegian Lundehund" : "",
"Norwich Terrier" : "",
"Not Arrived" : "",
"Not Available For Adoption" : "",
"Not Available for Adoption" : "",
"Not For Adoption" : "",
"Not Microchipped" : "",
"Not Reconciled" : "",
"Not available for adoption" : "",
"Not dispatched" : "",
"Not for adoption" : "",
"Not for adoption flag set" : "",
"Not in chosen publisher location" : "",
"Not reconciled" : "",
"Note" : "ไม่มี",
"Notes" : "ไม่มี",
"Notes about the death of the animal" : "",
"Nov" : "พ.ย.",
"Nova Scotia Duck-Tolling Retriever" : "",
"November" : "พฤศจิกายน",
"Now" : "",
"Number" : "",
"Number in litter" : "",
"Number of Tasks" : "",
"Number of animal links to show" : "",
"Number of fields" : "",
"Number of pets" : "",
"Ocicat" : "",
"Oct" : "ต.ค.",
"October" : "ตุลาคม",
"Office" : "",
"Old English Sheepdog" : "",
"Old Password" : "",
"Omit criteria" : "",
"Omit header/footer" : "",
"On Foster (in figures)" : "",
"On Shelter" : "",
"On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "",
"On shelter for {0} days. Total cost: {1}" : "",
"Once assigned, codes cannot be changed" : "",
"Once signed, this document cannot be edited or tampered with." : "",
"One Off" : "",
"One-Off" : "",
"Online Form: {0}" : "",
"Online Forms" : "",
"Online form fields need a name and label." : "",
"Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "",
"Only PDF, HTML and JPG image files can be attached." : "",
"Only active accounts" : "",
"Only allow users with one of these roles to view this incident" : "",
"Only show account totals for the current period, which starts on " : "",
"Only show declawed" : "",
"Only show pickups" : "",
"Only show special needs" : "",
"Only show transfers" : "",
"Open Incidents" : "",
"Open records in a new browser tab" : "",
"Open reports in a new browser tab" : "",
"Opening balances" : "",
"Optional, the date the vaccination \"wears off\" and needs to be administered again" : "",
"Options" : "",
"Or move this diary on to" : "",
"Order published animals by" : "",
"Organisation" : "",
"Organization" : "",
"Organization name" : "",
"Oriental Long Hair" : "",
"Oriental Short Hair" : "",
"Oriental Tabby" : "",
"Original Owner" : "",
"Ostrich" : "",
"Other Account" : "",
"Other Organisation" : "",
"Other Shelter" : "",
"Otterhound" : "",
"Our shelter does trial adoptions, allow us to mark these on movement screens" : "",
"Out" : "",
"Out Between" : "",
"Out SubTotal" : "",
"Output a deceased animals page" : "",
"Output a page with links to available online forms" : "",
"Output a separate page for each animal type" : "",
"Output a separate page for each species" : "",
"Output an adopted animals page" : "",
"Output an rss.xml page" : "",
"Overdue" : "",
"Overdue medical items" : "",
"Overtime" : "",
"Owl" : "",
"Owner" : "เจ้าของ",
"Owner Vet" : "",
"Owner given citation" : "",
"Owners Vet" : "",
"PM" : "",
"Page extension" : "",
"Paid" : "",
"Paint/Pinto" : "",
"Palomino" : "",
"Paper Size" : "",
"Papillon" : "",
"Parainfluenza" : "",
"Parakeet (Other)" : "",
"Parent" : "",
"Parrot (Other)" : "",
"Parrotlet" : "",
"Parvovirus" : "",
"Paso Fino" : "",
"Pass Homecheck" : "",
"Password" : "",
"Password for '{0}' has been reset." : "",
"Password is incorrect." : "",
"Password successfully changed." : "",
"Passwords cannot be blank." : "",
"Path" : "",
"Patterdale Terrier (Fell Terrier)" : "",
"PayPal" : "",
"Payment" : "",
"Payment Book" : "",
"Payment From" : "",
"Payment Methods" : "",
"Payment Type" : "",
"Payment Types" : "",
"Payment book" : "",
"Payment calendar" : "",
"Payment of {0} successfully received ({1})." : "",
"Payments" : "",
"Payments need at least one date, an amount and a person." : "",
"Payments of type" : "",
"Payments require a person" : "",
"Payments require a received date" : "",
"Peacock/Pea fowl" : "",
"Pekingese" : "",
"Pending Adoption" : "",
"Pending Apartment Verification" : "",
"Pending Home Visit" : "",
"Pending Vet Check" : "",
"Pension" : "",
"People" : "",
"People Looking For" : "",
"People matching '{0}'." : "",
"People or animal records that already exist in the database will not be imported again and movement/payment data will be attached to the existing records instead." : "",
"People with active reservations, but no homecheck has been done." : "",
"People with overdue donations." : "",
"Percheron" : "",
"Perform" : "",
"Perform Homecheck" : "",
"Perform Test" : "",
"Performed" : "",
"Permanent Foster" : "",
"Persian" : "",
"Person" : "",
"Person - Additional" : "",
"Person - Name and Address" : "",
"Person - Type" : "",
"Person Flags" : "",
"Person looking for report" : "",
"Person successfully created" : "",
"Personal" : "",
"Peruvian Inca Orchid" : "",
"Peruvian Paso" : "",
"Petit Basset Griffon Vendeen" : "",
"Pharaoh Hound" : "",
"Pheasant" : "",
"Phone" : "โทร",
"Phone contains" : "",
"Photo successfully uploaded." : "",
"Picked Up" : "",
"Picked Up By" : "",
"Pickup" : "",
"Pickup Address" : "",
"Pickup Location" : "",
"Pickup Locations" : "",
"Pig" : "หมู",
"Pig (Farm)" : "หมู (ฟาร์ม)",
"Pigeon" : "นกพิราบ",
"Pinterest" : "",
"Pionus" : "",
"Pit Bull Terrier" : "",
"Pixie-Bob" : "",
"Please click the Sign button when you are finished." : "",
"Please see the manual for more information." : "",
"Please select a PDF, HTML or JPG image file to attach" : "",
"Please tighten the scope of your email campaign to {0} emails or less." : "",
"Please use the links below to electronically sign these documents." : "",
"Plott Hound" : "",
"Poicephalus/Senegal" : "",
"Pointer" : "",
"Points for being found within 2 weeks of being lost" : "",
"Points for matching age group" : "",
"Points for matching breed" : "",
"Points for matching color" : "",
"Points for matching features" : "",
"Points for matching lost/found area" : "",
"Points for matching sex" : "",
"Points for matching species" : "",
"Points for matching zipcode" : "",
"Points required to appear on match report" : "",
"Polish" : "",
"Polish Lowland Sheepdog" : "",
"Pomeranian" : "",
"Pony" : "",
"Poodle" : "",
"Portugese Podengo" : "",
"Portuguese Water Dog" : "",
"Positive" : "",
"Positive for Heartworm, FIV or FLV" : "",
"Positive/Negative" : "",
"Post" : "รหัสไปรษณีย์",
"Postage costs" : "",
"Pot Bellied" : "",
"Prairie Dog" : "",
"Prefill new media notes for animal images with animal comments if left blank" : "",
"Prefill new media notes with the filename if left blank" : "",
"Premises" : "",
"Presa Canario" : "",
"Press F11 in HTML or SQL code editing boxes to edit in fullscreen mode" : "",
"Preview" : "",
"Previous" : "",
"Previous Adopter" : "",
"Print" : "พิมพ์",
"Print Preview" : "",
"Print selected forms" : "",
"Printable Manual" : "",
"Printing word processor documents uses hidden iframe and window.print" : "",
"Priority" : "",
"Priority Floor" : "",
"Produce a CSV File" : "",
"Produce a PDF of printable labels" : "",
"Profile" : "",
"Profile name cannot be blank" : "",
"Public Holiday" : "",
"Publish Animals to the Internet" : "",
"Publish HTML via FTP" : "",
"Publish now" : "",
"Publish to folder" : "",
"Published to Website" : "",
"Publisher" : "",
"Publisher Breed" : "",
"Publisher Color" : "",
"Publisher Logs" : "",
"Publisher Species" : "",
"Publishing" : "",
"Publishing History" : "",
"Publishing Logs" : "",
"Publishing Options" : "",
"Publishing complete." : "",
"Publishing template" : "",
"Pug" : "",
"Puli" : "",
"Pumi" : "",
"Puppies (under {0} months)" : "",
"Purchased" : "",
"Qty" : "",
"Quaker Parakeet" : "",
"Quantity" : "",
"Quarantine" : "",
"Quarterhorse" : "",
"Quarterly" : "รายไตรมาสต์",
"Quick Links" : "",
"Quicklinks" : "",
"Quicklinks are shown on the home page and allow quick access to areas of the system." : "",
"R" : "",
"Rabbit" : "กระต่าย",
"Rabies" : "",
"Rabies Tag" : "",
"RabiesTag" : "",
"Radio Buttons" : "",
"Ragamuffin" : "",
"Ragdoll" : "",
"Rank" : "อันดับ",
"Rat" : "",
"Rat Terrier" : "",
"Raw Markup" : "",
"Read the manual for more information about Animal Shelter Manager." : "",
"Real name" : "",
"Reason" : "เหตุผล",
"Reason For Appointment" : "",
"Reason Not From Owner" : "",
"Reason for Entry" : "",
"Reason for entry" : "",
"Reason not from Owner" : "",
"Reason the owner did not bring in the animal themselves" : "",
"Recalculate ALL animal ages/times" : "",
"Recalculate ALL animal locations" : "",
"Recalculate on-shelter animal locations" : "",
"Receipt No" : "",
"Receipt/Invoice" : "",
"Receive" : "",
"Receive a donation" : "",
"Receive a payment" : "",
"Received" : "",
"Received in last day" : "",
"Received in last month" : "",
"Received in last week" : "",
"Received in last year" : "",
"Received today" : "",
"Recently Adopted" : "",
"Recently Changed" : "",
"Recently Entered Shelter" : "",
"Recently Fostered" : "",
"Recently deceased" : "",
"Recently deceased shelter animals (last 30 days)." : "",
"Reception" : "",
"Reclaim" : "",
"Reclaim an animal" : "",
"Reclaim movements must have a valid reclaim date." : "",
"Reclaim successfully created." : "",
"Reclaimed" : "",
"Reconcile" : "",
"Reconciled" : "",
"Redbone Coonhound" : "",
"Rediarised" : "",
"Redirect to URL after POST" : "",
"Reference" : "",
"Refresh" : "",
"Regenerate 'Match lost and found animals' report" : "",
"Regenerate 'Person looking for' report" : "",
"Regenerate annual animal figures for" : "",
"Regenerate monthly animal figures for" : "",
"Regenerate person names in selected format" : "",
"Register Microchip" : "",
"Register microchips after" : "",
"Released To Wild" : "",
"Released To Wild {0}" : "",
"Reload" : "",
"Remaining" : "",
"Remember me on this computer" : "",
"Removal" : "เอาออก",
"Removal Reason" : "",
"Removal reason" : "",
"Remove" : "",
"Remove HTML and PDF document media after this many years" : "",
"Remove clinic functionality from screens and menus" : "",
"Remove fine-grained animal control incident permissions" : "",
"Remove holds after" : "",
"Remove move menu and the movements tab from animal and person screens" : "",
"Remove personally identifiable data" : "",
"Remove previously published files before uploading" : "",
"Remove retailer functionality from the movement screens and menus" : "",
"Remove short shelter code box from the animal details screen" : "",
"Remove the FIV/L test fields from animal health details" : "",
"Remove the Litter ID field from animal details" : "",
"Remove the Rabies Tag field from animal health details" : "",
"Remove the adoption coordinator field from animal entry details" : "",
"Remove the adoption fee field from animal details" : "",
"Remove the animal control functionality from menus and screens" : "",
"Remove the bonded with fields from animal entry details" : "",
"Remove the city/state fields from person details" : "",
"Remove the coat type field from animal details" : "",
"Remove the declawed box from animal health details" : "",
"Remove the document repository functionality from menus" : "",
"Remove the good with fields from animal notes" : "",
"Remove the heartworm test fields from animal health details" : "",
"Remove the insurance number field from the movement screens" : "",
"Remove the location unit field from animal details" : "",
"Remove the microchip fields from animal identification details" : "",
"Remove the neutered fields from animal health details" : "",
"Remove the online form functionality from menus" : "",
"Remove the picked up fields from animal entry details" : "",
"Remove the rota functionality from menus and screens" : "",
"Remove the size field from animal details" : "",
"Remove the stock control functionality from menus and screens" : "",
"Remove the tattoo fields from animal identification details" : "",
"Remove the transport functionality from menus and screens" : "",
"Remove the trap loan functionality from menus and screens" : "",
"Remove the weight field from animal details" : "",
"Removed" : "",
"Rename" : "",
"Renew License" : "",
"Renew licence" : "",
"Renew license" : "",
"Report" : "รายงาน",
"Report Title" : "",
"Report a new incident" : "",
"Reports" : "รายงาน",
"Request signature by email" : "",
"Requested" : "",
"Require followup" : "",
"Required" : "",
"Required date must be a valid date" : "",
"Reschedule" : "",
"Reservation" : "",
"Reservation Book" : "",
"Reservation Cancelled" : "",
"Reservation Date" : "",
"Reservation For" : "",
"Reservation Status" : "",
"Reservation Statuses" : "",
"Reservation book" : "",
"Reservation date cannot be after cancellation date." : "",
"Reservation successfully created." : "",
"Reservations must have a valid reservation date." : "",
"Reserve" : "",
"Reserve an animal" : "",
"Reserved" : "",
"Reset" : "",
"Reset Password" : "รีเซ็ตรหัสผ่าน",
"Respond" : "",
"Responded" : "",
"Responded Between" : "",
"Responded Date/Time" : "",
"Result" : "",
"Results" : "",
"Results for '{0}'." : "",
"Retailer" : "",
"Retailer Animals" : "ลบสัตว์",
"Retailer Book" : "",
"Retailer book" : "",
"Retailer movement successfully created." : "",
"Retailer movements must have a valid movement date." : "",
"Retriever" : "",
"Return" : "",
"Return Category" : "",
"Return Date" : "",
"Return a transferred animal" : "",
"Return an animal from adoption" : "",
"Return an animal from another movement" : "",
"Return an animal from transfer" : "",
"Return date cannot be before the movement date." : "",
"Return this movement and bring the animal back to the shelter" : "",
"Returned" : "",
"Returned By" : "สร้างโดย",
"Returned To Owner" : "",
"Returned from" : "",
"Returned to" : "",
"Returned to Owner {0}" : "",
"Returning" : "",
"Returns {0}" : "",
"Reupload animal images every time" : "",
"Rex" : "",
"Rhea" : "",
"Rhinelander" : "",
"Rhodesian Ridgeback" : "",
"Ringneck/Psittacula" : "",
"Role is in use and cannot be deleted." : "",
"Roles" : "",
"Roles need a name." : "",
"Rosella" : "",
"Rostered day off" : "",
"Rota" : "",
"Rota Types" : "",
"Rota cloned successfully." : "",
"Rotate image 90 degrees anticlockwis" : "",
"Rotate image 90 degrees clockwise" : "",
"Rottweiler" : "",
"Rough" : "",
"Rows" : "",
"Ruddy" : "",
"Russian Blue" : "",
"S (Stray Cat)" : "",
"S = first letter of animal species" : "",
"SM Account" : "",
"SMS" : "",
"SQL" : "SQL",
"SQL Interface" : "",
"SQL dump" : "",
"SQL dump (ASM2 HSQLDB Format)" : "",
"SQL editor: Press F11 to go full screen and press CTRL+SPACE to autocomplete table and column names" : "",
"SQL interface" : "",
"SQL is syntactically correct." : "",
"SS = first and second letter of animal species" : "",
"Sa" : "",
"Saddlebred" : "",
"Saint Bernard St. Bernard" : "",
"Sales Tax" : "",
"Saluki" : "",
"Samoyed" : "",
"Sat" : "",
"Satin" : "",
"Saturday" : "",
"Save" : "",
"Save and leave" : "",
"Save this incident" : "",
"Save this person" : "",
"Save this record" : "",
"Save this waiting list entry" : "",
"Saving..." : "",
"Scale published animal images to" : "",
"Scheduled" : "",
"Schipperke" : "",
"Schnauzer" : "",
"Scottish Deerhound" : "",
"Scottish Fold" : "",
"Scottish Terrier Scottie" : "",
"Script" : "",
"Seal" : "",
"Sealyham Terrier" : "",
"Search" : "ค้นหา",
"Search Results for '{0}'" : "",
"Search returned {0} results." : "",
"Search sort order" : "",
"Searchable" : "",
"Second offence" : "",
"Select" : "เลือก",
"Select a person" : "",
"Select a person to attach this form to." : "",
"Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "",
"Select all" : "",
"Select an animal" : "เลือกสัตว์",
"Select an animal to attach this form to." : "",
"Select an animal to merge into this record. The selected animal will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "",
"Select animal to merge" : "เลือกสัตว์",
"Select animals" : "เลือกสัตว์",
"Select date for diary task" : "",
"Select person to merge" : "",
"Select recommended" : "",
"Selected On-Shelter Animals" : "ลบสัตว์ที่สูญหาย",
"Selkirk Rex" : "",
"Send" : "ส่ง",
"Send Emails" : "",
"Send a weekly email to fosterers with medical information about their animals" : "ข้อมูลเพิ่มเติมเกี่ยวกับสัตว์",
"Send confirmation email to form submitter" : "",
"Send emails" : "",
"Send mass emails and perform mail merges" : "",
"Send via email" : "",
"Sending {0} emails is considered abusive and will damage the reputation of the email server." : "",
"Sending..." : "กำลังจัดเรียง...",
"Senior" : "",
"Sent to mobile signing pad." : "",
"Sep" : "ก.ย.",
"Separate waiting list rank by species" : "",
"September" : "กันยายน",
"Server clock adjustment" : "",
"Set publishing options" : "",
"Set this to 0 to never automatically remove." : "",
"Set to 0 to never update urgencies." : "",
"Set wether or not this user account can log in to the user interface." : "",
"Setter" : "",
"Setting a location filter will prevent this user seeing animals who are not in these locations on shelterview, find animal and search." : "",
"Settings" : "การตั้งค่า",
"Settings, Lookup data" : "",
"Settings, Options" : "",
"Settings, Reports" : "",
"Settings, System user accounts" : "",
"Sex" : "เพศ",
"Sex and Species" : "เลือกสปีชี่ส์",
"Sexes" : "",
"Shar Pei" : "",
"Share" : "",
"Shared weblink" : "",
"Shares" : "",
"Sheep" : "แกะ",
"Sheep Dog" : "",
"Shelter" : "",
"Shelter Animal" : "",
"Shelter Animals" : "",
"Shelter Details" : "",
"Shelter animal {0} '{1}'" : "",
"Shelter animals" : "",
"Shelter code cannot be blank" : "",
"Shelter code {0} has already been allocated to another animal." : "",
"Shelter stats (all time)" : "",
"Shelter stats (this month)" : "",
"Shelter stats (this week)" : "",
"Shelter stats (this year)" : "",
"Shelter stats (today)" : "",
"Shelter view" : "",
"Shepherd" : "",
"Shetland Sheepdog Sheltie" : "",
"Shiba Inu" : "",
"Shift" : "",
"Shih Tzu" : "",
"Short" : "สั้น",
"Show GDPR Contact Opt-In field on person screens" : "",
"Show PDF files inline instead of sending them as attachments" : "",
"Show a cost field on medical/test/vaccination screens" : "",
"Show a minimap of the address on person screens" : "",
"Show a separate paid date field with costs" : "",
"Show alerts on the home page" : "",
"Show animal thumbnails in movement and medical books" : "",
"Show animals adopted" : "",
"Show codes on the shelter view screen" : "",
"Show complete comments in table views" : "",
"Show empty locations" : "",
"Show on new record screens" : "",
"Show quick links on all pages" : "",
"Show quick links on the home page" : "",
"Show report menu items in collapsed categories" : "",
"Show short shelter codes on screens" : "",
"Show the adoption fee field" : "",
"Show the altered fields" : "",
"Show the breed fields" : "",
"Show the brought in by field" : "",
"Show the color field" : "",
"Show the date brought in field" : "",
"Show the entry category field" : "",
"Show the full diary (instead of just my notes) on the home page" : "",
"Show the hold fields" : "",
"Show the internal location field" : "",
"Show the litter ID field" : "",
"Show the location unit field" : "",
"Show the microchip fields" : "",
"Show the original owner field" : "",
"Show the size field" : "",
"Show the tattoo fields" : "",
"Show the time brought in field" : "",
"Show the transfer in field" : "",
"Show the weight field" : "",
"Show timeline on the home page" : "",
"Show tips on the home page" : "",
"Show transactions from" : "",
"Show weight as lb rather than kg" : "",
"Showing {0} timeline events." : "",
"Siamese" : "",
"Siberian" : "",
"Siberian Husky" : "",
"Sick leave" : "",
"Sick/Injured" : "ป่วย/บาดเจ็บ",
"Sick/injured animal" : "",
"Sign" : "",
"Sign document" : "",
"Sign on screen" : "",
"Signature" : "",
"Signed" : "",
"Signing" : "",
"Signing Pad" : "",
"Signup" : "",
"Silky Terrier" : "",
"Silver" : "",
"Silver Fox" : "",
"Silver Marten" : "",
"Similar Animal" : "",
"Similar Person" : "",
"Simple" : "อย่างง่าย",
"Singapura" : "",
"Single Treatment" : "",
"Site" : "ขนาด",
"Sites" : "ขนาด",
"Size" : "ขนาด",
"Sizes" : "ขนาด",
"Skunk" : "",
"Skye Terrier" : "",
"Sloughi" : "",
"Small" : "เล็ก",
"SmartTag PETID" : "",
"Smooth Fox Terrier" : "",
"Snake" : "งู",
"Snowshoe" : "",
"Social" : "",
"Softbill (Other)" : "",
"Sold" : "",
"Somali" : "",
"Some batch processes may take a few minutes to run and could prevent other users being able to use the system for a short time." : "",
"Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." : "",
"Some info text" : "",
"Sorrel" : "",
"Sorrel Tortoiseshell" : "",
"Sorry, this document has already been signed" : "",
"South Russian Ovcharka" : "",
"Spaniel" : "",
"Special Needs" : "",
"Species" : "สปีชี่ส์",
"Species A-Z" : "",
"Species Z-A" : "",
"Species to use when publishing to third party services and adoption sites" : "",
"Specifying a reschedule date will make copies of the selected vaccinations and mark them to be given on the reschedule date. Example: If this vaccination needs to be given every year, set the reschedule date to be 1 year from today." : "",
"Sphynx (hairless cat)" : "",
"Spitz" : "",
"Split baby/adult age at" : "",
"Split species pages with a baby/adult prefix" : "",
"Sponsorship donations" : "",
"Staff" : "",
"Staff Rota" : "",
"Staff record" : "",
"Staff rota" : "",
"Staffordshire Bull Terrier" : "",
"Standard" : "",
"Standardbred" : "",
"Start Date" : "",
"Start Of Day" : "",
"Start Time" : "",
"Start at" : "",
"Start date" : "",
"Start date must be a valid date" : "",
"Start of year" : "",
"Started" : "",
"Starts" : "สถานะ",
"State" : "สถานะ",
"State contains" : "",
"Stationary costs" : "",
"Stats" : "สถานะ",
"Stats period" : "",
"Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "",
"Status" : "สถานะ",
"Status and Species" : "เลือกสปีชี่ส์",
"Stay" : "",
"Stock" : "",
"Stock Control" : "",
"Stock Levels" : "",
"Stock Locations" : "",
"Stock Take" : "",
"Stock Usage Type" : "",
"Stock level must have a name" : "",
"Stock level must have a unit" : "",
"Stock needs a name and unit." : "",
"Stocktake" : "",
"Stolen" : "",
"Stolen {0}" : "",
"Stop" : "",
"Stop Publishing" : "",
"Stores" : "",
"Stray" : "",
"Su" : "",
"SubTotal" : "",
"Subject" : "",
"Submission received: {0}" : "",
"Success" : "",
"Successfully attached to {0}" : "",
"Sugar Glider" : "",
"Sun" : "",
"Sunday" : "",
"Super user" : "",
"Superuser" : "",
"Surname" : "นามสกุล",
"Surrender" : "",
"Surrender Pickup" : "",
"Suspect" : "",
"Suspect 1" : "",
"Suspect 2" : "",
"Suspect 3" : "",
"Suspect/Animal" : "",
"Swan" : "",
"Swedish Vallhund" : "",
"Syntax check this SQL" : "",
"System" : "",
"System Admin" : "",
"System Options" : "",
"System user accounts" : "",
"T = first letter of animal type" : "",
"TNR" : "",
"TNR - Trap/Neuter/Release" : "",
"TT = first and second letter of animal type" : "",
"Tabby" : "",
"Tabby and White" : "",
"Take another payment" : "",
"Taken By" : "",
"Tan" : "",
"Tan and Black" : "",
"Tan and White" : "",
"Task complete." : "เสร็จแล้ว",
"Task items are executed in order of index, lowest to highest" : "",
"Tattoo" : "",
"Tattoo Date" : "",
"Tattoo Number" : "",
"Tax" : "",
"Tax Amount" : "",
"Tax Rate %" : "",
"Telephone" : "โทรศัพท์",
"Telephone Bills" : "",
"Template" : "",
"Template Name" : "",
"Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "",
"Tennessee Walker" : "",
"Terrapin" : "",
"Terrier" : "",
"Test" : "ข้อความ",
"Test Animal" : "สัตว์ที่สูญหาย",
"Test Book" : "",
"Test Performed" : "",
"Test Results" : "",
"Test Types" : "",
"Test book" : "",
"Test marked as performed for {0} - {1}" : "",
"Tests" : "ข้อความ",
"Tests need an animal and at least a required date." : "",
"Text" : "ข้อความ",
"Text Encoding" : "",
"Th" : "",
"Thai Ridgeback" : "",
"Thank you for choosing Animal Shelter Manager for your shelter!" : "",
"Thank you, the document is now signed." : "",
"That animal is already linked to the incident" : "",
"The CSV file should be created by PayPal's \"All Activity\" report." : "",
"The SmartTag PETID number" : "",
"The SmartTag type" : "",
"The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "",
"The animal name" : "",
"The animal record to merge must be different from the original." : "",
"The animal sex" : "",
"The base color of this animal" : "",
"The coat type of this animal" : "",
"The confirmation email message to send to the form submitter. Leave blank to send a copy of the completed form." : "",
"The database will be inaccessible to all users while the export is in progress." : "",
"The date reported to the shelter" : "",
"The date the animal died" : "",
"The date the animal was FIV/L tested" : "",
"The date the animal was adopted" : "",
"The date the animal was altered" : "",
"The date the animal was born" : "",
"The date the animal was brought into the shelter" : "",
"The date the animal was heartworm tested" : "",
"The date the animal was microchipped" : "",
"The date the animal was reclaimed" : "",
"The date the animal was tattooed" : "",
"The date the foster animal will be returned if known" : "",
"The date the foster is effective from" : "",
"The date the litter entered the shelter" : "",
"The date the owner last contacted the shelter" : "",
"The date the payment was received" : "",
"The date the reservation is effective from" : "",
"The date the retailer movement is effective from" : "",
"The date the transfer is effective from" : "",
"The date the trial adoption is over" : "",
"The date the vaccination is required/due to be administered" : "",
"The date the vaccination was administered" : "",
"The date this animal was found" : "",
"The date this animal was lost" : "",
"The date this animal was put on the waiting list" : "",
"The date this animal was removed from the waiting list" : "",
"The date this animal was reserved" : "",
"The date this animal was returned to its owner" : "",
"The date this person was homechecked." : "",
"The default username is 'user' with the password 'letmein'" : "",
"The entry reason for this animal" : "",
"The litter this animal belongs to" : "",
"The locale determines the language ASM will use when displaying text, dates and currencies." : "",
"The location where the animal was picked up" : "",
"The microchip number" : "",
"The movement number '{0}' is not unique." : "",
"The number of stock records to create" : "",
"The period in days before waiting list urgency is increased" : "",
"The person record to merge must be different from the original." : "",
"The primary breed of this animal" : "",
"The reason the owner wants to part with the animal" : "",
"The reason this animal was removed from the waiting list" : "",
"The remaining units in the container" : "",
"The result of the FIV test" : "",
"The result of the FLV test" : "",
"The result of the heartworm test" : "",
"The retail/resale price per unit" : "",
"The secondary breed of this animal" : "",
"The selected file is not an image." : "",
"The shelter category for this animal" : "",
"The shelter reference number" : "",
"The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "",
"The size of this animal" : "",
"The species of this animal" : "",
"The tattoo number" : "",
"The type of unit in the container, eg: tablet, vial, etc." : "",
"The veterinary license number." : "",
"The wholesale/trade price the container was bought for" : "",
"There is not enough information in the form to attach to a shelter animal record (need an animal name)." : "",
"There is not enough information in the form to create a found animal record (need a description and area found)." : "",
"There is not enough information in the form to create a lost animal record (need a description and area lost)." : "",
"There is not enough information in the form to create a person record (need a surname)." : "",
"There is not enough information in the form to create a transport record (need animalname)." : "",
"There is not enough information in the form to create a transport record (need pickupdate and dropoffdate)." : "",
"There is not enough information in the form to create a waiting list record (need a description)." : "",
"There is not enough information in the form to create an incident record (need call notes and dispatch address)." : "",
"These are the HTML headers and footers used when displaying online forms." : "",
"These are the HTML headers and footers used when generating reports." : "",
"These are the default values for these fields when creating new records." : "",
"These batch processes are run each night by the system and should not need to be run manually." : "",
"These fields allow you to deduct stock for the test(s) given. This single deduction should cover the selected tests being performed." : "",
"These fields allow you to deduct stock for the treatment(s) given. This single deduction should cover the selected treatments being administered." : "",
"These fields allow you to deduct stock for the vaccination(s) given. This single deduction should cover the selected vaccinations being administered." : "",
"These fields determine which columns are shown on the find animal and find person screens." : "",
"These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "",
"These options change the behaviour of the search box at the top of the page." : "",
"These values are required for correct operation of the system. ONLY change them if you are translating to another language." : "",
"Third offence" : "",
"This Month" : "",
"This Week" : "",
"This Year" : "",
"This animal already has an active reservation." : "",
"This animal has a SmartTag PETID" : "",
"This animal has a tattoo" : "",
"This animal has active reservations, they will be cancelled." : "",
"This animal has an adoption fee of {0}" : "",
"This animal has been FIV/L tested" : "",
"This animal has been altered" : "",
"This animal has been declawed" : "",
"This animal has been heartworm tested" : "",
"This animal has movements and cannot be removed." : "",
"This animal has not been altered." : "",
"This animal has not been microchipped." : "",
"This animal has special needs" : "",
"This animal has the same name as another animal recently added to the system." : "",
"This animal is a crossbreed" : "",
"This animal is bonded with {0}" : "",
"This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "",
"This animal is currently at a retailer, it will be automatically returned first." : "",
"This animal is currently fostered, it will be automatically returned first." : "",
"This animal is currently held and cannot be adopted." : "",
"This animal is currently quarantined and should not leave the shelter." : "",
"This animal is marked not for adoption." : "",
"This animal is microchipped" : "",
"This animal is not on the shelter." : "",
"This animal is part of a cruelty case and should not leave the shelter." : "",
"This animal should be held in case it is reclaimed" : "",
"This animal should not be shown in figures and is not in the custody of the shelter" : "",
"This animal was dead on arrival to the shelter" : "",
"This animal was euthanized" : "",
"This animal was picked up" : "",
"This animal was transferred from another shelter" : "",
"This code has already been used." : "",
"This database is locked and in read-only mode. You cannot add, change or delete records." : "",
"This database is locked." : "",
"This date of birth is an estimate" : "",
"This expense account is the source for costs of this type" : "",
"This income account is the source for payments received of this type" : "",
"This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "",
"This many years after creation of a person record, the name, address and telephone data will be anonymized." : "",
"This month" : "",
"This movement cannot be from a retailer when the animal has no prior retailer movements." : "",
"This person has an animal control incident against them" : "",
"This person has an animal control incident against them." : "",
"This person has been banned from adopting animals" : "",
"This person has been banned from adopting animals." : "",
"This person has been under investigation" : "",
"This person has been under investigation." : "",
"This person has movements and cannot be removed." : "",
"This person has not passed a homecheck" : "",
"This person has not passed a homecheck." : "",
"This person has payments and cannot be removed." : "",
"This person has previously surrendered an animal." : "",
"This person is linked to a waiting list record and cannot be removed." : "",
"This person is linked to an animal and cannot be removed." : "",
"This person is linked to an investigation and cannot be removed." : "",
"This person is linked to animal control and cannot be removed." : "",
"This person is linked to animal licenses and cannot be removed." : "",
"This person is linked to animal transportation and cannot be removed." : "",
"This person is linked to citations and cannot be removed." : "",
"This person is linked to found animals and cannot be removed." : "",
"This person is linked to lost animals and cannot be removed." : "",
"This person is linked to trap loans and cannot be removed." : "",
"This person is not flagged as a fosterer and cannot foster animals." : "",
"This person is not flagged as a retailer and cannot handle retailer movements." : "",
"This person is very similar to another person on file, carry on creating this record?" : "",
"This person lives in the same area as the person who brought the animal to the shelter." : "",
"This record has been changed by another user, please reload." : "",
"This report cannot be sent by email as it requires criteria to run." : "",
"This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "",
"This screen allows you to add extra images to your database, for use in reports and documents." : "",
"This type of movement requires a date." : "",
"This type of movement requires a person." : "",
"This week" : "",
"This will permanently remove the selected records, are you sure?" : "",
"This will permanently remove the selected roles, are you sure?" : "",
"This will permanently remove the selected user accounts. Are you sure?" : "",
"This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"This will permanently remove this animal, are you sure?" : "",
"This will permanently remove this incident, are you sure?" : "",
"This will permanently remove this person, are you sure?" : "",
"This will permanently remove this record, are you sure?" : "",
"This will permanently remove this waiting list entry, are you sure?" : "",
"This will remove ALL rota entries for the week beginning {0}. This action is irreversible, are you sure?" : "",
"This year" : "",
"Thoroughbred" : "",
"Thu" : "",
"Thumbnail size" : "",
"Thursday" : "",
"Tibetan Mastiff" : "",
"Tibetan Spaniel" : "",
"Tibetan Terrier" : "",
"Tiger" : "เสือ",
"Time" : "",
"Time Brought In" : "",
"Time On List" : "",
"Time On Shelter" : "",
"Time on list" : "",
"Time on shelter" : "",
"Timeline" : "",
"Timeline ({0})" : "",
"Times should be in HH:MM format, eg: 09:00, 16:30" : "",
"Title" : "",
"Title First Last" : "",
"Title Initials Last" : "",
"To" : "",
"To Adoption" : "",
"To Fostering" : "",
"To Other" : "",
"To Retailer" : "",
"To add people to the rota, create new person records with the staff or volunteer flag." : "",
"To continue using ASM, please renew {0}" : "",
"To week beginning" : "",
"Today" : "",
"Tonkinese" : "",
"Too Many Animals" : "สัตว์ที่พบเจอ",
"Tooltip" : "",
"Top Margin" : "",
"Tortie" : "",
"Tortie and White" : "",
"Tortoise" : "",
"Tosa Inu" : "",
"Total" : "",
"Total number of units in the container" : "",
"Total payments" : "",
"Toucan" : "",
"Toy Fox Terrier" : "",
"Training" : "",
"Transactions" : "",
"Transactions need a date and description." : "",
"Transfer" : "",
"Transfer In" : "โอนย้ายเข้า",
"Transfer To" : "โอนย้ายเข้า",
"Transfer an animal" : "",
"Transfer from Municipal Shelter" : "",
"Transfer from Other Shelter" : "",
"Transfer successfully created." : "",
"Transfer?" : "",
"Transferred" : "โอนย้ายเข้า",
"Transferred From" : "โอนย้ายเข้า",
"Transferred In" : "โอนย้ายเข้า",
"Transferred In {0}" : "โอนย้ายเข้า",
"Transferred Out" : "โอนย้ายออก",
"Transferred Out {0}" : "โอนย้ายออก",
"Transfers must have a valid transfer date." : "",
"Transport" : "",
"Transport Book" : "",
"Transport Types" : "ประเภทกราฟ",
"Transport book" : "",
"Transport requires an animal" : "",
"Transports must have valid pickup and dropoff dates and times." : "",
"Trap Loans" : "",
"Trap Number" : "",
"Trap Types" : "",
"Trap loan" : "",
"Trap loans" : "",
"Treat animals at retailers as part of the shelter inventory" : "",
"Treat foster animals as part of the shelter inventory" : "",
"Treat trial adoptions as part of the shelter inventory" : "",
"Treatment" : "",
"Treatment Given" : "",
"Treatment marked as given for {0} - {1}" : "",
"Treatment name cannot be blank" : "",
"Treatments" : "",
"Treeing Walker Coonhound" : "",
"Trial Adoption" : "",
"Trial adoption" : "",
"Trial adoption book" : "",
"Trial ends on" : "",
"Tricolour" : "",
"Trigger Batch Processes" : "",
"Tu" : "",
"Tue" : "",
"Tuesday" : "",
"Tumblr" : "",
"Turkey" : "",
"Turkish Angora" : "",
"Turkish Van" : "",
"Turtle" : "เต่า",
"Twitter" : "",
"Type" : "ประเภท",
"Type of animal links to show" : "",
"U (Unwanted Cat)" : "",
"UK Giftaid" : "",
"URL" : "",
"UUUUUUUUUU or UUUU = unique number" : "",
"Unable to Afford" : "",
"Unable to Cope" : "",
"Unaltered" : "",
"Unaltered Adopted Animals" : "",
"Unaltered Dog - 1 year" : "",
"Unaltered Dog - 3 year" : "",
"Unavailable" : "",
"Under {0} weeks old" : "",
"Unit" : "",
"Unit Price" : "",
"Unit within the location, eg: pen or cage number" : "",
"Units" : "",
"Unknown" : "ไม่ทราบ",
"Unknown microchip brand" : "",
"Unpaid Fines" : "",
"Unreserved" : "",
"Unsaved Changes" : "",
"Unspecified" : "",
"Unsuitable Accomodation" : "",
"Up for adoption" : "",
"Upcoming medical items" : "",
"Update" : "",
"Update publishing options" : "",
"Update system options" : "",
"Update the daily boarding cost for this animal" : "",
"Updated database to version {0}" : "",
"Updated." : "",
"Updating..." : "",
"Upload" : "",
"Upload Document" : "",
"Upload ODT" : "",
"Upload Photo" : "",
"Upload a new OpenOffice template" : "",
"Upload all available images for animals" : "",
"Upload an SQL script" : "",
"Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "",
"Uploading..." : "",
"Urgencies" : "",
"Urgency" : "",
"Urgent" : "",
"Usage Date" : "",
"Usage Type" : "",
"Usage explains why this stock record was created or adjusted. Usage records will only be created if the balance changes." : "",
"Use Automatic Insurance Numbers" : "",
"Use HTML5 client side image scaling where available to speed up image uploads" : "",
"Use SQL Interface" : "",
"Use a single breed field" : "",
"Use animal comments" : "",
"Use fancy tooltips" : "",
"Use notes from preferred photo" : "",
"Use the icon in the lower right of notes fields to view them in a separate window." : "",
"User Accounts" : "",
"User Roles" : "",
"User accounts that will only ever call the Service API should set this to No." : "",
"User roles" : "",
"Username" : "",
"Username '{0}' already exists" : "",
"Users" : "",
"Users need a username, password and at least one role or the superuser flag setting." : "",
"Vacation" : "ที่ตั้ง",
"Vaccinate" : "",
"Vaccinate Animal" : "",
"Vaccination" : "",
"Vaccination Book" : "",
"Vaccination Given" : "ประเภทวัคซีน",
"Vaccination Types" : "ประเภทวัคซีน",
"Vaccination book" : "",
"Vaccination marked as given for {0} - {1}" : "",
"Vaccinations" : "",
"Vaccinations need an animal and at least a required date." : "",
"Vaccinations require an animal" : "",
"Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Transport: {3}, Costs: {4}, Total Costs: {5} Total Payments: {6}, Balance: {7}" : "",
"Valid tokens for the subject and text" : "",
"Value" : "",
"Various" : "",
"Vertical Pitch" : "",
"Very Large" : "ใหญ่มาก",
"Vet" : "",
"Vet Visit" : "",
"Victim" : "",
"Victim Name" : "",
"Video Link" : "",
"Vietnamese Pot Bellied" : "",
"View" : "",
"View Accounts" : "",
"View Animals" : "ดูสัตว์ต่างๆ",
"View Audit Trail" : "",
"View Citations" : "",
"View Clinic Appointment" : "",
"View Cost" : "",
"View Diary" : "",
"View Diets" : "",
"View Document" : "",
"View Document Repository" : "",
"View Found Animal" : "สัตว์ที่พบเจอ",
"View Incidents" : "",
"View Incoming Forms" : "",
"View Investigations" : "",
"View Licenses" : "",
"View Litter" : "",
"View Log" : "ปูมบันทึกใหม่",
"View Lost Animal" : "สัตว์ที่พบเจอ",
"View Manual" : "",
"View Media" : "",
"View Medical Records" : "",
"View Movement" : "",
"View PDF" : "",
"View Payments" : "",
"View Person" : "",
"View Person Links" : "",
"View Report" : "",
"View Roles" : "",
"View Rota" : "",
"View Shelter Animals" : "",
"View Staff Person Records" : "",
"View Stock" : "",
"View Tests" : "",
"View Training Videos" : "",
"View Transport" : "",
"View Trap Loans" : "",
"View Vaccinations" : "",
"View Volunteer Person Records" : "",
"View Vouchers" : "",
"View Waiting List" : "ดูรายชื่อที่กำลังรอ",
"View animals matching publishing options" : "",
"View littermates" : "",
"View matching records" : "",
"View media" : "",
"View publishing logs" : "",
"Visual Theme" : "",
"Vizsla" : "",
"Volunteer" : "",
"Voucher Types" : "",
"Vouchers" : "",
"Vouchers need an issue and expiry date." : "",
"WARNING: This animal has not been microchipped" : "",
"WARNING: This animal is over 6 months old and has not been neutered/spayed" : "",
"Waiting" : "รายชื่อที่กำลังคอย",
"Waiting List" : "รายชื่อที่กำลังคอย",
"Waiting List - Additional" : "",
"Waiting List - Details" : "",
"Waiting List - Removal" : "",
"Waiting List Contact" : "",
"Waiting List Donation" : "",
"Waiting List {0}" : "รายชื่อที่กำลังรอ: {0}",
"Waiting List: {0}" : "รายชื่อที่กำลังรอ: {0}",
"Waiting Room" : "รายชื่อที่กำลังคอย",
"Waiting for documents..." : "",
"Waiting list donations" : "",
"Waiting list entries matching '{0}'." : "",
"Waiting list entries must have a contact" : "",
"Waiting list entry for {0} ({1})" : "",
"Waiting list entry successfully added." : "",
"Waiting list urgency update period in days" : "",
"Warmblood" : "",
"Warn if the name of the new animal is similar to one entered recently" : "",
"Warn when adopting an animal who has not been microchipped" : "",
"Warn when adopting an unaltered animal" : "",
"Warn when adopting to a person who has been banned from adopting animals" : "",
"Warn when adopting to a person who has not been homechecked" : "",
"Warn when adopting to a person who has previously brought an animal to the shelter" : "",
"Warn when adopting to a person who lives in the same area as the original owner" : "",
"Warn when creating multiple reservations on the same animal" : "",
"Warnings" : "คำเตือน",
"Wasted" : "",
"Water Bills" : "",
"We" : "",
"Wed" : "",
"Wednesday" : "",
"Week" : "",
"Week beginning {0}" : "",
"Weekly" : "รายสัปดาห์",
"Weight" : "น้ำหนัก",
"Weimaraner" : "",
"Welcome!" : "",
"Welsh Corgi" : "",
"Welsh Springer Spaniel" : "",
"Welsh Terrier" : "",
"West Highland White Terrier Westie" : "",
"Wheaten Terrier" : "",
"When" : "",
"When ASM should stop showing this message" : "",
"When I change the location of an animal, make a note of it in the log with this type" : "",
"When I change the weight of an animal, make a note of it in the log with this type" : "",
"When I generate a document, make a note of it in the log with this type" : "",
"When I mark an animal held, make a note of it in the log with this type" : "",
"When I set a new GDPR Opt-In contact option, make a note of it in the log with this type" : "",
"When a message is created, email it to each matching user" : "",
"When creating payments from the Move menu screens, mark them due instead of received" : "",
"When displaying calendars, the first day of the week is" : "",
"When displaying person names, use the format" : "",
"When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "",
"When entering vaccinations, default the last batch number and manufacturer for that type" : "",
"When matching lost animals, include shelter animals" : "",
"When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "",
"When receiving multiple payments, allow the due and received dates to be set" : "",
"When receiving payments, allow a quantity and unit price to be set" : "",
"When receiving payments, allow recording of sales tax with a default rate of" : "",
"When receiving payments, allow the deposit account to be overridden" : "",
"When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "",
"When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "",
"Where this animal is located within the shelter" : "",
"Whippet" : "",
"White" : "ขาว",
"White German Shepherd" : "",
"White and Black" : "ขาวและดำ",
"White and Brindle" : "",
"White and Brown" : "ขาวและน้ำตาล",
"White and Grey" : "ขาวและเทา",
"White and Liver" : "",
"White and Tabby" : "",
"White and Tan" : "",
"White and Torti" : "",
"Will this owner give a donation?" : "",
"Wire-haired Pointing Griffon" : "",
"Wirehaired Terrier" : "",
"With Vet" : "",
"With overnight batch" : "",
"Withdrawal" : "",
"Wk" : "",
"Work" : "",
"Work Phone" : "",
"Work Types" : "",
"XXX or XX = number unique for this year" : "",
"Xoloitzcuintle/Mexican Hairless" : "",
"YY or YYYY = current year" : "",
"Yellow Labrador Retriever" : "",
"Yellow and Grey" : "เหลืองและเทา",
"Yes" : "ใช่",
"Yes/No" : "ใช่/ไม่",
"Yes/No/Unknown" : "",
"Yorkshire Terrier Yorkie" : "",
"You can bookmark search results, animals, people and most data entry screens." : "",
"You can drag and drop animals in shelter view to change their locations." : "",
"You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "",
"You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "",
"You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "",
"You can set a default amount for different payment types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "",
"You can sort tables by clicking on the column headings." : "",
"You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "",
"You can use incoming forms to create new records or attach them to existing records." : "",
"You can't have a return without a movement." : "",
"You didn't specify any search criteria, so an on-shelter search was assumed." : "",
"You have unsaved changes, are you sure you want to leave this page?" : "",
"You must supply a code." : "",
"Young Adult" : "",
"Your CSV file should have a header row with field names ASM recognises." : "",
"Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "",
"Zipcode" : "",
"Zipcode contains" : "",
"[None]" : "[ไม่มี]",
"after connecting, chdir to" : "",
"and" : "",
"are sent to" : "",
"at" : "",
"cm" : "",
"days" : "วัน",
"estimate" : "",
"filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "",
"inches" : "",
"invalid" : "",
"kg" : "",
"lb" : "",
"less" : "",
"mins" : "",
"months" : "เดือน",
"more" : "",
"on" : "",
"or" : "",
"or estimated age in years" : "",
"oz" : "",
"to" : "",
"today" : "",
"treatments" : "",
"treatments, every" : "",
"weekdays" : "",
"weeks" : "สัปดาห์",
"weeks after last contact." : "",
"years" : "ปี",
"yesterday" : "",
"{0} (under {1} months)" : "",
"{0} - {1} ({2} {3} aged {4})" : "",
"{0} - {1} {2}" : "",
"{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "",
"{0} animals successfully updated." : "",
"{0} cannot be blank" : "",
"{0} fine, paid" : "",
"{0} fine, unpaid" : "",
"{0} incurred in costs" : "",
"{0} is running ({1}% complete)." : "",
"{0} payment records created." : "",
"{0} received" : "",
"{0} record(s) match the mail merge." : "",
"{0} results." : "",
"{0} rows affected." : "",
"{0} selected" : "",
"{0} treatments every {1} days" : "",
"{0} treatments every {1} months" : "",
"{0} treatments every {1} weekdays" : "",
"{0} treatments every {1} weeks" : "",
"{0} treatments every {1} years" : "",
"{0} {1} ({2} treatments)" : "",
"{0} {1} aged {2}" : "",
"{0} {1} {2} aged {3}" : "",
"{0} {1}: Moved from {2} to {3}" : "",
"{0} {1}: adopted by {2}" : "",
"{0} {1}: altered" : "",
"{0} {1}: available for adoption" : "",
"{0} {1}: died ({2})" : "",
"{0} {1}: entered the shelter" : "",
"{0} {1}: escaped" : "",
"{0} {1}: euthanised ({2})" : "",
"{0} {1}: fostered to {2}" : "",
"{0} {1}: held" : "",
"{0} {1}: microchipped" : "",
"{0} {1}: not available for adoption" : "",
"{0} {1}: quarantined" : "",
"{0} {1}: received {2}" : "",
"{0} {1}: reclaimed by {2}" : "",
"{0} {1}: released" : "",
"{0} {1}: reserved by {2}" : "",
"{0} {1}: returned by {2}" : "",
"{0} {1}: sent to retailer {2}" : "",
"{0} {1}: stolen" : "",
"{0} {1}: tested positive for FIV" : "",
"{0} {1}: tested positive for FeLV" : "",
"{0} {1}: tested positive for Heartworm" : "",
"{0} {1}: transferred to {2}" : "",
"{0}, Week {1}" : "",
"{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "",
"{0}: closed {1} ({2})" : "",
"{0}: opened {1}" : "",
"{0}: waiting list - {1}" : "",
"{0}: {1} {2} - {3} {4}" : "",
"{2}: found in {1}: {0}" : "",
"{2}: lost in {1}: {0}" : "",
"{plural0} animal as dead on arrival" : "",
"{plural0} animal control call due for followup today" : "",
"{plural0} animal died" : "",
"{plural0} animal entered the shelter" : "",
"{plural0} animal has a hold ending today" : "",
"{plural0} animal has been on the shelter longer than {0} months" : "",
"{plural0} animal is not available for adoption" : "",
"{plural0} animal was adopted" : "",
"{plural0} animal was euthanized" : "",
"{plural0} animal was reclaimed by its owner" : "",
"{plural0} animal was transferred to another shelter" : "",
"{plural0} day." : "",
"{plural0} incomplete animal control call" : "",
"{plural0} item of stock expires in the next month" : "",
"{plural0} item of stock has expired" : "",
"{plural0} medical treatment needs to be administered today" : "",
"{plural0} month." : "",
"{plural0} new online form submission" : "",
"{plural0} person has an overdue payment" : "",
"{plural0} person with an active reservation has not been homechecked" : "",
"{plural0} potential match for a lost animal" : "",
"{plural0} recent publisher run had errors" : "",
"{plural0} reservation has been active over a week without adoption" : "",
"{plural0} result found in {1} seconds. Order: {2}" : "",
"{plural0} shelter animal has not been microchipped" : "",
"{plural0} shelter animal has people looking for them" : "",
"{plural0} test needs to be performed today" : "",
"{plural0} transport does not have a driver assigned" : "",
"{plural0} trap is overdue for return" : "",
"{plural0} trial adoption has ended" : "",
"{plural0} unaltered animal has been adopted in the last month" : "",
"{plural0} undispatched animal control call" : "",
"{plural0} unpaid fine" : "",
"{plural0} urgent entry on the waiting list" : "",
"{plural0} vaccination has expired" : "",
"{plural0} vaccination needs to be administered today" : "",
"{plural0} week." : "",
"{plural0} year." : "",
"{plural1} animal control calls due for followup today" : "",
"{plural1} animals are not available for adoption" : "",
"{plural1} animals died" : "",
"{plural1} animals entered the shelter" : "",
"{plural1} animals have been on the shelter longer than {0} months" : "",
"{plural1} animals have holds ending today" : "",
"{plural1} animals were adopted" : "",
"{plural1} animals were dead on arrival" : "",
"{plural1} animals were euthanized" : "",
"{plural1} animals were reclaimed by their owners" : "",
"{plural1} animals were transferred to other shelters" : "",
"{plural1} days." : "",
"{plural1} incomplete animal control calls" : "",
"{plural1} items of stock expire in the next month" : "",
"{plural1} items of stock have expired" : "",
"{plural1} medical treatments need to be administered today" : "",
"{plural1} months." : "",
"{plural1} new online form submissions" : "",
"{plural1} people have overdue payments" : "",
"{plural1} people with active reservations have not been homechecked" : "",
"{plural1} potential matches for lost animals" : "",
"{plural1} recent publisher runs had errors" : "",
"{plural1} reservations have been active over a week without adoption" : "",
"{plural1} results found in {1} seconds. Order: {2}" : "",
"{plural1} shelter animals have not been microchipped" : "",
"{plural1} shelter animals have people looking for them" : "",
"{plural1} tests need to be performed today" : "",
"{plural1} transports do not have a driver assigned" : "",
"{plural1} traps are overdue for return" : "",
"{plural1} trial adoptions have ended" : "",
"{plural1} unaltered animals have been adopted in the last month" : "",
"{plural1} undispatched animal control calls" : "",
"{plural1} unpaid fines" : "",
"{plural1} urgent entries on the waiting list" : "",
"{plural1} vaccinations have expired" : "",
"{plural1} vaccinations need to be administered today" : "",
"{plural1} weeks." : "",
"{plural1} years." : "",
"{plural2} animal control calls due for followup today" : "",
"{plural2} animals are not available for adoption" : "",
"{plural2} animals died" : "",
"{plural2} animals entered the shelter" : "",
"{plural2} animals have been on the shelter longer than {0} months" : "",
"{plural2} animals have holds ending today" : "",
"{plural2} animals were adopted" : "",
"{plural2} animals were dead on arrival" : "",
"{plural2} animals were euthanized" : "",
"{plural2} animals were reclaimed by their owners" : "",
"{plural2} animals were transferred to other shelters" : "",
"{plural2} days." : "",
"{plural2} incomplete animal control calls" : "",
"{plural2} items of stock expire in the next month" : "",
"{plural2} items of stock have expired" : "",
"{plural2} medical treatments need to be administered today" : "",
"{plural2} months." : "",
"{plural2} new online form submissions" : "",
"{plural2} people have overdue payments" : "",
"{plural2} people with active reservations have not been homechecked" : "",
"{plural2} potential matches for lost animals" : "",
"{plural2} recent publisher runs had errors" : "",
"{plural2} reservations have been active over a week without adoption" : "",
"{plural2} results found in {1} seconds. Order: {2}" : "",
"{plural2} shelter animals have not been microchipped" : "",
"{plural2} shelter animals have people looking for them" : "",
"{plural2} tests need to be performed today" : "",
"{plural2} transports do not have a driver assigned" : "",
"{plural2} traps are overdue for return" : "",
"{plural2} trial adoptions have ended" : "",
"{plural2} unaltered animals have been adopted in the last month" : "",
"{plural2} undispatched animal control calls" : "",
"{plural2} unpaid fines" : "",
"{plural2} urgent entries on the waiting list" : "",
"{plural2} vaccinations have expired" : "",
"{plural2} vaccinations need to be administered today" : "",
"{plural2} weeks." : "",
"{plural2} years." : "",
"{plural3} animal control calls due for followup today" : "",
"{plural3} animals are not available for adoption" : "",
"{plural3} animals died" : "",
"{plural3} animals entered the shelter" : "",
"{plural3} animals have been on the shelter longer than {0} months" : "",
"{plural3} animals have holds ending today" : "",
"{plural3} animals were adopted" : "",
"{plural3} animals were dead on arrival" : "",
"{plural3} animals were euthanized" : "",
"{plural3} animals were reclaimed by their owners" : "",
"{plural3} animals were transferred to other shelters" : "",
"{plural3} days." : "",
"{plural3} incomplete animal control calls" : "",
"{plural3} items of stock expire in the next month" : "",
"{plural3} items of stock have expired" : "",
"{plural3} medical treatments need to be administered today" : "",
"{plural3} months." : "",
"{plural3} new online form submissions" : "",
"{plural3} people have overdue payments" : "",
"{plural3} people with active reservations have not been homechecked" : "",
"{plural3} potential matches for lost animals" : "",
"{plural3} recent publisher runs had errors" : "",
"{plural3} reservations have been active over a week without adoption" : "",
"{plural3} results found in {1} seconds. Order: {2}" : "",
"{plural3} shelter animals have not been microchipped" : "",
"{plural3} shelter animals have people looking for them" : "",
"{plural3} tests need to be performed today" : "",
"{plural3} transports do not have a driver assigned" : "",
"{plural3} traps are overdue for return" : "",
"{plural3} trial adoptions have ended" : "",
"{plural3} unaltered animals have been adopted in the last month" : "",
"{plural3} undispatched animal control calls" : "",
"{plural3} unpaid fines" : "",
"{plural3} urgent entries on the waiting list" : "",
"{plural3} vaccinations have expired" : "",
"{plural3} vaccinations need to be administered today" : "",
"{plural3} weeks." : "",
"{plural3} years." : ""
}
| gpl-3.0 | 5,236,897,417,865,618,000 | 36.076176 | 288 | 0.605891 | false |
ammaradil/fibonacci | Lib/site-packages/pip/_vendor/requests/__init__.py | 412 | 1861 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2015 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.7.0'
__build__ = 0x020700
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| mit | 7,766,339,881,211,058,000 | 23.168831 | 77 | 0.631918 | false |
wolverineav/neutron | neutron/db/migration/autogen.py | 11 | 3994 | # Copyright (c) 2015 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic.operations import ops
from alembic.util import Dispatcher
from alembic.util import rev_id as new_rev_id
from neutron.db.migration import cli
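# Maps each alembic op type to one of the decider functions registered below
# with @_ec_dispatcher.dispatch_for(); a decider returns the directive when it
# belongs to the given expand/contract phase, or None to drop it.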
_ec_dispatcher = Dispatcher()
def process_revision_directives(context, revision, directives):
if cli._use_separate_migration_branches(context.config):
directives[:] = [
directive for directive in _assign_directives(context, directives)
]
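# This hook is what an alembic env.py passes to the migration context so that
# autogenerate output is split per phase, e.g. (sketch):
#   context.configure(connection=conn, target_metadata=metadata,
#                     process_revision_directives=process_revision_directives)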
def _assign_directives(context, directives, phase=None):
for directive in directives:
decider = _ec_dispatcher.dispatch(directive)
if phase is None:
phases = cli.MIGRATION_BRANCHES
else:
phases = (phase,)
for phase in phases:
decided = decider(context, directive, phase)
if decided:
yield decided
@_ec_dispatcher.dispatch_for(ops.MigrationScript)
def _migration_script_ops(context, directive, phase):
"""Generate a new ops.MigrationScript() for a given phase.
E.g. given an ops.MigrationScript() directive from a vanilla autogenerate
and an expand/contract phase name, produce a new ops.MigrationScript()
which contains only those sub-directives appropriate to "expand" or
"contract". Also ensure that the branch directory exists and that
the correct branch labels/depends_on/head revision are set up.
"""
version_path = cli._get_version_branch_path(
context.config, release=cli.CURRENT_RELEASE, branch=phase)
autogen_kwargs = {}
cli._check_bootstrap_new_branch(phase, version_path, autogen_kwargs)
op = ops.MigrationScript(
new_rev_id(),
ops.UpgradeOps(ops=[
d for d in _assign_directives(
context, directive.upgrade_ops.ops, phase)
]),
ops.DowngradeOps(ops=[]),
message=directive.message,
**autogen_kwargs
)
if not op.upgrade_ops.is_empty():
return op
@_ec_dispatcher.dispatch_for(ops.AddConstraintOp)
@_ec_dispatcher.dispatch_for(ops.CreateIndexOp)
@_ec_dispatcher.dispatch_for(ops.CreateTableOp)
@_ec_dispatcher.dispatch_for(ops.AddColumnOp)
def _expands(context, directive, phase):
if phase == 'expand':
return directive
else:
return None
@_ec_dispatcher.dispatch_for(ops.DropConstraintOp)
@_ec_dispatcher.dispatch_for(ops.DropIndexOp)
@_ec_dispatcher.dispatch_for(ops.DropTableOp)
@_ec_dispatcher.dispatch_for(ops.DropColumnOp)
def _contracts(context, directive, phase):
if phase == 'contract':
return directive
else:
return None
@_ec_dispatcher.dispatch_for(ops.AlterColumnOp)
def _alter_column(context, directive, phase):
is_expand = phase == 'expand'
if is_expand and (
directive.modify_nullable is True
):
return directive
elif not is_expand and (
directive.modify_nullable is False
):
return directive
else:
raise NotImplementedError(
"Don't know if operation is an expand or "
"contract at the moment: %s" % directive)
@_ec_dispatcher.dispatch_for(ops.ModifyTableOps)
def _modify_table_ops(context, directive, phase):
op = ops.ModifyTableOps(
directive.table_name,
ops=[
d for d in _assign_directives(context, directive.ops, phase)
],
schema=directive.schema)
if not op.is_empty():
return op
| apache-2.0 | 1,266,508,480,665,518,800 | 31.209677 | 78 | 0.674011 | false |
Swimlane/sw-python-client | functional_tests/driver_tests/test_user_group_fields.py | 1 | 57102 | import pytest
from swimlane import exceptions
@pytest.fixture(autouse=True, scope='module')
def my_fixture(helpers):
# setup stuff
defaultApp = 'user group fields'
pytest.swimlane_instance = helpers.swimlane_instance
pytest.app, pytest.appid = helpers.findCreateApp(defaultApp)
pytest.testUsers = list(pytest.usersCreated.keys())
pytest.testGroups = list(pytest.groupsCreated.keys())
yield
# teardown stuff
helpers.cleanupData()
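# The module-scoped fixture above provisions the 'user group fields' test app
# once and records the seeded users/groups; the tests below resolve them live
# via swimlane_instance.users.get(display_name=...) / groups.get(name=...).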
class TestRequiredUserGroupField:
def test_required_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
assert theRecord["Required User/Groups"] == swimUser
def test_required_field_not_set(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(**{"User/Groups": swimUser})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Required field "Required User/Groups" is not set' % pytest.app.acronym
def test_required_field_not_set_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Required User/Groups"] = None
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord.save()
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Required field "Required User/Groups" is not set' % theRecord.tracking_id
class TestUserGroupField:
def test_user_group_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "User/Groups": swimUser2})
assert theRecord["User/Groups"].id == swimUser2.id
def test_user_group_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["User/Groups"] = swimUser2
def test_user_group_field_bad_type_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "User/Groups": swimGroup})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `User/Groups`' % (
pytest.app.acronym, swimGroup.name)
def test_user_group_field_on_save_bad_type_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["User/Groups"] = swimGroup
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `User/Groups`' % (
theRecord.tracking_id, swimGroup.name)
class TestGroupsOnlyField:
def test_groups_only_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Groups Only": swimGroup})
assert theRecord["Groups Only"] == swimGroup
def test_groups_only_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Groups Only"] = swimGroup
def test_groups_only_field_bad_type_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Groups Only": swimUser2})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Groups Only`' % (
pytest.app.acronym, swimUser2.username)
def test_groups_only_field_on_save_bad_type_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Groups Only"] = swimUser2
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Groups Only`' % (
theRecord.tracking_id, swimUser2.username)
class TestReadOnlyUserGroupsField:
def test_read_only_user_groups_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Read-only User/Groups": swimUser2})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Cannot set readonly field \'Read-only User/Groups\'' % pytest.app.acronym
def test_read_only_user_groups_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Read-only User/Groups"] = swimUser2
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Cannot set readonly field \'Read-only User/Groups\'' % theRecord.tracking_id
class TestCreatedByField:
def test_created_by_field_value(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
assert theRecord["Created by"] == swimUser
@pytest.mark.xfail(reason="SPT-6352: This should fail, that the Created by is read only.")
def test_created_by_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Created by": swimUser2})
assert theRecord["Created by"] == swimUser
@pytest.mark.xfail(reason="SPT-6352: This should fail, that the Created by is read only.")
def test_created_by_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Created by"] = swimUser2
theRecord.save()
assert theRecord["Created by"] == swimUser
class TestLastUpdatedByField:
def test_last_updated_by_field_value(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
assert theRecord["Last updated by"] == swimUser
@pytest.mark.xfail(reason="SPT-6352: This should fail, that the last updated by is read only.")
def test_last_updated_by_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Last updated by": swimUser2})
assert theRecord["Last updated by"] == swimUser
@pytest.mark.xfail(reason="SPT-6352: This should fail, that the Last updated by is read only.")
def test_last_updated_by_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Last updated by"] = swimUser2
theRecord.save()
assert theRecord["Last updated by"] == swimUser
class TestAllUsersAndGroupsField:
def test_all_users_and_groups_field_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "All Users and Groups": swimUser2})
assert theRecord["All Users and Groups"].id == swimUser2.id
def test_all_users_and_groups_field_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "All Users and Groups": swimGroup})
assert theRecord["All Users and Groups"] == swimGroup
def test_all_users_and_groups_field_user_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["All Users and Groups"] = swimUser2
theRecord.save()
def test_all_users_and_groups_field_group_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["All Users and Groups"] = swimGroup
theRecord.save()
def test_all_users_and_groups_field_on_save_bad_value_type(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["All Users and Groups"] = {"name": swimGroup}
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Field \'All Users and Groups\' expects one of \'UserGroup\', got \'dict\' instead' % theRecord.tracking_id
class TestSelectedGroupsField:
def test_selected_groups_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Selected Groups": swimGroup})
assert theRecord["Selected Groups"] == swimGroup
def test_selected_groups_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Selected Groups"] = swimGroup
theRecord.save()
def test_selected_groups_field_wrong_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupFour")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Selected Groups": swimGroup})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Selected Groups`' % (
pytest.app.acronym, swimGroup.name)
def test_selected_groups_field_wrong_group_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupThree")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Selected Groups"] = swimGroup
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Selected Groups`' % (
theRecord.tracking_id, swimGroup.name)
def test_selected_groups_field_bad_type_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Selected Groups": swimUser2})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Selected Groups`' % (
pytest.app.acronym, swimUser2.username)
def test_selected_groups_field_on_save_bad_type_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Selected Groups"] = swimUser2
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Selected Groups`' % (
theRecord.tracking_id, swimUser2.username)
class TestSelectedUsersField:
def test_selected_users_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Selected Users": swimUser2})
assert theRecord["Selected Users"].id == swimUser2.id
def test_selected_users_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userFour")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Selected Users"] = swimUser2
theRecord.save()
def test_selected_users_field_wrong_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Selected Users": swimUser2})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Selected Users`' % (
pytest.app.acronym, swimUser2.username)
def test_selected_users_field_wrong_user_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userThree")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Selected Users"] = swimUser2
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Selected Users`' % (
theRecord.tracking_id, swimUser2.username)
def test_selected_users_field_bad_type_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Selected Users": swimGroup})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Selected Users`' % (
pytest.app.acronym, swimGroup.name)
def test_selected_users_field_on_save_bad_type_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Selected Users"] = swimGroup
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Selected Users`' % (
theRecord.tracking_id, swimGroup.name)
class TestSubgroupsOfGroupField:
def test_sub_groups_of_group_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Sub-groups of Group": swimGroup})
assert theRecord["Sub-groups of Group"] == swimGroup
def test_sub_groups_of_group_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Sub-groups of Group"] = swimGroup
theRecord.save()
def test_sub_groups_of_group_field_parent_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupCombo")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Sub-groups of Group": swimGroup})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Sub-groups of Group`' % (
pytest.app.acronym, swimGroup.name)
def test_sub_groups_of_group_field_parent_group_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupCombo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Sub-groups of Group"] = swimGroup
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Sub-groups of Group`' % (
theRecord.tracking_id, swimGroup.name)
def test_sub_groups_of_group_field_other_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupFour")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Sub-groups of Group": swimGroup})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Sub-groups of Group`' % (
pytest.app.acronym, swimGroup.name)
def test_sub_groups_of_group_field_other_group_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupFour")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Sub-groups of Group"] = swimGroup
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Sub-groups of Group`' % (
theRecord.tracking_id, swimGroup.name)
def test_sub_groups_of_group_field_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Sub-groups of Group": swimUser2})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Sub-groups of Group`' % (
pytest.app.acronym, swimUser2.username)
def test_sub_groups_of_group_field_user_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Sub-groups of Group"] = swimUser2
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Sub-groups of Group`' % (
theRecord.tracking_id, swimUser2.username)
class TestUsersMembersOfGroupField:
@pytest.mark.xfail(reason="SPT-6355: Says the user who belongs to the group is not a valid selection.")
def test_users_members_of_group_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Users Members of Group": swimUser2})
assert theRecord["Users Members of Group"] == swimUser2
@pytest.mark.xfail(reason="SPT-6355: Says the user who belongs to the group is not a valid selection.")
def test_users_members_of_group_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Users Members of Group"] = swimUser2
theRecord.save()
def test_users_members_of_group_field_user_not_member(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userFour")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Users Members of Group": swimUser2})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Users Members of Group`' % (
pytest.app.acronym, swimUser2.username)
def test_users_members_of_group_field_not_member_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userFour")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Users Members of Group"] = swimUser2
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Users Members of Group`' % (
theRecord.tracking_id, swimUser2.username)
def test_users_members_of_group_field_user_parent_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Users Members of Group": swimGroup})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Users Members of Group`' % (pytest.app.acronym, swimGroup.name)
def test_users_members_of_group_field_parent_group_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Users Members of Group"] = swimGroup
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Users Members of Group`' % (
theRecord.tracking_id, swimGroup.name)
class TestMultiSelectUsersField:
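    # These tests exercise the multi-select field cursor: assigning a list
    # replaces the whole selection, while .select()/.deselect() on the field
    # value add or remove individual members before record.save().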
def test_multi_select_users_field(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser2]})
assert len(theRecord["Multi-select User/Groups"]) == 1
for member in theRecord["Multi-select User/Groups"]:
assert member.id == swimUser2.id
def test_multi_select_users_field_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Multi-select User/Groups"] = [swimUser2]
theRecord.save()
# Should we handle this or say it has to be a list/array?
def test_multi_select_users_field_single_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
with pytest.raises(TypeError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": swimUser2})
assert str(excinfo.value) == '\'User\' object is not iterable'
# Should we handle this or say it has to be a list/array?
def test_multi_select_users_field_single_user_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(TypeError) as excinfo:
theRecord["Multi-select User/Groups"] = swimUser2
assert str(excinfo.value) == '\'User\' object is not iterable'
def test_multi_select_users_field_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimGroup]})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % (pytest.app.acronym, swimGroup.name)
def test_multi_select_users_field_group_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Multi-select User/Groups"] = [swimGroup]
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % (
theRecord.tracking_id, swimGroup.name)
def test_multi_select_users_field_mix_users_groups(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser2, swimGroup]})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % (pytest.app.acronym, swimGroup.name)
def test_multi_select_users_field_mix_users_groups_on_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Multi-select User/Groups"] = [swimUser2, swimGroup]
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % (
theRecord.tracking_id, swimGroup.name)
@pytest.mark.xfail(reason="SPT-6354: This works for the adminuser, but not the others..")
def test_multi_select_users_field_deselect_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]})
theRecord["Multi-select User/Groups"].deselect(swimUser2)
theRecord.save()
updatedRecord = pytest.app.records.get(id=theRecord.id)
assert len(updatedRecord["Multi-select User/Groups"]) == 1
assert updatedRecord["Multi-select User/Groups"][0].id == swimUser.id
def test_multi_select_users_field_deselect_other_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
swimUser3 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]})
with pytest.raises(KeyError) as excinfo:
theRecord["Multi-select User/Groups"].deselect(swimUser3)
assert str(excinfo.value) == '<User: %s>' % swimUser3.username
theRecord.save()
updatedRecord = pytest.app.records.get(id=theRecord.id)
assert len(updatedRecord["Multi-select User/Groups"]) == 2
userIds = [updatedRecord["Multi-select User/Groups"][1].id,
updatedRecord["Multi-select User/Groups"][0].id]
assert swimUser.id in userIds
assert swimUser2.id in userIds
def test_multi_select_users_field_select_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
swimUser3 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]})
theRecord["Multi-select User/Groups"].select(swimUser3)
theRecord.save()
updatedRecord = pytest.app.records.get(id=theRecord.id)
assert len(updatedRecord["Multi-select User/Groups"]) == 3
userIds = [updatedRecord["Multi-select User/Groups"][1].id,
updatedRecord["Multi-select User/Groups"][0].id, updatedRecord["Multi-select User/Groups"][2].id]
assert swimUser3.id in userIds
assert swimUser2.id in userIds
assert swimUser.id in userIds
def test_multi_select_users_field_select_existing_user(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]})
theRecord["Multi-select User/Groups"].select(swimUser2)
theRecord.save()
updatedRecord = pytest.app.records.get(id=theRecord.id)
assert len(updatedRecord["Multi-select User/Groups"]) == 2
userIds = [updatedRecord["Multi-select User/Groups"][1].id,
updatedRecord["Multi-select User/Groups"][0].id]
assert swimUser.id in userIds
assert swimUser2.id in userIds
def test_multi_select_users_field_select_group(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name=pytest.testUsers[pytest.fake.random_int(0, len(pytest.testUsers)-1)])
swimGroup = pytest.swimlane_instance.groups.get(
name=pytest.testGroups[pytest.fake.random_int(0, len(pytest.testGroups)-1)])
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select User/Groups": [swimUser, swimUser2]})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Multi-select User/Groups"].select(swimGroup)
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Multi-select User/Groups`' % (
theRecord.tracking_id, swimGroup.name)
theRecord.save()
updatedRecord = pytest.app.records.get(id=theRecord.id)
assert len(updatedRecord["Multi-select User/Groups"]) == 2
userIds = [updatedRecord["Multi-select User/Groups"][1].id,
updatedRecord["Multi-select User/Groups"][0].id]
assert swimUser.id in userIds
assert swimUser2.id in userIds
class TestMultiSelectSpecificUsersAndGroupsField:
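    # The test app evidently restricts this field to specific members
    # (PYTHON-userOne and the PYTHON-groupTwo group), so the *_invalid_*
    # tests below expect ValidationError for any other user or group.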
def test_multi_select_specific_users_groups_field_user_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimUser2]})
assert len(theRecord["Multi-select Specific Users and Groups"]) == 1
for member in theRecord["Multi-select Specific Users and Groups"]:
assert member.id == swimUser2.id
def test_multi_select_specific_users_groups_field_group_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup]})
assert len(theRecord["Multi-select Specific Users and Groups"]) == 1
for member in theRecord["Multi-select Specific Users and Groups"]:
assert member.id == swimGroup.id
def test_multi_select_specific_users_groups_field_user_and_group_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimUser2, swimGroup]})
assert len(theRecord["Multi-select Specific Users and Groups"]) == 2
for member in theRecord["Multi-select Specific Users and Groups"]:
assert member.id in [swimUser2.id, swimGroup.id]
def test_multi_select_specific_users_groups_field_user_and_group_invalid_user_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup, swimUser2]})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (
pytest.app.acronym, swimUser2.username)
def test_multi_select_specific_users_groups_field_user_and_group_invalid_group_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupThree")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup, swimUser2]})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name)
def test_multi_select_specific_users_groups_field_invalid_group_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupFour")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup]})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name)
def test_multi_select_specific_users_groups_field_invalid_group_subgroup_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupThree")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimGroup]})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name)
def test_multi_select_specific_users_groups_field_invalid_user_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimUser2]})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (
pytest.app.acronym, swimUser2.username)
def test_multi_select_specific_users_groups_field_invalid_user_group_member_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userThree")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Multi-select Specific Users and Groups": [swimUser2]})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (
pytest.app.acronym, swimUser2.username)
def test_multi_select_specific_users_groups_field_user_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Multi-select Specific Users and Groups"] = [swimUser2]
theRecord.save()
def test_multi_select_specific_users_groups_field_group_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Multi-select Specific Users and Groups"] = [swimGroup]
theRecord.save()
def test_multi_select_specific_users_groups_field_user_and_group_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Multi-select Specific Users and Groups"] = [swimUser2, swimGroup]
theRecord.save()
def test_multi_select_specific_users_groups_field_invalid_group_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupFour")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Multi-select Specific Users and Groups"] = [swimGroup]
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (
theRecord.tracking_id, swimGroup.name)
def test_multi_select_specific_users_groups_field_invalid_group_subgroup_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupThree")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Multi-select Specific Users and Groups"] = [swimGroup]
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (
theRecord.tracking_id, swimGroup.name)
def test_multi_select_specific_users_groups_field_invalid_user_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Multi-select Specific Users and Groups"] = [swimUser2]
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (
theRecord.tracking_id, swimUser2.username)
def test_multi_select_specific_users_groups_field_invalid_user_group_member_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userThree")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Multi-select Specific Users and Groups"] = [swimUser2]
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Multi-select Specific Users and Groups`' % (
theRecord.tracking_id, swimUser2.username)
class TestSelectSpecificUsersAndGroupsField:
def test_select_specific_users_groups_field_user_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Specific Users and Groups": swimUser2})
assert theRecord["Specific Users and Groups"].id == swimUser2.id
def test_select_specific_users_groups_field_group_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser, "Specific Users and Groups": swimGroup})
assert theRecord["Specific Users and Groups"] == swimGroup
def test_select_specific_users_groups_field_invalid_group_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupFour")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Specific Users and Groups": swimGroup})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name)
def test_select_specific_users_groups_field_invalid_group_subgroup_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupThree")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Specific Users and Groups": swimGroup})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: Group `%s` is not a valid selection for field `Specific Users and Groups`' % (pytest.app.acronym, swimGroup.name)
def test_select_specific_users_groups_field_invalid_user_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Specific Users and Groups": swimUser2})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Specific Users and Groups`' % (
pytest.app.acronym, swimUser2.username)
def test_select_specific_users_groups_field_invalid_user_group_member_create(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userThree")
with pytest.raises(exceptions.ValidationError) as excinfo:
pytest.app.records.create(
**{"Required User/Groups": swimUser, "Specific Users and Groups": swimUser2})
assert str(excinfo.value) == 'Validation failed for <Record: %s - New>. Reason: User `%s` is not a valid selection for field `Specific Users and Groups`' % (
pytest.app.acronym, swimUser2.username)
def test_select_specific_users_groups_field_user_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userOne")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Specific Users and Groups"] = swimUser2
theRecord.save()
def test_select_specific_users_groups_field_group_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(name="PYTHON-groupTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
theRecord["Specific Users and Groups"] = swimGroup
theRecord.save()
def test_select_specific_users_groups_field_invalid_group_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupFour")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Specific Users and Groups"] = swimGroup
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Specific Users and Groups`' % (
theRecord.tracking_id, swimGroup.name)
def test_select_specific_users_groups_field_invalid_group_subgroup_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimGroup = pytest.swimlane_instance.groups.get(
name="PYTHON-groupThree")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Specific Users and Groups"] = swimGroup
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: Group `%s` is not a valid selection for field `Specific Users and Groups`' % (
theRecord.tracking_id, swimGroup.name)
def test_select_specific_users_groups_field_invalid_user_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userTwo")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Specific Users and Groups"] = swimUser2
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Specific Users and Groups`' % (
theRecord.tracking_id, swimUser2.username)
def test_select_specific_users_groups_field_invalid_user_group_member_save(helpers):
swimUser = pytest.swimlane_instance.users.get(display_name="admin")
swimUser2 = pytest.swimlane_instance.users.get(
display_name="PYTHON-userThree")
theRecord = pytest.app.records.create(
**{"Required User/Groups": swimUser})
with pytest.raises(exceptions.ValidationError) as excinfo:
theRecord["Specific Users and Groups"] = swimUser2
assert str(excinfo.value) == 'Validation failed for <Record: %s>. Reason: User `%s` is not a valid selection for field `Specific Users and Groups`' % (
theRecord.tracking_id, swimUser2.username)
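# Illustrative consolidation sketch (not part of the original suite): the
# invalid-selection cases above all repeat the same create/raise pattern, so
# they could be collapsed with pytest.mark.parametrize. The fixtures, field
# names, and the `pytest`/`exceptions` imports come from this file; the exact
# parametrize wiring here is an assumption, not the project's convention.
@pytest.mark.parametrize("display_name", ["PYTHON-userTwo", "PYTHON-userThree"])
def test_specific_users_groups_rejects_unlisted_users(display_name):
    admin = pytest.swimlane_instance.users.get(display_name="admin")
    candidate = pytest.swimlane_instance.users.get(display_name=display_name)
    with pytest.raises(exceptions.ValidationError):
        pytest.app.records.create(
            **{"Required User/Groups": admin,
               "Specific Users and Groups": candidate})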
| mit | -2,311,866,798,916,364,300 | 59.170706 | 214 | 0.673689 | false |
kamijawa/ogc_server | bayesian/test/test_gaussian_bayesian_network.py | 2 | 2497 | from __future__ import division
import pytest
import os
from bayesian.gaussian import MeansVector, CovarianceMatrix
from bayesian.gaussian_bayesian_network import *
from bayesian.examples.gaussian_bayesian_networks.river import (
f_a, f_b, f_c, f_d)
def pytest_funcarg__river_graph(request):
g = build_graph(f_a, f_b, f_c, f_d)
return g
class TestGBN():
def test_get_joint_parameters(self, river_graph):
mu, sigma = river_graph.get_joint_parameters()
assert mu == MeansVector(
[[3],
[4],
[9],
[14]],
names=['a', 'b', 'c', 'd'])
assert sigma == CovarianceMatrix(
[[4, 4, 8, 12],
[4, 5, 8, 13],
[8, 8, 20, 28],
[12, 13, 28, 42]],
names=['a', 'b', 'c', 'd'])
def test_query(self, river_graph):
result = river_graph.query(a=7)
mu = result['joint']['mu']
sigma = result['joint']['sigma']
assert mu == MeansVector([
[8],
[17],
[26]], names=['b', 'c', 'd'])
assert sigma == CovarianceMatrix(
[[1, 0, 1],
[0, 4, 4],
[1, 4, 6]],
names=['b', 'c', 'd'])
result = river_graph.query(a=7, c=17)
mu = result['joint']['mu']
sigma = result['joint']['sigma']
assert mu == MeansVector([
[8],
[26]], names=['b', 'd'])
assert sigma == CovarianceMatrix(
[[1, 1],
[1, 2]],
names=['b', 'd'])
result = river_graph.query(a=7, c=17, b=8)
mu = result['joint']['mu']
sigma = result['joint']['sigma']
assert mu == MeansVector([
[26]], names=['d'])
assert sigma == CovarianceMatrix(
[[1]],
names=['d'])
def test_assignment_of_joint_parameters(self, river_graph):
assert river_graph.nodes['b'].func.joint_mu == MeansVector([
[3],
[4]], names=['a', 'b'])
assert river_graph.nodes['b'].func.covariance_matrix == CovarianceMatrix([
[4, 4],
[4, 5]], names=['a', 'b'])
def test_gaussian_pdf(self, river_graph):
assert round(river_graph.nodes['a'].func(3), 4) == 0.1995
assert round(river_graph.nodes['a'].func(10), 4) == 0.0002
def test_multivariate_gaussian_pdf(self, river_graph):
assert round(river_graph.nodes['d'].func(3, 1, 3), 4) == 0.0005
| mit | 5,904,261,758,801,329,000 | 29.45122 | 82 | 0.487385 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/visualization/data_providers/registry.py | 1 | 5462 | from galaxy.visualization.data_providers.basic import ColumnDataProvider
from galaxy.visualization.data_providers import genome
from galaxy.model import NoConverterException
from galaxy.visualization.data_providers.phyloviz import PhylovizDataProvider
from galaxy.datatypes.tabular import Tabular, Vcf
from galaxy.datatypes.interval import Interval, ENCODEPeak, ChromatinInteractions, Gtf, Gff, Bed
from galaxy.datatypes.xml import Phyloxml
from galaxy.datatypes.data import Newick, Nexus
class DataProviderRegistry( object ):
"""
Registry for data providers that enables listing and lookup.
"""
def __init__( self ):
# Mapping from dataset type name to a class that can fetch data from a file of that
# type. First key is converted dataset type; if result is another dict, second key
# is original dataset type.
self.dataset_type_name_to_data_provider = {
"tabix": {
Vcf: genome.VcfTabixDataProvider,
Bed: genome.BedTabixDataProvider,
Gtf: genome.GtfTabixDataProvider,
ENCODEPeak: genome.ENCODEPeakTabixDataProvider,
Interval: genome.IntervalTabixDataProvider,
ChromatinInteractions: genome.ChromatinInteractionsTabixDataProvider,
"default" : genome.TabixDataProvider
},
"interval_index": genome.IntervalIndexDataProvider,
"bai": genome.BamDataProvider,
"bam": genome.SamDataProvider,
"bigwig": genome.BigWigDataProvider,
"bigbed": genome.BigBedDataProvider,
"column_with_stats": ColumnDataProvider
}
def get_data_provider( self, trans, name=None, source='data', raw=False, original_dataset=None ):
"""
Returns data provider matching parameter values. For standalone data
sources, source parameter is ignored.
"""
data_provider = None
if raw:
# Working with raw data.
if isinstance( original_dataset.datatype, Gff ):
data_provider_class = genome.RawGFFDataProvider
elif isinstance( original_dataset.datatype, Bed ):
data_provider_class = genome.RawBedDataProvider
elif isinstance( original_dataset.datatype, Vcf ):
data_provider_class = genome.RawVcfDataProvider
elif isinstance( original_dataset.datatype, Tabular ):
data_provider_class = ColumnDataProvider
elif isinstance( original_dataset.datatype, ( Nexus, Newick, Phyloxml ) ):
data_provider_class = PhylovizDataProvider
data_provider = data_provider_class( original_dataset=original_dataset )
else:
# Working with converted or standalone dataset.
if name:
# Provider requested by name; get from mappings.
value = self.dataset_type_name_to_data_provider[ name ]
if isinstance( value, dict ):
# Get converter by dataset extension; if there is no data provider,
# get the default.
data_provider_class = value.get( original_dataset.datatype.__class__, value.get( "default" ) )
else:
data_provider_class = value
# If name is the same as original dataset's type, dataset is standalone.
# Otherwise, a converted dataset is being used.
if name == original_dataset.ext:
data_provider = data_provider_class( original_dataset=original_dataset )
else:
converted_dataset = original_dataset.get_converted_dataset( trans, name )
deps = original_dataset.get_converted_dataset_deps( trans, name )
data_provider = data_provider_class( original_dataset=original_dataset,
converted_dataset=converted_dataset,
dependencies=deps )
elif original_dataset:
# No name, so look up a provider name from datatype's information.
# Dataset must have data sources to get data.
if not original_dataset.datatype.data_sources:
return None
# Get data provider mapping and data provider.
data_provider_mapping = original_dataset.datatype.data_sources
if 'data_standalone' in data_provider_mapping:
data_provider = self.get_data_provider( trans,
name=data_provider_mapping[ 'data_standalone' ],
original_dataset=original_dataset )
else:
source_list = data_provider_mapping[ source ]
if isinstance( source_list, str ):
source_list = [ source_list ]
# Find a valid data provider in the source list.
for source in source_list:
try:
data_provider = self.get_data_provider( trans, name=source, original_dataset=original_dataset )
break
except NoConverterException:
pass
return data_provider
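# Illustrative caller sketch (not part of this module): resolving a provider
# for a dataset inside a controller. `trans` and `dataset` are assumed to be
# the usual Galaxy transaction and dataset objects, and the positional
# get_data(chrom, low, high) signature is assumed from the genome providers.
def _example_fetch_region(trans, dataset, chrom, low, high):
    registry = DataProviderRegistry()
    provider = registry.get_data_provider(trans, source='data',
                                          original_dataset=dataset)
    if provider is None:
        return None
    return provider.get_data(chrom, low, high)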
| gpl-3.0 | 1,753,964,727,899,154,000 | 48.654545 | 123 | 0.584218 | false |
artur-shaik/qutebrowser | scripts/hostblock_blame.py | 8 | 2305 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Check by which hostblock list a host was blocked."""
import sys
import io
import os
import os.path
import configparser
import urllib.request
from PyQt5.QtCore import QStandardPaths
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
from qutebrowser.browser import adblock
def main():
"""Check by which hostblock list a host was blocked."""
if len(sys.argv) != 2:
print("Usage: {} <host>".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
confdir = QStandardPaths.writableLocation(QStandardPaths.ConfigLocation)
confdir = confdir.replace('/', os.sep)
if confdir.split(os.sep)[-1] != 'qutebrowser':
confdir = os.path.join(confdir, 'qutebrowser')
confpath = os.path.join(confdir, 'qutebrowser.conf')
parser = configparser.ConfigParser()
print("config path: {}".format(confpath))
successful = parser.read(confpath, encoding='utf-8')
if not successful:
raise OSError("configparser did not read files successfully!")
lists = parser['content']['host-block-lists']
for url in lists.split(','):
print("checking {}...".format(url))
raw_file = urllib.request.urlopen(url)
byte_io = io.BytesIO(raw_file.read())
f = adblock.get_fileobj(byte_io)
for line in f:
if sys.argv[1] in line:
print("FOUND {} in {}:".format(sys.argv[1], url))
print(" " + line.rstrip())
if __name__ == '__main__':
main()
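# Illustrative variant (not part of the script): counting how many entries a
# single list contributes, reusing the same download/decompress helpers as
# main(). The comment-skipping heuristic is an assumption.
def count_entries(url):
    raw_file = urllib.request.urlopen(url)
    f = adblock.get_fileobj(io.BytesIO(raw_file.read()))
    return sum(1 for line in f
               if line.strip() and not line.startswith('#'))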
| gpl-3.0 | 43,653,116,805,956,380 | 35.015625 | 76 | 0.678525 | false |
AIML/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
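# Optional inspection step (not in the original example): the GridSearchCV
# fitted last is the anova pipeline, and best_params_ reports the percentile
# it selected; the ward block can be re-run the same way to inspect
# ward__n_clusters.
print("anova best params:", clf.best_params_)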
| bsd-3-clause | -555,851,774,725,712,640 | 35.046296 | 79 | 0.670434 | false |
darisandi/geonode | geonode/maps/forms.py | 18 | 1263 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import autocomplete_light
from geonode.maps.models import Map
from geonode.base.forms import ResourceBaseForm
class MapForm(ResourceBaseForm):
class Meta(ResourceBaseForm.Meta):
model = Map
exclude = ResourceBaseForm.Meta.exclude + (
'zoom',
'projection',
'center_x',
'center_y',
)
widgets = autocomplete_light.get_widgets_dict(Map)
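# Illustrative usage sketch (not part of this module): binding the form in a
# map edit view. `request` and `map_obj` are placeholders for the usual view
# arguments; MapForm is a ModelForm, so the data/instance pattern applies.
def _example_update_map(request, map_obj):
    form = MapForm(request.POST or None, instance=map_obj)
    if form.is_valid():
        form.save()
    return form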
| gpl-3.0 | -1,376,420,441,732,966,000 | 33.135135 | 73 | 0.619161 | false |
napkindrawing/ansible | lib/ansible/modules/network/f5/bigip_hostname.py | 26 | 5767 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_hostname
short_description: Manage the hostname of a BIG-IP.
description:
- Manage the hostname of a BIG-IP.
version_added: "2.3"
options:
hostname:
description:
- Hostname of the BIG-IP host.
required: True
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
- Matthew Lam (@mryanlam)
'''
EXAMPLES = '''
- name: Set the hostname of the BIG-IP
bigip_hostname:
hostname: "bigip.localhost.localdomain"
password: "admin"
server: "bigip.localhost.localdomain"
user: "admin"
delegate_to: localhost
'''
RETURN = '''
hostname:
description: The new hostname of the device
returned: changed
type: string
sample: "big-ip01.internal"
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_attributes = ['hostname']
updatables = ['hostname']
returnables = ['hostname']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def hostname(self):
if self._values['hostname'] is None:
return None
return str(self._values['hostname'])
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
self.changes = Parameters(changed)
if changed:
return True
return False
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def read_current_from_device(self):
resource = self.client.api.tm.sys.global_settings.load()
result = resource.attrs
return Parameters(result)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.sys.global_settings.load()
resource.modify(**params)
self.client.api.tm.cm.devices.exec_cmd(
'mv', name=self.have.hostname, target=self.want.hostname
)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
hostname=dict(
required=True,
default=None,
type='str'
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
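# Illustrative check (not part of the module): previewing the rename decision
# without a live BIG-IP. AnsibleF5Parameters appears to accept a plain dict of
# params, so the hostname diff used by ModuleManager can be exercised
# directly; treat this constructor usage as an assumption.
def _example_preview_rename(current, desired):
    have = Parameters(dict(hostname=current))
    want = Parameters(dict(hostname=desired))
    return want.hostname != have.hostname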
| gpl-3.0 | 7,913,415,885,864,399,000 | 26.461905 | 82 | 0.615918 | false |
Toshakins/wagtail | wagtail/wagtailsnippets/views/snippets.py | 2 | 8601 | from __future__ import absolute_import, unicode_literals
from django.apps import apps
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin.utils import permission_denied
from wagtail.wagtailsearch.backends import get_search_backend
from wagtail.wagtailsearch.index import class_is_indexed
from wagtail.wagtailsnippets.models import get_snippet_models
from wagtail.wagtailsnippets.permissions import get_permission_name, user_can_edit_snippet_type
# == Helper functions ==
def get_snippet_model_from_url_params(app_name, model_name):
"""
Retrieve a model from an app_label / model_name combo.
Raise Http404 if the model is not a valid snippet type.
"""
try:
model = apps.get_model(app_name, model_name)
except LookupError:
raise Http404
if model not in get_snippet_models():
# don't allow people to hack the URL to edit content types that aren't registered as snippets
raise Http404
return model
SNIPPET_EDIT_HANDLERS = {}
def get_snippet_edit_handler(model):
if model not in SNIPPET_EDIT_HANDLERS:
if hasattr(model, 'edit_handler'):
            # use the edit handler specified on the snippet model class
edit_handler = model.edit_handler
else:
panels = extract_panel_definitions_from_model_class(model)
edit_handler = ObjectList(panels)
SNIPPET_EDIT_HANDLERS[model] = edit_handler.bind_to_model(model)
return SNIPPET_EDIT_HANDLERS[model]
# == Views ==
def index(request):
snippet_model_opts = [
model._meta for model in get_snippet_models()
if user_can_edit_snippet_type(request.user, model)]
return render(request, 'wagtailsnippets/snippets/index.html', {
'snippet_model_opts': sorted(
snippet_model_opts, key=lambda x: x.verbose_name.lower())})
def list(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permissions = [
get_permission_name(action, model)
for action in ['add', 'change', 'delete']
]
if not any([request.user.has_perm(perm) for perm in permissions]):
return permission_denied(request)
items = model.objects.all()
# Preserve the snippet's model-level ordering if specified, but fall back on PK if not
# (to ensure pagination is consistent)
if not items.ordered:
items = items.order_by('pk')
# Search
is_searchable = class_is_indexed(model)
is_searching = False
search_query = None
if is_searchable and 'q' in request.GET:
search_form = SearchForm(request.GET, placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
if search_form.is_valid():
search_query = search_form.cleaned_data['q']
search_backend = get_search_backend()
items = search_backend.search(search_query, items)
is_searching = True
else:
search_form = SearchForm(placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
paginator, paginated_items = paginate(request, items)
# Template
if request.is_ajax():
template = 'wagtailsnippets/snippets/results.html'
else:
template = 'wagtailsnippets/snippets/type_index.html'
return render(request, template, {
'model_opts': model._meta,
'items': paginated_items,
'can_add_snippet': request.user.has_perm(get_permission_name('add', model)),
'is_searchable': is_searchable,
'search_form': search_form,
'is_searching': is_searching,
'query_string': search_query,
})
def create(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('add', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = model()
edit_handler_class = get_snippet_edit_handler(model)
form_class = edit_handler_class.get_form_class(model)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("{snippet_type} '{instance}' created.").format(
snippet_type=capfirst(model._meta.verbose_name),
instance=instance
),
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, instance.id)
), _('Edit'))
]
)
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.error(request, _("The snippet could not be created due to errors."))
edit_handler = edit_handler_class(instance=instance, form=form)
else:
form = form_class(instance=instance)
edit_handler = edit_handler_class(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/create.html', {
'model_opts': model._meta,
'edit_handler': edit_handler,
'form': form,
})
def edit(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('change', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = get_object_or_404(model, id=id)
edit_handler_class = get_snippet_edit_handler(model)
form_class = edit_handler_class.get_form_class(model)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("{snippet_type} '{instance}' updated.").format(
                    snippet_type=capfirst(model._meta.verbose_name),
instance=instance
),
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, instance.id)
), _('Edit'))
]
)
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.error(request, _("The snippet could not be saved due to errors."))
edit_handler = edit_handler_class(instance=instance, form=form)
else:
form = form_class(instance=instance)
edit_handler = edit_handler_class(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/edit.html', {
'model_opts': model._meta,
'instance': instance,
'edit_handler': edit_handler,
'form': form,
})
def delete(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('delete', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = get_object_or_404(model, id=id)
if request.method == 'POST':
instance.delete()
messages.success(
request,
_("{snippet_type} '{instance}' deleted.").format(
                snippet_type=capfirst(model._meta.verbose_name),
instance=instance
)
)
return redirect('wagtailsnippets:list', app_label, model_name)
return render(request, 'wagtailsnippets/snippets/confirm_delete.html', {
'model_opts': model._meta,
'instance': instance,
})
def usage(request, app_label, model_name, id):
model = get_snippet_model_from_url_params(app_label, model_name)
instance = get_object_or_404(model, id=id)
paginator, used_by = paginate(request, instance.get_usage())
return render(request, "wagtailsnippets/snippets/usage.html", {
'instance': instance,
'used_by': used_by
})
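# Illustrative sketch (not part of this module): the cached edit handler can
# also build a snippet form outside the views, e.g. in a management command.
# The model argument is any registered snippet class.
def _example_build_snippet_form(model):
    edit_handler_class = get_snippet_edit_handler(model)
    return edit_handler_class.get_form_class(model)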
| bsd-3-clause | -1,387,253,162,009,588,500 | 33.542169 | 101 | 0.63202 | false |
germanovm/vdsm | vdsm/gluster/storagedev.py | 1 | 13988 | #
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import errno
import logging
import os
import blivet
import blivet.formats
import blivet.formats.fs
import blivet.size
from blivet.devices import LVMVolumeGroupDevice
from blivet.devices import LVMThinPoolDevice
from blivet.devices import LVMLogicalVolumeDevice
from blivet.devices import LVMThinLogicalVolumeDevice
from blivet import udev
from vdsm import utils
import fstab
import exception as ge
from . import makePublic
log = logging.getLogger("Gluster")
_pvCreateCommandPath = utils.CommandPath("pvcreate",
"/sbin/pvcreate",
"/usr/sbin/pvcreate",)
_vgCreateCommandPath = utils.CommandPath("vgcreate",
"/sbin/vgcreate",
"/usr/sbin/vgcreate",)
_lvconvertCommandPath = utils.CommandPath("lvconvert",
"/sbin/lvconvert",
"/usr/sbin/lvconvert",)
_lvchangeCommandPath = utils.CommandPath("lvchange",
"/sbin/lvchange",
"/usr/sbin/lvchange",)
_vgscanCommandPath = utils.CommandPath("vgscan",
"/sbin/vgscan",
"/usr/sbin/vgscan",)
# All sizes are in MiB unless otherwise specified
DEFAULT_CHUNK_SIZE_KB = 256
DEFAULT_METADATA_SIZE_KB = 16777216
MIN_VG_SIZE = 1048576
MIN_METADATA_PERCENT = 0.005
DEFAULT_FS_TYPE = "xfs"
DEFAULT_MOUNT_OPTIONS = "inode64,noatime"
def _getDeviceDict(device, createBrick=False):
info = {'name': device.name,
'devPath': device.path,
'devUuid': device.uuid or '',
'bus': device.bus or '',
'model': '',
'fsType': '',
'mountPoint': '',
'uuid': '',
'createBrick': createBrick}
if isinstance(device.size, blivet.size.Size):
info['size'] = '%s' % device.size.convertTo(spec="MiB")
else:
info['size'] = '%s' % device.size
if not info['bus'] and device.parents:
info['bus'] = device.parents[0].bus
if device.model:
info['model'] = "%s (%s)" % (device.model, device.type)
else:
info['model'] = device.type
if device.format:
info['uuid'] = device.format.uuid or ''
# lvm vg will not have sysfs path
if hasattr(udev, 'get_device'):
dev = udev.get_device(device.sysfsPath) or {}
elif hasattr(udev, 'udev_get_device'):
dev = udev.udev_get_device(device.sysfsPath) or {}
else:
dev = {}
info['fsType'] = device.format.type or dev.get('ID_FS_TYPE', '')
if hasattr(device.format, 'mountpoint'):
info['mountPoint'] = device.format.mountpoint or ''
return info
def _parseDevices(devices):
deviceList = []
for device in devices:
deviceList.append(_getDeviceDict(device, _canCreateBrick(device)))
return deviceList
def _canCreateBrick(device):
if not device or device.kids > 0 or device.format.type or \
hasattr(device.format, 'mountpoint') or \
device.type in ['cdrom', 'lvmvg', 'lvmthinpool', 'lvmlv', 'lvmthinlv']:
return False
return True
def _reset_blivet(blivetEnv):
try:
blivetEnv.reset()
except (blivet.errors.UnusableConfigurationError,
blivet.errors.StorageError) as e:
log.error("Error: %s" % e.message)
@makePublic
def storageDevicesList():
blivetEnv = blivet.Blivet()
_reset_blivet(blivetEnv)
return _parseDevices(blivetEnv.devices)
@makePublic
def createBrick(brickName, mountPoint, devNameList, fsType=DEFAULT_FS_TYPE,
raidParams={}):
def _getDeviceList(devNameList):
return [blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1])
for devName in devNameList]
def _makePartition(deviceList):
pvDeviceList = []
doPartitioning = False
for dev in deviceList:
if dev.type not in ['disk', 'dm-multipath']:
pvDeviceList.append(dev)
else:
blivetEnv.initializeDisk(dev)
part = blivetEnv.newPartition(fmt_type="lvmpv", grow=True,
parents=[dev])
blivetEnv.createDevice(part)
pvDeviceList.append(part)
doPartitioning = True
if doPartitioning:
blivet.partitioning.doPartitioning(blivetEnv)
return pvDeviceList
def _createPV(deviceList, alignment=0):
def _createAlignedPV(deviceList, alignment):
for dev in deviceList:
# bz#1178705: Blivet always creates pv with 1MB dataalignment
# Workaround: Till blivet fixes the issue, we use lvm pvcreate
rc, out, err = utils.execCmd([_pvCreateCommandPath.cmd,
'--dataalignment',
'%sk' % alignment,
dev.path])
if rc:
raise ge.GlusterHostStorageDevicePVCreateFailedException(
dev.path, alignment, rc, out, err)
_reset_blivet(blivetEnv)
return _getDeviceList([dev.name for dev in deviceList])
if alignment:
blivetEnv.doIt()
return _createAlignedPV(deviceList, alignment)
for dev in deviceList:
lvmpv = blivet.formats.getFormat("lvmpv", device=dev.path)
blivetEnv.formatDevice(dev, lvmpv)
blivet.partitioning.doPartitioning(blivetEnv)
return deviceList
def _createVG(vgName, deviceList, stripeSize=0):
if stripeSize:
# bz#1198568: Blivet always creates vg with 1MB stripe size
# Workaround: Till blivet fixes the issue, use vgcreate command
devices = ','.join([device.path for device in deviceList])
rc, out, err = utils.execCmd([_vgCreateCommandPath.cmd,
'-s', '%sk' % stripeSize,
vgName, devices])
if rc:
raise ge.GlusterHostStorageDeviceVGCreateFailedException(
vgName, devices, stripeSize, rc, out, err)
blivetEnv.reset()
vg = blivetEnv.devicetree.getDeviceByName(vgName)
else:
vg = LVMVolumeGroupDevice(vgName, parents=deviceList)
blivetEnv.createDevice(vg)
return vg
def _createThinPool(poolName, vg, alignment=0,
poolMetaDataSize=0, poolDataSize=0):
if not alignment:
# bz#1180228: blivet doesn't handle percentage-based sizes properly
# Workaround: Till the bz gets fixed, we take only 99% size from vg
pool = LVMThinPoolDevice(poolName, parents=[vg],
size=(vg.size * 99 / 100),
grow=True)
blivetEnv.createDevice(pool)
return pool
else:
metaName = "meta-%s" % poolName
vgPoolName = "%s/%s" % (vg.name, poolName)
metaLv = LVMLogicalVolumeDevice(
metaName, parents=[vg],
size=blivet.size.Size('%d KiB' % poolMetaDataSize))
poolLv = LVMLogicalVolumeDevice(
poolName, parents=[vg],
size=blivet.size.Size('%d KiB' % poolDataSize))
blivetEnv.createDevice(metaLv)
blivetEnv.createDevice(poolLv)
blivetEnv.doIt()
# bz#1100514: LVM2 currently only supports physical extent sizes
# that are a power of 2. Till that support is available we need
            # to use lvconvert to achieve that.
# bz#1179826: blivet doesn't support lvconvert functionality.
# Workaround: Till the bz gets fixed, lvconvert command is used
rc, out, err = utils.execCmd([_lvconvertCommandPath.cmd,
'--chunksize', '%sK' % alignment,
'--thinpool', vgPoolName,
'--poolmetadata',
"%s/%s" % (vg.name, metaName),
                                          '--poolmetadataspare', 'n', '-y'])
if rc:
raise ge.GlusterHostStorageDeviceLVConvertFailedException(
vg.path, alignment, rc, out, err)
rc, out, err = utils.execCmd([_lvchangeCommandPath.cmd,
'--zero', 'n', vgPoolName])
if rc:
raise ge.GlusterHostStorageDeviceLVChangeFailedException(
vgPoolName, rc, out, err)
_reset_blivet(blivetEnv)
return blivetEnv.devicetree.getDeviceByName(poolLv.name)
if os.path.ismount(mountPoint):
raise ge.GlusterHostStorageMountPointInUseException(mountPoint)
vgName = "vg-" + brickName
poolName = "pool-" + brickName
alignment = 0
chunkSize = 0
poolDataSize = 0
count = 0
metaDataSize = DEFAULT_METADATA_SIZE_KB
if raidParams.get('type') == '6':
count = raidParams['pdCount'] - 2
alignment = raidParams['stripeSize'] * count
chunkSize = alignment
elif raidParams.get('type') == '10':
count = raidParams['pdCount'] / 2
alignment = raidParams['stripeSize'] * count
chunkSize = DEFAULT_CHUNK_SIZE_KB
blivetEnv = blivet.Blivet()
_reset_blivet(blivetEnv)
# get the devices list from the device name
deviceList = _getDeviceList(devNameList)
# raise an error when any device not actually found in the given list
notFoundList = set(devNameList).difference(
set([dev.name for dev in deviceList]))
if notFoundList:
raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList)
# raise an error when any device is used already in the given list
inUseList = set(devNameList).difference(set([not _canCreateBrick(
dev) or dev.name for dev in deviceList]))
if inUseList:
raise ge.GlusterHostStorageDeviceInUseException(inUseList)
pvDeviceList = _makePartition(deviceList)
pvDeviceList = _createPV(pvDeviceList, alignment)
vg = _createVG(vgName, pvDeviceList, raidParams.get('stripeSize', 0))
# The following calculation is based on the redhat storage performance doc
# http://docbuilder.usersys.redhat.com/22522
# /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance
# create ~16GB metadata LV (metaDataSize) that has a size which is
# a multiple of RAID stripe width if it is > minimum vg size
# otherwise allocate a minimum of 0.5% of the data device size
# and create data LV (poolDataSize) that has a size which is
# a multiple of stripe width
if alignment:
vgSizeKib = int(vg.size.convertTo(spec="KiB"))
if vg.size.convertTo(spec='MiB') < MIN_VG_SIZE:
metaDataSize = vgSizeKib * MIN_METADATA_PERCENT
poolDataSize = vgSizeKib - metaDataSize
metaDataSize = (metaDataSize - (metaDataSize % alignment))
poolDataSize = (poolDataSize - (poolDataSize % alignment))
# Creating a thin pool from the data LV and the metadata LV
# lvconvert --chunksize alignment --thinpool VOLGROUP/thin_pool
# --poolmetadata VOLGROUP/metadata_device_name
pool = _createThinPool(poolName, vg, chunkSize, metaDataSize, poolDataSize)
thinlv = LVMThinLogicalVolumeDevice(brickName, parents=[pool],
size=pool.size, grow=True)
blivetEnv.createDevice(thinlv)
blivetEnv.doIt()
if fsType != DEFAULT_FS_TYPE:
log.error("fstype %s is currently unsupported" % fsType)
raise ge.GlusterHostStorageDeviceMkfsFailedException(
thinlv.path, alignment, raidParams.get('stripeSize', 0), fsType)
format = blivet.formats.getFormat(DEFAULT_FS_TYPE, device=thinlv.path)
format._defaultFormatOptions = ["-f", "-i", "size=512", "-n", "size=8192"]
if raidParams.get('type') == '6':
format._defaultFormatOptions += ["-d", "sw=%s,su=%sk" % (
count, raidParams.get('stripeSize'))]
blivetEnv.formatDevice(thinlv, format)
blivetEnv.doIt()
try:
os.makedirs(mountPoint)
except OSError as e:
if errno.EEXIST != e.errno:
errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
raise ge.GlusterHostStorageDeviceMakeDirsFailedException(
err=[errMsg])
thinlv.format.setup(mountpoint=mountPoint)
blivetEnv.doIt()
    # bz#1230495: lvm devices are invisible and appear only after vgscan
    # Workaround: Till the bz gets fixed, we use vgscan to refresh LVM devices
rc, out, err = utils.execCmd([_vgscanCommandPath.cmd])
if rc:
raise ge.GlusterHostStorageDeviceVGScanFailedException(rc, out, err)
fstab.FsTab().add(thinlv.path, mountPoint, DEFAULT_FS_TYPE)
return _getDeviceDict(thinlv)
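# Illustrative call sketch (not part of this module): creating a brick on a
# 12-disk RAID 6 volume with a 128 KiB stripe, matching the alignment math
# above. The device name and mount point are placeholders.
def _example_create_raid6_brick():
    return createBrick('brick01', '/rhgs/brick01', ['sdb'],
                       raidParams={'type': '6', 'pdCount': 12,
                                   'stripeSize': 128})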
| gpl-2.0 | 6,225,482,757,189,328,000 | 39.544928 | 79 | 0.600729 | false |
pgjones/jinja | tests/test_security.py | 23 | 6015 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~
Checks the sandbox and other security features.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import pytest
from jinja2 import Environment
from jinja2.sandbox import SandboxedEnvironment, \
ImmutableSandboxedEnvironment, unsafe
from jinja2 import Markup, escape
from jinja2.exceptions import SecurityError, TemplateSyntaxError, \
TemplateRuntimeError
from jinja2._compat import text_type
class PrivateStuff(object):
def bar(self):
return 23
@unsafe
def foo(self):
return 42
def __repr__(self):
return 'PrivateStuff'
class PublicStuff(object):
bar = lambda self: 23
_foo = lambda self: 42
def __repr__(self):
return 'PublicStuff'
@pytest.mark.sandbox
class TestSandbox():
def test_unsafe(self, env):
env = SandboxedEnvironment()
pytest.raises(SecurityError, env.from_string("{{ foo.foo() }}").render,
foo=PrivateStuff())
assert env.from_string("{{ foo.bar() }}").render(foo=PrivateStuff()) == '23'
pytest.raises(SecurityError,
env.from_string("{{ foo._foo() }}").render,
foo=PublicStuff())
assert env.from_string("{{ foo.bar() }}").render(foo=PublicStuff()) == '23'
assert env.from_string("{{ foo.__class__ }}").render(foo=42) == ''
assert env.from_string("{{ foo.func_code }}").render(foo=lambda:None) == ''
# security error comes from __class__ already.
pytest.raises(SecurityError, env.from_string(
"{{ foo.__class__.__subclasses__() }}").render, foo=42)
def test_immutable_environment(self, env):
env = ImmutableSandboxedEnvironment()
pytest.raises(SecurityError, env.from_string(
'{{ [].append(23) }}').render)
pytest.raises(SecurityError, env.from_string(
'{{ {1:2}.clear() }}').render)
def test_restricted(self, env):
env = SandboxedEnvironment()
pytest.raises(TemplateSyntaxError, env.from_string,
"{% for item.attribute in seq %}...{% endfor %}")
pytest.raises(TemplateSyntaxError, env.from_string,
"{% for foo, bar.baz in seq %}...{% endfor %}")
def test_markup_operations(self, env):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
# string interpolations are safe to use too
        assert Markup('<em>%s</em>') % '<bad user>' == \
            '<em>&lt;bad user&gt;</em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
        } == '<em>&lt;bad user&gt;</em>'
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_template_data(self, env):
env = Environment(autoescape=True)
t = env.from_string('{% macro say_hello(name) %}'
'<p>Hello {{ name }}!</p>{% endmacro %}'
'{{ say_hello("<blink>foo</blink>") }}')
        escaped_out = '<p>Hello &lt;blink&gt;foo&lt;/blink&gt;!</p>'
assert t.render() == escaped_out
assert text_type(t.module) == escaped_out
assert escape(t.module) == escaped_out
assert t.module.say_hello('<blink>foo</blink>') == escaped_out
assert escape(t.module.say_hello('<blink>foo</blink>')) == escaped_out
def test_attr_filter(self, env):
env = SandboxedEnvironment()
tmpl = env.from_string('{{ cls|attr("__subclasses__")() }}')
pytest.raises(SecurityError, tmpl.render, cls=int)
def test_binary_operator_intercepting(self, env):
def disable_op(left, right):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('1 + 2', {}, '3'), ('a + 2', {'a': 2}, '4'):
env = SandboxedEnvironment()
env.binop_table['+'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_binops = frozenset(['+'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
assert False, 'expected runtime error'
def test_unary_operator_intercepting(self, env):
def disable_op(arg):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('-1', {}, '-1'), ('-a', {'a': 2}, '-2'):
env = SandboxedEnvironment()
env.unop_table['-'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_unops = frozenset(['-'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
assert False, 'expected runtime error'
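# Illustrative standalone check (not part of the suite): the underscore
# attribute rule exercised in TestSandbox.test_unsafe, runnable on its own.
if __name__ == '__main__':
    sandbox = SandboxedEnvironment()
    try:
        sandbox.from_string('{{ foo._foo() }}').render(foo=PublicStuff())
    except SecurityError:
        print('private attribute call blocked, as expected')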
| bsd-3-clause | -4,719,486,722,143,064,000 | 36.360248 | 84 | 0.544306 | false |
magfest/ubersystem | alembic/versions/e1d3c11eb9dd_add_adult_panels_and_tables.py | 1 | 1928 | """Add adult panels and tables
Revision ID: e1d3c11eb9dd
Revises: 1f862611ba04
Create Date: 2018-06-21 23:06:32.678061
"""
# revision identifiers, used by Alembic.
revision = 'e1d3c11eb9dd'
down_revision = '1f862611ba04'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('art_show_application', sa.Column('panels_ad', sa.Integer(), server_default='0', nullable=False))
op.add_column('art_show_application', sa.Column('tables_ad', sa.Integer(), server_default='0', nullable=False))
def downgrade():
op.drop_column('art_show_application', 'tables_ad')
op.drop_column('art_show_application', 'panels_ad')
| agpl-3.0 | -673,961,587,485,319,400 | 30.606557 | 115 | 0.631224 | false |
jspan/Open-Knesset | laws/forms.py | 8 | 8038 | from django import forms
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from datetime import date
from tagging.models import Tag
from models import (Vote, Bill, KnessetProposal, BillBudgetEstimation,
CONVERT_TO_DISCUSSION_HEADERS)
from vote_choices import (ORDER_CHOICES, TAGGED_CHOICES, TYPE_CHOICES,
SIMPLE_TYPE_CHOICES, BILL_TAGGED_CHOICES,
BILL_STAGE_CHOICES, BILL_AGRR_STAGES)
STAGE_CHOICES = (
('all', _('All')),
)
LINK_ERRORS = {
'DUP_FIRST': _('Bill already has a First Vote linked to it'),
'DUP_APPROVE': _('Bill already has an Approval Vote linked to it'),
'ALREADY_LINKED': _('Vote is already linked as Approval Vote of another bill'),
}
class AttachBillFromVoteForm(forms.Form):
"""Form for attaching a vote to a bill from the vote page."""
vote_model = forms.ModelChoiceField(queryset=Vote.objects.all(),
widget=forms.HiddenInput,
required=True)
vote_type = forms.ChoiceField(label=_('Vote Type'),
choices=SIMPLE_TYPE_CHOICES,
required=True)
bill_model = forms.ModelChoiceField(label=_('Bill'),
queryset=Bill.objects.all(),
widget=forms.TextInput,
required=True)
def __init__(self, vote, *args, **kwargs):
super(AttachBillFromVoteForm, self).__init__(*args, **kwargs)
self.fields['vote_model'].initial = vote
self.fields['vote_type'].initial = self.get_default_vote_type(vote)
def clean(self):
cleaned_data = super(AttachBillFromVoteForm, self).clean()
vote_type = cleaned_data.get('vote_type')
bill = cleaned_data.get('bill_model')
if vote_type == 'first vote' and bill.first_vote is not None:
raise forms.ValidationError(
LINK_ERRORS['DUP_FIRST'],
code="cannot-link")
elif vote_type == 'approve vote':
if bill.approval_vote is not None:
raise forms.ValidationError(
LINK_ERRORS['DUP_APPROVE'],
code="cannot-link")
vote = cleaned_data.get('vote_model')
vote_already_linked = Bill.objects\
.filter(approval_vote=vote).count() > 0
if vote_already_linked:
raise forms.ValidationError(
LINK_ERRORS['ALREADY_LINKED'],
code="cannot-link")
return cleaned_data
def get_default_vote_type(self, vote):
for h in CONVERT_TO_DISCUSSION_HEADERS:
if vote.title.find(h) >= 0:
return 'pre vote'
if vote.vote_type == 'law-approve':
return 'approve vote'
return None
class BudgetEstimateForm(forms.Form):
"""Form for submitting the budget estimation of a given bill, for a few
types of budget."""
be_one_time_gov = forms.IntegerField(label=_('One-time costs to government'), required=False)
be_yearly_gov = forms.IntegerField(label=_('Yearly costs to government'), required=False)
be_one_time_ext = forms.IntegerField(label=_('One-time costs to external bodies'), required=False)
be_yearly_ext = forms.IntegerField(label=_('Yearly costs to external bodies'), required=False)
    be_summary = forms.CharField(label=_('Summary of the estimation'), widget=forms.Textarea, required=False)
def __init__(self, bill, user, *args, **kwargs):
super(BudgetEstimateForm, self).__init__(*args, **kwargs)
if bill is not None and user is not None:
try:
be = BillBudgetEstimation.objects.get(bill=bill,estimator__username=str(user))
self.fields['be_one_time_gov'].initial = be.one_time_gov
self.fields['be_yearly_gov'].initial = be.yearly_gov
self.fields['be_one_time_ext'].initial = be.one_time_ext
self.fields['be_yearly_ext'].initial = be.yearly_ext
self.fields['be_summary'].initial = be.summary
except BillBudgetEstimation.DoesNotExist:
pass
#self.fields['tagged'].choices = new_choices
class VoteSelectForm(forms.Form):
"""Votes filtering form"""
vtype = forms.ChoiceField(label=_('Vote types'),
choices=TYPE_CHOICES,
required=False,
initial='all')
tagged = forms.ChoiceField(label=_('Tags'),
choices=TAGGED_CHOICES,
required=False,
initial='all')
order = forms.ChoiceField(label=_('Order by'),
choices=ORDER_CHOICES,
required=False,
initial='time')
from_date = forms.DateField(label=_('From date'),
required=False)
to_date = forms.DateField(label=_('To date'),
required=False,
initial=date.today)
exclude_user_agendas = forms.BooleanField(label=_('Exclude my agendas'),
required=False,
initial=False)
exclude_ascribed = forms.BooleanField(
label=_('Exclude votes ascribed to bills'),
required=False,
initial=False)
def __init__(self, *args, **kwargs):
super(VoteSelectForm, self).__init__(*args, **kwargs)
tags = Tag.objects.usage_for_model(Vote)
new_choices = list(TAGGED_CHOICES)
new_choices.extend([(t.name, t.name) for t in tags])
self.fields['tagged'].choices = new_choices
class BillSelectForm(forms.Form):
"""Bill filtering form"""
stage = forms.ChoiceField(label=_('Bill Stage'), choices=BILL_STAGE_CHOICES,
required=False, initial='all')
tagged = forms.ChoiceField(label=_('Tags'), choices=BILL_TAGGED_CHOICES,
required=False, initial='all')
changed_after = forms.DateField(label=_('Stage Changed After:'), required=False,
input_formats=["%d/%m/%Y", "%d/%m/%y"])
    changed_before = forms.DateField(label=_('Stage Changed Before:'), required=False,
input_formats=["%d/%m/%Y", "%d/%m/%y"])
pp_id = forms.IntegerField(required=False,
label=_('Private proposal ID'))
knesset_booklet = forms.IntegerField(required=False,
label=_('Knesset booklet'))
gov_booklet = forms.IntegerField(required=False,
label=_('Government booklet'))
# TODO: add more filter options:
# order = forms.ChoiceField(label=_('Order by'), choices=ORDER_CHOICES,
# required=False, initial='time')
# from_date = forms.DateField(label=_('From date'), required=False)
# to_date = forms.DateField(label=_('To date'), required=False,
# initial=date.today)
def __init__(self, *args, **kwargs):
super(BillSelectForm, self).__init__(*args, **kwargs)
tags = Tag.objects.usage_for_model(Bill)
new_choices = list(BILL_TAGGED_CHOICES)
new_choices.extend([(t.name, t.name) for t in tags])
self.fields['tagged'].choices = new_choices
new_stages = list(STAGE_CHOICES)
new_stages.extend(BILL_STAGE_CHOICES)
self.fields['stage'].choices = new_stages
def clean(self):
super(BillSelectForm, self).clean()
#override stage error on aggregate stages (when accessing from mk page)
if ((self.data.get('stage') in BILL_AGRR_STAGES) and
('stage' in self._errors)):
del self._errors['stage']
self.cleaned_data['stage'] = self.data.get('stage')
return self.cleaned_data
| bsd-3-clause | 6,818,616,644,079,561,000 | 40.220513 | 107 | 0.56631 | false |
edx/ecommerce | ecommerce/extensions/api/v2/views/refunds.py | 1 | 6962 | """HTTP endpoints for interacting with refunds."""
import logging
from django.contrib.auth import get_user_model
from django.db import transaction
from django.utils.decorators import method_decorator
from oscar.core.loading import get_model
from rest_framework import generics, status
from rest_framework.exceptions import ParseError
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from ecommerce.core.exceptions import MissingLmsUserIdException
from ecommerce.extensions.api import serializers
from ecommerce.extensions.api.exceptions import BadRequestException
from ecommerce.extensions.api.permissions import CanActForUser
from ecommerce.extensions.refund.api import (
create_refunds,
create_refunds_for_entitlement,
find_orders_associated_with_course
)
Order = get_model('order', 'Order')
OrderLine = get_model('order', 'Line')
Refund = get_model('refund', 'Refund')
User = get_user_model()
logger = logging.getLogger(__name__)
class RefundCreateView(generics.CreateAPIView):
"""Creates refunds.
Given a username and course ID or an order number and a course entitlement,
this view finds and creates a refund for each order matching the following criteria:
* Order was placed by the User linked to username.
* Order is in the COMPLETE state.
* Order has at least one line item associated with the course ID or Course Entitlement.
Note that only the line items associated with the course ID will be refunded.
Items associated with a different course ID, or not associated with any course ID, will NOT be refunded.
With the exception of superusers, users may only create refunds for themselves.
Attempts to create refunds for other users will fail with HTTP 403.
If refunds are created, a list of the refund IDs will be returned along with HTTP 201.
If no refunds are created, HTTP 200 will be returned.
"""
permission_classes = (IsAuthenticated, CanActForUser)
def get_serializer(self, *args, **kwargs):
return None
def create(self, request, *args, **kwargs):
"""
Creates refunds, if eligible orders exist.
This supports creating refunds for both course runs as well as course entitlements.
Arguments:
username (string): This is required by both types of refund
course_run refund:
course_id (string): The course_id for which to refund for the given user
course_entitlement refund:
order_number (string): The order for which to refund the course entitlement
entitlement_uuid (string): The UUID for the course entitlement for the given order to refund
Returns:
refunds (list): List of refunds created
Side effects:
If the given user does not have an LMS user id, tries to find it. If found, adds the id to the user and
saves the user. If the id cannot be found, writes custom metrics to record this fact.
"""
course_id = request.data.get('course_id')
username = request.data.get('username')
order_number = request.data.get('order_number')
entitlement_uuid = request.data.get('entitlement_uuid')
refunds = []
# We should always have a username value as long as CanActForUser is in place.
if not username: # pragma: no cover
raise BadRequestException('No username specified.')
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise BadRequestException('User "{}" does not exist.'.format(username))
# Ensure the user has an LMS user id
try:
if request.user.is_authenticated:
requested_by = request.user.id
else: # pragma: no cover
requested_by = None
called_from = u'refund processing for user {user_id} requested by {requested_by}'.format(
user_id=user.id,
requested_by=requested_by)
user.add_lms_user_id('ecommerce_missing_lms_user_id_refund', called_from)
except MissingLmsUserIdException:
raise BadRequestException('User {} does not have an LMS user id.'.format(user.id))
# Try and create a refund for the passed in order
if entitlement_uuid:
try:
order = user.orders.get(number=order_number)
refunds = create_refunds_for_entitlement(order, entitlement_uuid)
except (Order.DoesNotExist, OrderLine.DoesNotExist):
raise BadRequestException('Order {} does not exist.'.format(order_number))
else:
if not course_id:
raise BadRequestException('No course_id specified.')
# We can only create refunds if the user has orders.
if user.orders.exists():
orders = find_orders_associated_with_course(user, course_id)
refunds = create_refunds(orders, course_id)
# Return HTTP 201 if we created refunds.
if refunds:
refund_ids = [refund.id for refund in refunds]
return Response(refund_ids, status=status.HTTP_201_CREATED)
# Return HTTP 200 if we did NOT create refunds.
return Response([], status=status.HTTP_200_OK)
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class RefundProcessView(generics.UpdateAPIView):
"""Process--approve or deny--refunds.
This view can be used to approve, or deny, a Refund. Under normal conditions, the view returns HTTP status 200
and a serialized Refund. In the event of an error, the view will still return a serialized Refund (to reflect any
changed statuses); however, HTTP status will be 500.
Only staff users are permitted to use this view.
"""
permission_classes = (IsAuthenticated, IsAdminUser,)
queryset = Refund.objects.all()
serializer_class = serializers.RefundSerializer
def update(self, request, *args, **kwargs):
APPROVE = 'approve'
DENY = 'deny'
APPROVE_PAYMENT_ONLY = 'approve_payment_only'
action = request.data.get('action', '').lower()
if action not in (APPROVE, DENY, APPROVE_PAYMENT_ONLY):
raise ParseError('The action [{}] is not valid.'.format(action))
with transaction.atomic():
refund = self.get_object()
result = False
if action in (APPROVE, APPROVE_PAYMENT_ONLY):
revoke_fulfillment = action == APPROVE
result = refund.approve(revoke_fulfillment=revoke_fulfillment)
elif action == DENY:
result = refund.deny()
http_status = status.HTTP_200_OK if result else status.HTTP_500_INTERNAL_SERVER_ERROR
serializer = self.get_serializer(refund)
return Response(serializer.data, status=http_status)
| agpl-3.0 | -7,274,515,693,310,179,000 | 39.71345 | 117 | 0.669779 | false |
jcoady9/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/rijndael.py | 359 | 11341 | """
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, [email protected], April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import string
#-----------------------
#TREV - ADDED BECAUSE THERE'S WARNINGS ABOUT INT OVERFLOW BEHAVIOR CHANGING IN
#2.4.....
import os
if os.name != "java":
import exceptions
if hasattr(exceptions, "FutureWarning"):
import warnings
warnings.filterwarnings("ignore", category=FutureWarning, append=1)
#-----------------------
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
[[0, 0], [1, 5], [2, 4], [3, 3]],
[[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
A = [[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in xrange(255):
j = (alog[-1] << 1) ^ alog[-1]
if j & 0x100 != 0:
j ^= 0x11B
alog.append(j)
log = [0] * 256
for i in xrange(1, 255):
log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in xrange(256)]
box[1][7] = 1
for i in xrange(2, 256):
j = alog[255 - log[i]]
for t in xrange(8):
box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in xrange(256)]
for i in xrange(256):
for t in xrange(8):
cox[i][t] = B[t]
for j in xrange(8):
cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in xrange(256):
S[i] = cox[i][0] << 7
for t in xrange(1, 8):
S[i] ^= cox[i][t] << (7-t)
Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
[3, 2, 1, 1],
[1, 3, 2, 1],
[1, 1, 3, 2]]
AA = [[0] * 8 for i in xrange(4)]
for i in xrange(4):
for j in xrange(4):
AA[i][j] = G[i][j]
AA[i][i+4] = 1
for i in xrange(4):
pivot = AA[i][i]
if pivot == 0:
t = i + 1
while AA[t][i] == 0 and t < 4:
t += 1
assert t != 4, 'G matrix must be invertible'
for j in xrange(8):
AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
pivot = AA[i][i]
for j in xrange(8):
if AA[i][j] != 0:
AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
for t in xrange(4):
if i != t:
for j in xrange(i+1, 8):
AA[t][j] ^= mul(AA[i][j], AA[t][i])
AA[t][i] = 0
iG = [[0] * 4 for i in xrange(4)]
for i in xrange(4):
for j in xrange(4):
iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
if a == 0:
return 0
r = 0
for b in bs:
r <<= 8
if b != 0:
r = r | mul(a, b)
return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in xrange(256):
s = S[t]
T1.append(mul4(s, G[0]))
T2.append(mul4(s, G[1]))
T3.append(mul4(s, G[2]))
T4.append(mul4(s, G[3]))
s = Si[t]
T5.append(mul4(s, iG[0]))
T6.append(mul4(s, iG[1]))
T7.append(mul4(s, iG[2]))
T8.append(mul4(s, iG[3]))
U1.append(mul4(t, iG[0]))
U2.append(mul4(t, iG[1]))
U3.append(mul4(t, iG[2]))
U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in xrange(1, 30):
r = mul(2, r)
rcon.append(r)
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael:
def __init__(self, key, block_size = 16):
if block_size != 16 and block_size != 24 and block_size != 32:
raise ValueError('Invalid block size: ' + str(block_size))
if len(key) != 16 and len(key) != 24 and len(key) != 32:
raise ValueError('Invalid key size: ' + str(len(key)))
self.block_size = block_size
ROUNDS = num_rounds[len(key)][block_size]
BC = block_size / 4
# encryption round keys
Ke = [[0] * BC for i in xrange(ROUNDS + 1)]
# decryption round keys
Kd = [[0] * BC for i in xrange(ROUNDS + 1)]
ROUND_KEY_COUNT = (ROUNDS + 1) * BC
KC = len(key) / 4
# copy user material bytes into temporary ints
tk = []
for i in xrange(0, KC):
tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
(ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
# copy values into round key arrays
t = 0
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t / BC][t % BC] = tk[j]
Kd[ROUNDS - (t / BC)][t % BC] = tk[j]
j += 1
t += 1
tt = 0
rconpointer = 0
while t < ROUND_KEY_COUNT:
# extrapolate using phi (the round key evolution function)
tt = tk[KC - 1]
tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
(S[ tt & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) ^ \
(rcon[rconpointer] & 0xFF) << 24
rconpointer += 1
if KC != 8:
for i in xrange(1, KC):
tk[i] ^= tk[i-1]
else:
for i in xrange(1, KC / 2):
tk[i] ^= tk[i-1]
tt = tk[KC / 2 - 1]
tk[KC / 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) << 24
for i in xrange(KC / 2 + 1, KC):
tk[i] ^= tk[i-1]
# copy values into round key arrays
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t / BC][t % BC] = tk[j]
Kd[ROUNDS - (t / BC)][t % BC] = tk[j]
j += 1
t += 1
# inverse MixColumn where needed
for r in xrange(1, ROUNDS):
for j in xrange(BC):
tt = Kd[r][j]
Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
U2[(tt >> 16) & 0xFF] ^ \
U3[(tt >> 8) & 0xFF] ^ \
U4[ tt & 0xFF]
self.Ke = Ke
self.Kd = Kd
def encrypt(self, plaintext):
if len(plaintext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
Ke = self.Ke
BC = self.block_size / 4
ROUNDS = len(Ke) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][0]
s2 = shifts[SC][2][0]
s3 = shifts[SC][3][0]
a = [0] * BC
# temporary work array
t = []
# plaintext to ints + key
for i in xrange(BC):
t.append((ord(plaintext[i * 4 ]) << 24 |
ord(plaintext[i * 4 + 1]) << 16 |
ord(plaintext[i * 4 + 2]) << 8 |
ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
# apply round transforms
for r in xrange(1, ROUNDS):
for i in xrange(BC):
a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in xrange(BC):
tt = Ke[ROUNDS][i]
result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return string.join(map(chr, result), '')
def decrypt(self, ciphertext):
if len(ciphertext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
Kd = self.Kd
BC = self.block_size / 4
ROUNDS = len(Kd) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][1]
s2 = shifts[SC][2][1]
s3 = shifts[SC][3][1]
a = [0] * BC
# temporary work array
t = [0] * BC
# ciphertext to ints + key
for i in xrange(BC):
t[i] = (ord(ciphertext[i * 4 ]) << 24 |
ord(ciphertext[i * 4 + 1]) << 16 |
ord(ciphertext[i * 4 + 2]) << 8 |
ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
# apply round transforms
for r in xrange(1, ROUNDS):
for i in xrange(BC):
a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in xrange(BC):
tt = Kd[ROUNDS][i]
result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return string.join(map(chr, result), '')
def encrypt(key, block):
return rijndael(key, len(block)).encrypt(block)
def decrypt(key, block):
return rijndael(key, len(block)).decrypt(block)
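# Illustrative round trip through the helpers above (a sketch, not part of
# the original module; the key and block values are assumptions -- each must
# be 16, 24 or 32 bytes long):
#
#   ct = encrypt('k' * 16, 'a' * 16)
#   assert decrypt('k' * 16, ct) == 'a' * 16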
def test():
def t(kl, bl):
b = 'b' * bl
r = rijndael('a' * kl, bl)
assert r.decrypt(r.encrypt(b)) == b
t(16, 16)
t(16, 24)
t(16, 32)
t(24, 16)
t(24, 24)
t(24, 32)
t(32, 16)
t(32, 24)
t(32, 32)
| apache-2.0 | -4,625,957,302,812,123,000 | 27.931122 | 116 | 0.432854 | false |
TanPhongPhan/ABC4GSD | ABC v3/library/basic_frames/ApplicationList.py | 2 | 6114 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import wx
import sys, os
sys.path.append(os.getcwd())
import library.constants as CO
from library.utils import utils as UT
from library.client.ABCAppInterface import ABCAppInterface
class ApplicationList(wx.Frame, ABCAppInterface):
def __init__(self, parent, id, title, param):
wx.Frame.__init__(self, parent, id, title)
ABCAppInterface.__init__(self, 'ApplicationList')
self._definedProperties = ['pos_x', 'pos_y', 'dim_x', 'dim_y']
        self.lastActivity = None
self.__order = [ ('ID', '_id'), ('Name', 'name'), ('Status', 'state')]
self.__conversion = {}
self.__conversion['state'] = {CO.application_DETACHED:'Detached', CO.application_PINNED:'Pinned', CO.application_FULLSYNC:'Fullsync', CO.abc_UNKNOWN:'Unknown', CO.abc_INITIALIZED:'Initialized'}
self.__obj_content = {}
self.__obj_property = {}
self.__obj_content["list"] = [ "ListCtrl", wx.ListCtrl(self, style=wx.LC_REPORT
| wx.BORDER_NONE
| wx.LC_EDIT_LABELS
| wx.LC_SORT_ASCENDING) ]
for i, x in enumerate(self.__order):
self.__obj_content["list"][1].InsertColumn(i, x[0])
self.CreateStatusBar()
menuBar = wx.MenuBar()
# filemenu= wx.Menu()
#
# filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
# menuBar.Append(filemenu,"&File")
#
# filemenu= wx.Menu()
# filemenu.Append(CO.menu_USER_ADD,"New"," Create new app")
# filemenu.Append(CO.menu_USER_MODIFY,"Modify"," Modify current app")
#
# menuBar.Append(filemenu,"&App")
#
# filemenu= wx.Menu()
# filemenu.Append(CO.menu_APPLICATION_PIN,"Pin"," Share the application with the activity partecipants")
# menuBar.Append(filemenu,"&Action")
self.SetMenuBar(menuBar)
wx.EVT_MENU(self, wx.ID_EXIT, self.onExit)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.Bind(wx.EVT_MENU, self.onNew, id=CO.menu_USER_ADD)
self.Bind(wx.EVT_MENU, self.onModify, id=CO.menu_USER_MODIFY)
box = wx.BoxSizer( wx.VERTICAL )
box.Add( self.__obj_content[ "list" ][1], 1, wx.EXPAND )
self.SetSizer( box )
self.SetAutoLayout(1)
box.Fit(self)
self.resume()
self.Show(1)
def killOperation(self):
self.Close(True)
def suspendOperation(self):
self.setProperty('pos_x', self.GetPosition()[0])
self.setProperty('pos_y', self.GetPosition()[1])
self.setProperty('dim_x', self.GetSize()[0])
self.setProperty('dim_y', self.GetSize()[1])
def resumeOperation(self):
try:
self.SetPosition( (int(self.getProperty('pos_x')), int(self.getProperty('pos_y'))) )
self.SetSize( (int(self.getProperty('dim_x')), int(self.getProperty('dim_y'))) )
except:
pass
self.deleteAllItems(True)
self.refreshApplications()
self.setApplicationList()
for x in self.__obj_property.keys():
            self.standardSubscription(x)
    def standardSubscription(self, id):
self.subscribe('abc.application.%s.state'% (id, ), self.changeState )
# self.subscribe('abc.application.%s.name'% (id, ), self.changeName )
    def personalHandler(self, ch, msg):
        msg_type = msg[0]
        msg = msg[1]
        if msg_type == 'INFO':
            appId = long(msg.split(' ', 1)[0])
            field = msg.split(' ', 1)[1].split('=')[0]
            value = msg.split('=')[1]
            # Keep the per-application property list aligned with self.__order
            if appId not in self.__obj_property:
                self.__obj_property[appId] = [appId] + [None] * (len(self.__order) - 1)
            for i, (_, attr) in enumerate(self.__order):
                if attr == field:
                    self.__obj_property[appId][i] = value
            self.setApplicationList()
        if msg_type == 'CMD':
            if 'INIT' in msg:
                self.deleteAllItems(True)
def refreshApplications(self):
resp = self._query('abc.activity.%s.application'%(self._actId, ))
if isinstance(resp, long):
resp = [resp]
elif isinstance(resp, str):
resp = eval(resp)
        if resp is None:
return
for id in resp:
if id in self.__obj_property.keys():
del self.__obj_property[id]
self.__obj_property[id] = [id]
for y in self.__order[1:]:
self.__obj_property[id].append( self._query('abc.application.%s.%s'%(id, y[1])) )
def deleteAllItems(self, content = False):
self.__obj_content["list"][1].DeleteAllItems()
if content:
self.__obj_property = {}
def setApplicationList(self):
self.deleteAllItems()
for i, x in enumerate(self.__obj_property.keys()):
for l, y in enumerate(self.__order):
val = self.__obj_property[x][l]
if y[1] in self.__conversion.keys():
val = self.__conversion[y[1]][int(val)]
if isinstance(val, int) or isinstance(val, long):
val = str(val)
if not l:
self.__obj_content["list"][1].InsertStringItem(i, val)
else:
self.__obj_content["list"][1].SetStringItem(i, l, val)
def onNew(self, event):
pass
def onModify(self, event):
pass
def onExit(self,e):
self.Close( True )
def onClose(self, e):
#self.Suspend()
self.Destroy()
def changeState(self, wip):
wip = wip.split('.')
state = wip[-1]
id = long(wip[2])
self.__obj_property[id][2] = state
self.setApplicationList()
def main( param = None ):
global app
app = wx.PySimpleApp()
frame = ApplicationList(None,-1,"Application List", param)
app.SetExitOnFrameDelete(True)
app.MainLoop()
if __name__ == "__main__":
main()
| mit | -8,339,285,038,717,697,000 | 32.048649 | 209 | 0.535002 | false |
aabbox/kbengine | kbe/res/scripts/common/Lib/test/test_code_module.py | 79 | 3009 | "Test InteractiveConsole and InteractiveInterpreter from code module"
import sys
import unittest
from contextlib import ExitStack
from unittest import mock
from test import support
code = support.import_module('code')
class TestInteractiveConsole(unittest.TestCase):
def setUp(self):
self.console = code.InteractiveConsole()
self.mock_sys()
def mock_sys(self):
"Mock system environment for InteractiveConsole"
# use exit stack to match patch context managers to addCleanup
stack = ExitStack()
self.addCleanup(stack.close)
self.infunc = stack.enter_context(mock.patch('code.input',
create=True))
self.stdout = stack.enter_context(mock.patch('code.sys.stdout'))
self.stderr = stack.enter_context(mock.patch('code.sys.stderr'))
prepatch = mock.patch('code.sys', wraps=code.sys, spec=code.sys)
self.sysmod = stack.enter_context(prepatch)
if sys.excepthook is sys.__excepthook__:
self.sysmod.excepthook = self.sysmod.__excepthook__
def test_ps1(self):
self.infunc.side_effect = EOFError('Finished')
self.console.interact()
self.assertEqual(self.sysmod.ps1, '>>> ')
def test_ps2(self):
self.infunc.side_effect = EOFError('Finished')
self.console.interact()
self.assertEqual(self.sysmod.ps2, '... ')
def test_console_stderr(self):
self.infunc.side_effect = ["'antioch'", "", EOFError('Finished')]
self.console.interact()
for call in list(self.stdout.method_calls):
if 'antioch' in ''.join(call[1]):
break
else:
raise AssertionError("no console stdout")
def test_syntax_error(self):
self.infunc.side_effect = ["undefined", EOFError('Finished')]
self.console.interact()
for call in self.stderr.method_calls:
if 'NameError' in ''.join(call[1]):
break
else:
raise AssertionError("No syntax error from console")
def test_sysexcepthook(self):
self.infunc.side_effect = ["raise ValueError('')",
EOFError('Finished')]
hook = mock.Mock()
self.sysmod.excepthook = hook
self.console.interact()
self.assertTrue(hook.called)
def test_banner(self):
# with banner
self.infunc.side_effect = EOFError('Finished')
self.console.interact(banner='Foo')
self.assertEqual(len(self.stderr.method_calls), 2)
banner_call = self.stderr.method_calls[0]
self.assertEqual(banner_call, ['write', ('Foo\n',), {}])
# no banner
self.stderr.reset_mock()
self.infunc.side_effect = EOFError('Finished')
self.console.interact(banner='')
self.assertEqual(len(self.stderr.method_calls), 1)
def test_main():
support.run_unittest(TestInteractiveConsole)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 4,484,334,654,329,402,400 | 33.988372 | 73 | 0.615155 | false |
dorotan/pythontraining | env/Lib/base64.py | 15 | 20442 | #! /usr/bin/env python3
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 2045 Base64 encodings
'encode', 'decode', 'encodebytes', 'decodebytes',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Base85 and Ascii85 encodings
'b85encode', 'b85decode', 'a85encode', 'a85decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
bytes_types = (bytes, bytearray) # Types acceptable as binary data
def _bytes_from_decode_data(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise ValueError('string argument should contain only ASCII characters')
if isinstance(s, bytes_types):
return s
try:
return memoryview(s).tobytes()
except TypeError:
raise TypeError("argument should be a bytes-like object or ASCII "
"string, not %r" % s.__class__.__name__) from None
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode the bytes-like object s using Base64 and return a bytes object.
Optional altchars should be a byte string of length 2 which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
return encoded.translate(bytes.maketrans(b'+/', altchars))
return encoded
def b64decode(s, altchars=None, validate=False):
"""Decode the Base64 encoded bytes-like object or ASCII string s.
Optional altchars must be a bytes-like object or ASCII string of length 2
which specifies the alternative alphabet used instead of the '+' and '/'
characters.
The result is returned as a bytes object. A binascii.Error is raised if
s is incorrectly padded.
If validate is False (the default), characters that are neither in the
normal base-64 alphabet nor the alternative alphabet are discarded prior
to the padding check. If validate is True, these non-alphabet characters
in the input result in a binascii.Error.
"""
s = _bytes_from_decode_data(s)
if altchars is not None:
altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = s.translate(bytes.maketrans(altchars, b'+/'))
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
raise binascii.Error('Non-base64 digit found')
return binascii.a2b_base64(s)
def standard_b64encode(s):
"""Encode bytes-like object s using the standard Base64 alphabet.
The result is returned as a bytes object.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode bytes encoded with the standard Base64 alphabet.
Argument s is a bytes-like object or ASCII string to decode. The result
is returned as a bytes object. A binascii.Error is raised if the input
is incorrectly padded. Characters that are not in the standard alphabet
are discarded prior to the padding check.
"""
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
"""Encode bytes using the URL- and filesystem-safe Base64 alphabet.
Argument s is a bytes-like object to encode. The result is returned as a
bytes object. The alphabet uses '-' instead of '+' and '_' instead of
'/'.
"""
return b64encode(s).translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
"""Decode bytes using the URL- and filesystem-safe Base64 alphabet.
Argument s is a bytes-like object or ASCII string to decode. The result
is returned as a bytes object. A binascii.Error is raised if the input
is incorrectly padded. Characters that are not in the URL-safe base-64
alphabet, and are not a plus '+' or slash '/', are discarded prior to the
padding check.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
s = _bytes_from_decode_data(s)
s = s.translate(_urlsafe_decode_translation)
return b64decode(s)
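# Illustrative round trip through the URL-safe alphabet (a sketch; the
# sample bytes are an assumption chosen so that '-' and '_' appear):
#
#   >>> urlsafe_b64encode(b'\xfb\xef\xff')
#   b'--__'
#   >>> urlsafe_b64decode(b'--__')
#   b'\xfb\xef\xff'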
# Base32 encoding/decoding must be done in Python
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
_b32tab2 = None
_b32rev = None
def b32encode(s):
"""Encode the bytes-like object s using Base32 and return a bytes object.
"""
global _b32tab2
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32tab2 is None:
b32tab = [bytes((i,)) for i in _b32alphabet]
_b32tab2 = [a + b for a in b32tab for b in b32tab]
b32tab = None
if not isinstance(s, bytes_types):
s = memoryview(s).tobytes()
leftover = len(s) % 5
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + bytes(5 - leftover) # Don't use += !
encoded = bytearray()
from_bytes = int.from_bytes
b32tab2 = _b32tab2
for i in range(0, len(s), 5):
c = from_bytes(s[i: i + 5], 'big')
encoded += (b32tab2[c >> 30] + # bits 1 - 10
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
b32tab2[c & 0x3ff] # bits 31 - 40
)
# Adjust for any leftover partial quanta
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
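# Illustrative padding behaviour (a sketch; the one-byte input is an
# assumption chosen for the example):
#
#   >>> b32encode(b'a')
#   b'ME======'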
def b32decode(s, casefold=False, map01=None):
"""Decode the Base32 encoded bytes-like object or ASCII string s.
Optional casefold is a flag specifying whether a lowercase alphabet is
acceptable as input. For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The result is returned as a bytes object. A binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
global _b32rev
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32rev is None:
_b32rev = {v: k for k, v in enumerate(_b32alphabet)}
s = _bytes_from_decode_data(s)
if len(s) % 8:
raise binascii.Error('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
l = len(s)
s = s.rstrip(b'=')
padchars = l - len(s)
# Now decode the full quanta
decoded = bytearray()
b32rev = _b32rev
for i in range(0, len(s), 8):
quanta = s[i: i + 8]
acc = 0
try:
for c in quanta:
acc = (acc << 5) + b32rev[c]
except KeyError:
raise binascii.Error('Non-base32 digit found') from None
decoded += acc.to_bytes(5, 'big')
# Process the last, partial quanta
if padchars:
acc <<= 5 * padchars
last = acc.to_bytes(5, 'big')
if padchars == 1:
decoded[-5:] = last[:-1]
elif padchars == 3:
decoded[-5:] = last[:-2]
elif padchars == 4:
decoded[-5:] = last[:-3]
elif padchars == 6:
decoded[-5:] = last[:-4]
else:
raise binascii.Error('Incorrect padding')
return bytes(decoded)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode the bytes-like object s using Base16 and return a bytes object.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode the Base16 encoded bytes-like object or ASCII string s.
Optional casefold is a flag specifying whether a lowercase alphabet is
acceptable as input. For security purposes, the default is False.
The result is returned as a bytes object. A binascii.Error is raised if
s is incorrectly padded or if there are non-alphabet characters present
in the input.
"""
s = _bytes_from_decode_data(s)
if casefold:
s = s.upper()
if re.search(b'[^0-9A-F]', s):
raise binascii.Error('Non-base16 digit found')
return binascii.unhexlify(s)
#
# Ascii85 encoding/decoding
#
_a85chars = None
_a85chars2 = None
_A85START = b"<~"
_A85END = b"~>"
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
# Helper function for a85encode and b85encode
if not isinstance(b, bytes_types):
b = memoryview(b).tobytes()
padding = (-len(b)) % 4
if padding:
b = b + b'\0' * padding
words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)
chunks = [b'z' if foldnuls and not word else
b'y' if foldspaces and word == 0x20202020 else
(chars2[word // 614125] +
chars2[word // 85 % 7225] +
chars[word % 85])
for word in words]
if padding and not pad:
if chunks[-1] == b'z':
chunks[-1] = chars[0] * 5
chunks[-1] = chunks[-1][:-padding]
return b''.join(chunks)
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
"""Encode bytes-like object b using Ascii85 and return a bytes object.
foldspaces is an optional flag that uses the special short sequence 'y'
instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
feature is not supported by the "standard" Adobe encoding.
wrapcol controls whether the output should have newline (b'\\n') characters
added to it. If this is non-zero, each output line will be at most this
many characters long.
pad controls whether the input is padded to a multiple of 4 before
encoding. Note that the btoa implementation always pads.
adobe controls whether the encoded byte sequence is framed with <~ and ~>,
which is used by the Adobe implementation.
"""
global _a85chars, _a85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _a85chars is None:
_a85chars = [bytes((i,)) for i in range(33, 118)]
_a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)
if adobe:
result = _A85START + result
if wrapcol:
wrapcol = max(2 if adobe else 1, wrapcol)
chunks = [result[i: i + wrapcol]
for i in range(0, len(result), wrapcol)]
if adobe:
if len(chunks[-1]) + 2 > wrapcol:
chunks.append(b'')
result = b'\n'.join(chunks)
if adobe:
result += _A85END
return result
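# Illustrative Ascii85 round trip (a sketch; the sample bytes are an
# assumption chosen for the example):
#
#   >>> a85encode(b'hello')
#   b'BOu!rDZ'
#   >>> a85decode(b'BOu!rDZ')
#   b'hello'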
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
"""Decode the Ascii85 encoded bytes-like object or ASCII string b.
foldspaces is a flag that specifies whether the 'y' short sequence should be
accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
not supported by the "standard" Adobe encoding.
adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
is framed with <~ and ~>).
ignorechars should be a byte string containing characters to ignore from the
input. This should only contain whitespace characters, and by default
contains all whitespace characters in ASCII.
The result is returned as a bytes object.
"""
b = _bytes_from_decode_data(b)
if adobe:
if not b.endswith(_A85END):
raise ValueError(
"Ascii85 encoded byte sequences must end "
"with {!r}".format(_A85END)
)
if b.startswith(_A85START):
b = b[2:-2] # Strip off start/end markers
else:
b = b[:-2]
#
# We have to go through this stepwise, so as to ignore spaces and handle
# special short sequences
#
packI = struct.Struct('!I').pack
decoded = []
decoded_append = decoded.append
curr = []
curr_append = curr.append
curr_clear = curr.clear
for x in b + b'u' * 4:
if b'!'[0] <= x <= b'u'[0]:
curr_append(x)
if len(curr) == 5:
acc = 0
for x in curr:
acc = 85 * acc + (x - 33)
try:
decoded_append(packI(acc))
except struct.error:
raise ValueError('Ascii85 overflow') from None
curr_clear()
elif x == b'z'[0]:
if curr:
raise ValueError('z inside Ascii85 5-tuple')
decoded_append(b'\0\0\0\0')
elif foldspaces and x == b'y'[0]:
if curr:
raise ValueError('y inside Ascii85 5-tuple')
decoded_append(b'\x20\x20\x20\x20')
elif x in ignorechars:
# Skip whitespace
continue
else:
raise ValueError('Non-Ascii85 digit found: %c' % x)
result = b''.join(decoded)
padding = 4 - len(curr)
if padding:
# Throw away the extra padding
result = result[:-padding]
return result
# The following code is originally taken (with permission) from Mercurial
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars = None
_b85chars2 = None
_b85dec = None
def b85encode(b, pad=False):
"""Encode bytes-like object b in base85 format and return a bytes object.
If pad is true, the input is padded with b'\\0' so its length is a multiple of
4 bytes before encoding.
"""
global _b85chars, _b85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85chars is None:
_b85chars = [bytes((i,)) for i in _b85alphabet]
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
return _85encode(b, _b85chars, _b85chars2, pad)
def b85decode(b):
"""Decode the base85-encoded bytes-like object or ASCII string b
The result is returned as a bytes object.
"""
global _b85dec
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85dec is None:
_b85dec = [None] * 256
for i, c in enumerate(_b85alphabet):
_b85dec[c] = i
b = _bytes_from_decode_data(b)
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in chunk:
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(chunk):
if _b85dec[c] is None:
raise ValueError('bad base85 character at position %d'
% (i + j)) from None
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i) from None
result = b''.join(out)
if padding:
result = result[:-padding]
return result
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though. The files should be opened in binary mode.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file; input and output are binary files."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file; input and output are binary files."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def _input_type_check(s):
try:
m = memoryview(s)
except TypeError as err:
msg = "expected bytes-like object, not %s" % s.__class__.__name__
raise TypeError(msg) from err
if m.format not in ('c', 'b', 'B'):
msg = ("expected single byte elements, not %r from %s" %
(m.format, s.__class__.__name__))
raise TypeError(msg)
if m.ndim != 1:
msg = ("expected 1-D data, not %d-D data from %s" %
(m.ndim, s.__class__.__name__))
raise TypeError(msg)
def encodebytes(s):
"""Encode a bytestring into a bytes object containing multiple lines
of base-64 data."""
_input_type_check(s)
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return b"".join(pieces)
def encodestring(s):
"""Legacy alias of encodebytes()."""
import warnings
warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
DeprecationWarning, 2)
return encodebytes(s)
def decodebytes(s):
"""Decode a bytestring of base-64 data into a bytes object."""
_input_type_check(s)
return binascii.a2b_base64(s)
def decodestring(s):
"""Legacy alias of decodebytes()."""
import warnings
warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
DeprecationWarning, 2)
return decodebytes(s)
# Usable as a script...
def main():
"""Small main program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("""usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout.buffer)
else:
func(sys.stdin.buffer, sys.stdout.buffer)
def test():
s0 = b"Aladdin:open sesame"
print(repr(s0))
s1 = encodebytes(s0)
print(repr(s1))
s2 = decodebytes(s1)
print(repr(s2))
assert s0 == s2
if __name__ == '__main__':
main()
| apache-2.0 | 5,730,906,868,646,942,000 | 33.013311 | 84 | 0.608551 | false |
larks/mbed | workspace_tools/host_tests/udpecho_server_auto.py | 101 | 2515 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sys
import uuid
from sys import stdout
from socket import socket, AF_INET, SOCK_DGRAM
class UDPEchoServerTest():
ECHO_SERVER_ADDRESS = ""
ECHO_PORT = 0
s = None # Socket
PATTERN_SERVER_IP = "Server IP Address is (\d+).(\d+).(\d+).(\d+):(\d+)"
re_detect_server_ip = re.compile(PATTERN_SERVER_IP)
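    # Example of a line this pattern is intended to match (illustrative
    # only; the address and port are assumptions):
    #   "Server IP Address is 192.168.0.10:7195"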
def test(self, selftest):
result = True
serial_ip_msg = selftest.mbed.serial_readline()
if serial_ip_msg is None:
return selftest.RESULT_IO_SERIAL
selftest.notify(serial_ip_msg)
# Searching for IP address and port prompted by server
m = self.re_detect_server_ip.search(serial_ip_msg)
if m and len(m.groups()):
self.ECHO_SERVER_ADDRESS = ".".join(m.groups()[:4])
self.ECHO_PORT = int(m.groups()[4]) # must be integer for socket.connect method
selftest.notify("HOST: UDP Server found at: " + self.ECHO_SERVER_ADDRESS + ":" + str(self.ECHO_PORT))
# We assume this test fails so can't send 'error' message to server
try:
self.s = socket(AF_INET, SOCK_DGRAM)
except Exception, e:
self.s = None
selftest.notify("HOST: Socket error: %s"% e)
return selftest.RESULT_ERROR
for i in range(0, 100):
TEST_STRING = str(uuid.uuid4())
self.s.sendto(TEST_STRING, (self.ECHO_SERVER_ADDRESS, self.ECHO_PORT))
data = self.s.recv(len(TEST_STRING))
received_str = repr(data)[1:-1]
if TEST_STRING != received_str:
result = False
break
sys.stdout.write('.')
stdout.flush()
else:
result = False
if self.s is not None:
self.s.close()
return selftest.RESULT_SUCCESS if result else selftest.RESULT_FAILURE
| apache-2.0 | 7,854,504,982,349,431,000 | 35.985294 | 113 | 0.608748 | false |
vitan/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/validate_templates.py | 35 | 3355 | import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import color_style
from django.template.base import add_to_builtins
from django.template.loaders.filesystem import Loader
from django_extensions.utils import validatingtemplatetags
from django_extensions.management.utils import signalcommand
#
# TODO: Render the template with fake request object ?
#
class Command(BaseCommand):
args = ''
help = "Validate templates on syntax and compile errors"
option_list = BaseCommand.option_list + (
make_option('--break', '-b', action='store_true', dest='break',
default=False, help="Break on first error."),
make_option('--check-urls', '-u', action='store_true', dest='check_urls',
default=False, help="Check url tag view names are quoted appropriately"),
make_option('--force-new-urls', '-n', action='store_true', dest='force_new_urls',
default=False, help="Error on usage of old style url tags (without {% load urls from future %}"),
make_option('--include', '-i', action='append', dest='includes',
default=[], help="Append these paths to TEMPLATE_DIRS")
)
@signalcommand
def handle(self, *args, **options):
from django.conf import settings
style = color_style()
template_dirs = set(settings.TEMPLATE_DIRS)
template_dirs |= set(options.get('includes', []))
template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))
settings.TEMPLATE_DIRS = list(template_dirs)
settings.TEMPLATE_DEBUG = True
verbosity = int(options.get('verbosity', 1))
errors = 0
template_loader = Loader()
# Replace built in template tags with our own validating versions
if options.get('check_urls', False):
add_to_builtins('django_extensions.utils.validatingtemplatetags')
for template_dir in template_dirs:
for root, dirs, filenames in os.walk(template_dir):
for filename in filenames:
if filename.endswith(".swp"):
continue
if filename.endswith("~"):
continue
filepath = os.path.join(root, filename)
if verbosity > 1:
print(filepath)
validatingtemplatetags.before_new_template(options.get('force_new_urls', False))
try:
template_loader.load_template(filename, [root])
except Exception as e:
errors += 1
print("%s: %s" % (filepath, style.ERROR("%s %s" % (e.__class__.__name__, str(e)))))
template_errors = validatingtemplatetags.get_template_errors()
for origin, line, message in template_errors:
errors += 1
print("%s(%s): %s" % (origin, line, style.ERROR(message)))
if errors and options.get('break', False):
raise CommandError("Errors found")
if errors:
raise CommandError("%s errors found" % errors)
print("%s errors found" % errors)
| apache-2.0 | -1,038,923,111,988,380,800 | 44.958904 | 117 | 0.584203 | false |
c0defreak/python-for-android | python3-alpha/python3-src/Tools/unicode/mkstringprep.py | 47 | 10046 | import re, unicodedata, sys
if sys.maxunicode == 65535:
raise RuntimeError("need UCS-4 Python")
def gen_category(cats):
for i in range(0, 0x110000):
if unicodedata.category(chr(i)) in cats:
yield(i)
def gen_bidirectional(cats):
for i in range(0, 0x110000):
if unicodedata.bidirectional(chr(i)) in cats:
yield(i)
def compact_set(l):
single = []
tuple = []
prev = None
span = 0
for e in l:
if prev is None:
prev = e
span = 0
continue
if prev+span+1 != e:
if span > 2:
tuple.append((prev,prev+span+1))
else:
for i in range(prev, prev+span+1):
single.append(i)
prev = e
span = 0
else:
span += 1
if span:
tuple.append((prev,prev+span+1))
else:
single.append(prev)
tuple = " + ".join(["list(range(%d,%d))" % t for t in tuple])
if not single:
return "set(%s)" % tuple
if not tuple:
return "set(%s)" % repr(single)
return "set(%s + %s)" % (repr(single),tuple)
############## Read the tables in the RFC #######################
data = open("rfc3454.txt").readlines()
tables = []
curname = None
for l in data:
l = l.strip()
if not l:
continue
# Skip RFC page breaks
if l.startswith("Hoffman & Blanchet") or\
l.startswith("RFC 3454"):
continue
# Find start/end lines
m = re.match("----- (Start|End) Table ([A-Z](.[0-9])+) -----", l)
if m:
if m.group(1) == "Start":
if curname:
raise RuntimeError("Double Start", (curname, l))
curname = m.group(2)
table = {}
tables.append((curname, table))
continue
else:
if not curname:
raise RuntimeError("End without start", l)
curname = None
continue
if not curname:
continue
# Now we are in a table
fields = l.split(";")
if len(fields) > 1:
# Drop comment field
fields = fields[:-1]
if len(fields) == 1:
fields = fields[0].split("-")
if len(fields) > 1:
# range
try:
start, end = fields
except ValueError:
raise RuntimeError("Unpacking problem", l)
else:
start = end = fields[0]
start = int(start, 16)
end = int(end, 16)
for i in range(start, end+1):
table[i] = i
else:
code, value = fields
value = value.strip()
if value:
value = [int(v, 16) for v in value.split(" ")]
else:
# table B.1
value = None
table[int(code, 16)] = value
########### Generate compact Python versions of the tables #############
print("""# This file is generated by mkstringprep.py. DO NOT EDIT.
\"\"\"Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
\"\"\"
import unicodedata
""")
print("assert unicodedata.unidata_version == %s" % repr(unicodedata.unidata_version))
# A.1 is the table of unassigned characters
# XXX Plane 15 PUA is listed as unassigned in Python.
name, table = tables[0]
del tables[0]
assert name == "A.1"
table = set(table.keys())
Cn = set(gen_category(["Cn"]))
# FDD0..FDEF are process internal codes
Cn -= set(range(0xFDD0, 0xFDF0))
# not a character
Cn -= set(range(0xFFFE, 0x110000, 0x10000))
Cn -= set(range(0xFFFF, 0x110000, 0x10000))
# assert table == Cn
print("""
def in_table_a1(code):
if unicodedata.category(code) != 'Cn': return False
c = ord(code)
if 0xFDD0 <= c < 0xFDF0: return False
return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
""")
# B.1 cannot easily be derived
name, table = tables[0]
del tables[0]
assert name == "B.1"
table = sorted(table.keys())
print("""
b1_set = """ + compact_set(table) + """
def in_table_b1(code):
return ord(code) in b1_set
""")
# B.2 and B.3 is case folding.
# It takes CaseFolding.txt into account, which is
# not available in the Python database. Since
# B.2 is derived from B.3, we process B.3 first.
# B.3 supposedly *is* CaseFolding-3.2.0.txt.
name, table_b2 = tables[0]
del tables[0]
assert name == "B.2"
name, table_b3 = tables[0]
del tables[0]
assert name == "B.3"
# B.3 is mostly Python's .lower, except for a number
# of special cases, e.g. considering canonical forms.
b3_exceptions = {}
for k,v in table_b2.items():
    if list(map(ord, chr(k).lower())) != v:
        b3_exceptions[k] = "".join(map(chr, v))
b3 = sorted(b3_exceptions.items())
print("""
b3_exceptions = {""")
for i,(k,v) in enumerate(b3):
print("0x%x:%s," % (k, repr(v)), end=' ')
if i % 4 == 3:
print()
print("}")
print("""
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None: return r
return code.lower()
""")
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None: return r
return code.lower()
# B.2 is case folding for NFKC. This is the same as B.3,
# except where NormalizeWithKC(Fold(a)) !=
# NormalizeWithKC(Fold(NormalizeWithKC(Fold(a))))
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize("NFKC", al)
bl = "".join([map_table_b3(ch) for ch in b])
c = unicodedata.normalize("NFKC", bl)
if b != c:
return c
else:
return al
specials = {}
for k,v in table_b2.items():
if list(map(ord, map_table_b2(chr(k)))) != v:
specials[k] = v
# B.3 should not add any additional special cases
assert specials == {}
print("""
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize("NFKC", al)
bl = u"".join([map_table_b3(ch) for ch in b])
c = unicodedata.normalize("NFKC", bl)
if b != c:
return c
else:
return al
""")
# C.1.1 is a table with a single character
name, table = tables[0]
del tables[0]
assert name == "C.1.1"
assert table == {0x20:0x20}
print("""
def in_table_c11(code):
    return code == " "
""")
# C.1.2 is the rest of all space characters
name, table = tables[0]
del tables[0]
assert name == "C.1.2"
# table = set(table.keys())
# Zs = set(gen_category(["Zs"])) - set([0x20])
# assert Zs == table
print("""
def in_table_c12(code):
return unicodedata.category(code) == "Zs" and code != u" "
def in_table_c11_c12(code):
return unicodedata.category(code) == "Zs"
""")
# C.2.1 ASCII control characters
name, table_c21 = tables[0]
del tables[0]
assert name == "C.2.1"
Cc = set(gen_category(["Cc"]))
Cc_ascii = Cc & set(range(128))
table_c21 = set(table_c21.keys())
assert Cc_ascii == table_c21
print("""
def in_table_c21(code):
return ord(code) < 128 and unicodedata.category(code) == "Cc"
""")
# C.2.2 Non-ASCII control characters. It also includes
# a number of characters in category Cf.
name, table_c22 = tables[0]
del tables[0]
assert name == "C.2.2"
Cc_nonascii = Cc - Cc_ascii
table_c22 = set(table_c22.keys())
assert len(Cc_nonascii - table_c22) == 0
specials = list(table_c22 - Cc_nonascii)
specials.sort()
print("""c22_specials = """ + compact_set(specials) + """
def in_table_c22(code):
c = ord(code)
if c < 128: return False
if unicodedata.category(code) == "Cc": return True
return c in c22_specials
def in_table_c21_c22(code):
return unicodedata.category(code) == "Cc" or \\
ord(code) in c22_specials
""")
# C.3 Private use
name, table = tables[0]
del tables[0]
assert name == "C.3"
Co = set(gen_category(["Co"]))
assert set(table.keys()) == Co
print("""
def in_table_c3(code):
return unicodedata.category(code) == "Co"
""")
# C.4 Non-character code points, xFFFE, xFFFF
# plus process internal codes
name, table = tables[0]
del tables[0]
assert name == "C.4"
nonchar = set(range(0xFDD0,0xFDF0))
nonchar.update(range(0xFFFE,0x110000,0x10000))
nonchar.update(range(0xFFFF,0x110000,0x10000))
table = set(table.keys())
assert table == nonchar
print("""
def in_table_c4(code):
c = ord(code)
if c < 0xFDD0: return False
if c < 0xFDF0: return True
return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF)
""")
# C.5 Surrogate codes
name, table = tables[0]
del tables[0]
assert name == "C.5"
Cs = set(gen_category(["Cs"]))
assert set(table.keys()) == Cs
print("""
def in_table_c5(code):
return unicodedata.category(code) == "Cs"
""")
# C.6 Inappropriate for plain text
name, table = tables[0]
del tables[0]
assert name == "C.6"
table = sorted(table.keys())
print("""
c6_set = """ + compact_set(table) + """
def in_table_c6(code):
return ord(code) in c6_set
""")
# C.7 Inappropriate for canonical representation
name, table = tables[0]
del tables[0]
assert name == "C.7"
table = sorted(table.keys())
print("""
c7_set = """ + compact_set(table) + """
def in_table_c7(code):
return ord(code) in c7_set
""")
# C.8 Change display properties or are deprecated
name, table = tables[0]
del tables[0]
assert name == "C.8"
table = sorted(table.keys())
print("""
c8_set = """ + compact_set(table) + """
def in_table_c8(code):
return ord(code) in c8_set
""")
# C.9 Tagging characters
name, table = tables[0]
del tables[0]
assert name == "C.9"
table = sorted(table.keys())
print("""
c9_set = """ + compact_set(table) + """
def in_table_c9(code):
return ord(code) in c9_set
""")
# D.1 Characters with bidirectional property "R" or "AL"
name, table = tables[0]
del tables[0]
assert name == "D.1"
RandAL = set(gen_bidirectional(["R","AL"]))
assert set(table.keys()) == RandAL
print("""
def in_table_d1(code):
return unicodedata.bidirectional(code) in ("R","AL")
""")
# D.2 Characters with bidirectional property "L"
name, table = tables[0]
del tables[0]
assert name == "D.2"
L = set(gen_bidirectional(["L"]))
assert set(table.keys()) == L
print("""
def in_table_d2(code):
return unicodedata.bidirectional(code) == "L"
""")
| apache-2.0 | 6,488,502,835,338,982,000 | 22.637647 | 85 | 0.594167 | false |
Daniex/horizon | openstack_dashboard/dashboards/project/data_processing/jobs/tabs.py | 38 | 1172 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class GeneralTab(tabs.Tab):
name = _("General Info")
slug = "job_details_tab"
template_name = ("project/data_processing.jobs/_details.html")
def get_context_data(self, request):
job_id = self.tab_group.kwargs['job_id']
job = saharaclient.job_get(request, job_id)
return {"job": job}
class JobDetailsTabs(tabs.TabGroup):
slug = "job_details"
tabs = (GeneralTab,)
sticky = True
| apache-2.0 | 12,117,635,423,639,100 | 29.051282 | 69 | 0.71843 | false |
wd5/jangr | djangoappengine/tests/not_return_sets.py | 36 | 4303 | import datetime
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.test import TestCase
from .testmodels import FieldsWithOptionsModel, OrderedModel, \
SelfReferenceModel
class NonReturnSetsTest(TestCase):
floats = [5.3, 2.6, 9.1, 1.58, 2.4]
emails = ['[email protected]', '[email protected]',
'[email protected]', '[email protected]', '[email protected]']
def setUp(self):
for index, (float, email) in enumerate(zip(NonReturnSetsTest.floats,
NonReturnSetsTest.emails)):
self.last_save_time = datetime.datetime.now().time()
ordered_instance = OrderedModel(priority=index, pk=index + 1)
ordered_instance.save()
model = FieldsWithOptionsModel(floating_point=float,
integer=int(float), email=email,
time=self.last_save_time,
foreign_key=ordered_instance)
model.save()
def test_get(self):
self.assertEquals(
FieldsWithOptionsModel.objects.get(
email='[email protected]').email,
'[email protected]')
# Test exception when matching multiple entities.
self.assertRaises(MultipleObjectsReturned,
FieldsWithOptionsModel.objects.get,
integer=2)
# Test exception when entity does not exist.
self.assertRaises(ObjectDoesNotExist,
FieldsWithOptionsModel.objects.get,
floating_point=5.2)
    # TODO: Test create when Django's model.save_base is refactored.
# TODO: Test get_or_create when refactored.
def test_count(self):
self.assertEquals(
FieldsWithOptionsModel.objects.filter(integer=2).count(), 2)
def test_in_bulk(self):
self.assertEquals(
[key in ['[email protected]', '[email protected]']
for key in FieldsWithOptionsModel.objects.in_bulk(
['[email protected]', '[email protected]']).keys()],
[True, ] * 2)
def test_latest(self):
self.assertEquals(
FieldsWithOptionsModel.objects.latest('time').email,
'[email protected]')
def test_exists(self):
self.assertEquals(FieldsWithOptionsModel.objects.exists(), True)
def test_deletion(self):
# TODO: ForeignKeys will not be deleted! This has to be done
# via background tasks.
self.assertEquals(FieldsWithOptionsModel.objects.count(), 5)
FieldsWithOptionsModel.objects.get(email='[email protected]').delete()
self.assertEquals(FieldsWithOptionsModel.objects.count(), 4)
FieldsWithOptionsModel.objects.filter(email__in=[
'[email protected]', '[email protected]',
'[email protected]', ]).delete()
self.assertEquals(FieldsWithOptionsModel.objects.count(), 2)
def test_selfref_deletion(self):
entity = SelfReferenceModel()
entity.save()
entity.delete()
def test_foreign_key_fetch(self):
# Test fetching the ForeignKey.
ordered_instance = OrderedModel.objects.get(priority=2)
self.assertEquals(
FieldsWithOptionsModel.objects.get(integer=9).foreign_key,
ordered_instance)
def test_foreign_key_backward(self):
entity = OrderedModel.objects.all()[0]
self.assertEquals(entity.keys.count(), 1)
        # TODO: add() should save the added instance transactionally,
        # e.g. via force_insert.
new_foreign_key = FieldsWithOptionsModel(
floating_point=5.6, integer=3,
email='[email protected]', time=datetime.datetime.now())
entity.keys.add(new_foreign_key)
self.assertEquals(entity.keys.count(), 2)
# TODO: Add test for create.
entity.keys.remove(new_foreign_key)
self.assertEquals(entity.keys.count(), 1)
entity.keys.clear()
self.assertTrue(not entity.keys.exists())
entity.keys = [new_foreign_key, new_foreign_key]
self.assertEquals(entity.keys.count(), 1)
self.assertEquals(entity.keys.all()[0].integer, 3)
| bsd-3-clause | -755,480,898,497,230,200 | 39.214953 | 78 | 0.616314 | false |
GREO/GNU-Radio | gnuradio-examples/python/digital_voice/encdec.py | 10 | 2029 | #!/usr/bin/env python
#
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
from gnuradio import audio
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = OptionParser(option_class=eng_option)
parser.add_option("-I", "--audio-input", type="string", default="",
help="pcm input device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-O", "--audio-output", type="string", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
sample_rate = 8000
src = audio.source(sample_rate, options.audio_input)
tx = blks2.digital_voice_tx(self)
if_gain = gr.multiply_const_cc(10000)
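        # For now this is a straight loopback: the tx output is scaled by
        # if_gain and fed directly into the receiver.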
# channel simulator here...
rx = blks2.digital_voice_rx(self)
dst = audio.sink(sample_rate, options.audio_output)
self.connect(src, tx, if_gain, rx, dst)
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
| gpl-3.0 | 9,143,325,763,615,766,000 | 33.982759 | 83 | 0.646624 | false |
yakky/django | tests/check_framework/test_caches.py | 249 | 1114 | from django.core.checks.caches import E001
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckCacheSettingsAppDirsTest(SimpleTestCase):
VALID_CACHES_CONFIGURATION = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
INVALID_CACHES_CONFIGURATION = {
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
@property
def func(self):
from django.core.checks.caches import check_default_cache_is_configured
return check_default_cache_is_configured
@override_settings(CACHES=VALID_CACHES_CONFIGURATION)
def test_default_cache_included(self):
"""
Don't error if 'default' is present in CACHES setting.
"""
self.assertEqual(self.func(None), [])
@override_settings(CACHES=INVALID_CACHES_CONFIGURATION)
def test_default_cache_not_included(self):
"""
Error if 'default' not present in CACHES setting.
"""
self.assertEqual(self.func(None), [E001])
| bsd-3-clause | -8,606,499,409,156,496,000 | 30.828571 | 79 | 0.653501 | false |
westinedu/newertrends | django/conf/locale/pt_BR/formats.py | 231 | 1530 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \de N \de Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \de N \de Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \de Y'
MONTH_DAY_FORMAT = r'j \de F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause | 951,488,050,521,827,200 | 37.225 | 90 | 0.527796 | false |
TixLo/kademlia | lib/jsoncpp/devtools/antglob.py | 3 | 7695 | #!/usr/bin/env python
# encoding: utf-8
# Baptiste Lepilleur, 2009
from __future__ import print_function
from dircache import listdir
import re
import fnmatch
import os.path
# These fnmatch expressions are used by default to prune the directory tree
# while doing the recursive traversal in the glob_impl method of the glob function.
prune_dirs = '.git .bzr .hg .svn _MTN _darcs CVS SCCS '
# These fnmatch expressions are used by default to exclude files and dirs
# while doing the recursive traversal in the glob_impl method of the glob function.
##exclude_pats = prune_pats + '*~ #*# .#* %*% ._* .gitignore .cvsignore vssver.scc .DS_Store'.split()
# These ant_glob expressions are used by default to exclude files and dirs and also prune the directory tree
# while doing the recursive traversal in the glob_impl method of the glob function.
default_excludes = '''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/_darcs
**/_darcs/**
**/.DS_Store '''
DIR = 1
FILE = 2
DIR_LINK = 4
FILE_LINK = 8
LINKS = DIR_LINK | FILE_LINK
ALL_NO_LINK = DIR | FILE
ALL = DIR | FILE | LINKS
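# Capture groups: 1='/**/', 2='**/', 3='/**', 4='*', 5='/', 6=literal path chunk.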
_ANT_RE = re.compile(r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)')
def ant_pattern_to_re(ant_pattern):
"""Generates a regular expression from the ant pattern.
Matching convention:
**/a: match 'a', 'dir/a', 'dir1/dir2/a'
a/**/b: match 'a/b', 'a/c/b', 'a/d/c/b'
*.py: match 'script.py' but not 'a/script.py'
"""
rex = ['^']
next_pos = 0
sep_rex = r'(?:/|%s)' % re.escape(os.path.sep)
## print 'Converting', ant_pattern
for match in _ANT_RE.finditer(ant_pattern):
## print 'Matched', match.group()
## print match.start(0), next_pos
if match.start(0) != next_pos:
raise ValueError("Invalid ant pattern")
if match.group(1): # /**/
rex.append(sep_rex + '(?:.*%s)?' % sep_rex)
elif match.group(2): # **/
rex.append('(?:.*%s)?' % sep_rex)
elif match.group(3): # /**
rex.append(sep_rex + '.*')
elif match.group(4): # *
rex.append('[^/%s]*' % re.escape(os.path.sep))
elif match.group(5): # /
rex.append(sep_rex)
else: # somepath
rex.append(re.escape(match.group(6)))
next_pos = match.end()
rex.append('$')
return re.compile(''.join(rex))
def _as_list(l):
if isinstance(l, basestring):
return l.split()
return l
def glob(dir_path,
includes = '**/*',
excludes = default_excludes,
entry_type = FILE,
prune_dirs = prune_dirs,
max_depth = 25):
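    """Return the list of paths under dir_path matching the ant-style
    includes patterns and not matching excludes, keeping only entries whose
    type is selected by entry_type and pruning directories that match
    prune_dirs. Note: max_depth is currently not enforced by the traversal
    below.
    """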
include_filter = [ant_pattern_to_re(p) for p in _as_list(includes)]
exclude_filter = [ant_pattern_to_re(p) for p in _as_list(excludes)]
prune_dirs = [p.replace('/',os.path.sep) for p in _as_list(prune_dirs)]
dir_path = dir_path.replace('/',os.path.sep)
entry_type_filter = entry_type
def is_pruned_dir(dir_name):
for pattern in prune_dirs:
if fnmatch.fnmatch(dir_name, pattern):
return True
return False
def apply_filter(full_path, filter_rexs):
"""Return True if at least one of the filter regular expression match full_path."""
for rex in filter_rexs:
if rex.match(full_path):
return True
return False
def glob_impl(root_dir_path):
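        # Iterative traversal; child_dirs acts as a LIFO stack, so the walk is depth-first.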
child_dirs = [root_dir_path]
while child_dirs:
dir_path = child_dirs.pop()
for entry in listdir(dir_path):
full_path = os.path.join(dir_path, entry)
## print 'Testing:', full_path,
is_dir = os.path.isdir(full_path)
if is_dir and not is_pruned_dir(entry): # explore child directory ?
## print '===> marked for recursion',
child_dirs.append(full_path)
included = apply_filter(full_path, include_filter)
rejected = apply_filter(full_path, exclude_filter)
if not included or rejected: # do not include entry ?
## print '=> not included or rejected'
continue
link = os.path.islink(full_path)
is_file = os.path.isfile(full_path)
if not is_file and not is_dir:
## print '=> unknown entry type'
continue
if link:
entry_type = is_file and FILE_LINK or DIR_LINK
else:
entry_type = is_file and FILE or DIR
## print '=> type: %d' % entry_type,
if (entry_type & entry_type_filter) != 0:
## print ' => KEEP'
yield os.path.join(dir_path, entry)
## else:
## print ' => TYPE REJECTED'
return list(glob_impl(dir_path))
if __name__ == "__main__":
import unittest
class AntPatternToRETest(unittest.TestCase):
## def test_conversion(self):
## self.assertEqual('^somepath$', ant_pattern_to_re('somepath').pattern)
def test_matching(self):
test_cases = [ ('path',
['path'],
['somepath', 'pathsuffix', '/path', '/path']),
('*.py',
['source.py', 'source.ext.py', '.py'],
['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c']),
('**/path',
['path', '/path', '/a/path', 'c:/a/path', '/a/b/path', '//a/path', '/a/path/b/path'],
['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath']),
('path/**',
['path/a', 'path/path/a', 'path//'],
['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a']),
('/**/path',
['/path', '/a/path', '/a/b/path/path', '/path/path'],
['path', 'path/', 'a/path', '/pathsuffix', '/somepath']),
('a/b',
['a/b'],
['somea/b', 'a/bsuffix', 'a/b/c']),
('**/*.py',
['script.py', 'src/script.py', 'a/b/script.py', '/a/b/script.py'],
['script.pyc', 'script.pyo', 'a.py/b']),
('src/**/*.py',
['src/a.py', 'src/dir/a.py'],
['a/src/a.py', '/src/a.py']),
]
for ant_pattern, accepted_matches, rejected_matches in list(test_cases):
def local_path(paths):
return [ p.replace('/',os.path.sep) for p in paths ]
test_cases.append((ant_pattern, local_path(accepted_matches), local_path(rejected_matches)))
for ant_pattern, accepted_matches, rejected_matches in test_cases:
rex = ant_pattern_to_re(ant_pattern)
print('ant_pattern:', ant_pattern, ' => ', rex.pattern)
for accepted_match in accepted_matches:
print('Accepted?:', accepted_match)
self.assertTrue(rex.match(accepted_match) is not None)
for rejected_match in rejected_matches:
print('Rejected?:', rejected_match)
self.assertTrue(rex.match(rejected_match) is None)
unittest.main()
| gpl-3.0 | 2,982,295,225,310,341,600 | 37.094059 | 114 | 0.495257 | false |
crr0004/taiga-back | taiga/users/services.py | 2 | 3912 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This model contains a domain logic for users application.
"""
from django.apps import apps
from django.db.models import Q
from django.conf import settings
from django.utils.translation import ugettext as _
from easy_thumbnails.files import get_thumbnailer
from easy_thumbnails.exceptions import InvalidImageFormatError
from taiga.base import exceptions as exc
from taiga.base.utils.urls import get_absolute_url
from .gravatar import get_gravatar_url
def get_and_validate_user(*, username:str, password:str) -> bool:
"""
Check if user with username/email exists and specified
password matchs well with existing user password.
if user is valid, user is returned else, corresponding
exception is raised.
"""
user_model = apps.get_model("users", "User")
qs = user_model.objects.filter(Q(username=username) |
Q(email=username))
if len(qs) == 0:
raise exc.WrongArguments(_("Username or password does not matches user."))
user = qs[0]
if not user.check_password(password):
raise exc.WrongArguments(_("Username or password does not matches user."))
return user
def get_photo_url(photo):
"""Get a photo absolute url and the photo automatically cropped."""
try:
url = get_thumbnailer(photo)['avatar'].url
return get_absolute_url(url)
except InvalidImageFormatError as e:
return None
def get_photo_or_gravatar_url(user):
"""Get the user's photo/gravatar url."""
if user:
return get_photo_url(user.photo) if user.photo else get_gravatar_url(user.email)
return ""
def get_big_photo_url(photo):
"""Get a big photo absolute url and the photo automatically cropped."""
try:
url = get_thumbnailer(photo)['big-avatar'].url
return get_absolute_url(url)
except InvalidImageFormatError as e:
return None
def get_big_photo_or_gravatar_url(user):
"""Get the user's big photo/gravatar url."""
if not user:
return ""
if user.photo:
return get_big_photo_url(user.photo)
else:
return get_gravatar_url(user.email, size=settings.DEFAULT_BIG_AVATAR_SIZE)
def get_stats_for_user(user):
"""Get the user stats"""
project_ids = user.memberships.values_list("project__id", flat=True).distinct()
total_num_projects = project_ids.count()
roles = [_(r) for r in user.memberships.values_list("role__name", flat=True)]
roles = list(set(roles))
User = apps.get_model('users', 'User')
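    # Contacts are the distinct other users sharing at least one project with this user.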
total_num_contacts = User.objects.filter(memberships__project__id__in=project_ids)\
.exclude(id=user.id)\
.distinct()\
.count()
UserStory = apps.get_model('userstories', 'UserStory')
total_num_closed_userstories = UserStory.objects.filter(is_closed=True, assigned_to=user).count()
project_stats = {
'total_num_projects': total_num_projects,
'roles': roles,
'total_num_contacts': total_num_contacts,
'total_num_closed_userstories': total_num_closed_userstories,
}
return project_stats
| agpl-3.0 | 8,420,372,464,591,207,000 | 33.298246 | 101 | 0.690793 | false |
huihoo/reader | vendor/appdotnet.py | 19 | 9191 | import json
import requests
# To add
# - Identity Delegation
# - Streams (in dev by app.net)
# - Filters (in dev by app.net)
class Appdotnet:
    ''' Once access has been granted, you don't have to pass the
    client_id, client_secret, redirect_uri, or scope; these are only
    used to obtain the authentication token.
    Once authenticated, you can initialise Appdotnet with only the
    access token, e.g.:
api = Appdotnet(access_token='<insert token here>')
'''
def __init__(self, client_id=None, client_secret=None, redirect_uri=None,
scope=None, access_token=None):
#for server authentication flow
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.scope = scope
self.access_token = access_token
self.api_anchor = "alpha.app.net" #for when the versions change
#anchors currently different
self.public_api_anchor = "alpha-api.app.net"
#scopes provided by app.net API
self.allowed_scopes = ['stream', 'email', 'write_post',
'follow', 'messages','export']
def generateAuthUrl(self):
url = "https://" + self.api_anchor + "/oauth/authenticate?client_id="+\
self.client_id + "&response_type=code&adnview=appstore&redirect_uri=" +\
self.redirect_uri + "&scope="
for scope in self.scope:
if scope in self.allowed_scopes:
url += scope + " "
return url
def getAuthResponse(self, code):
#generate POST request
url = "https://alpha.app.net/oauth/access_token"
post_data = {'client_id':self.client_id,
'client_secret':self.client_secret,
'grant_type':'authorization_code',
'redirect_uri':self.redirect_uri,
'code':code}
r = requests.post(url,data=post_data)
return r.text
'''
API Calls
'''
#GET REQUESTS
def getRequest(self, url, getParameters=None):
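        # Authenticated GET: the access token and any extra parameters are
        # appended to the query string as-is (values are not URL-encoded
        # here). Returns the response body on success, otherwise a JSON
        # string describing the error.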
if not getParameters:
getParameters = {}
#access token
url = url + "?access_token=" + self.access_token
#if there are any extra get parameters aside from the access_token, append to the url
if getParameters != {}:
for key, value in getParameters.iteritems():
if not value: continue
url = url + "&" + key + "=" + unicode(value)
print url
r = requests.get(url)
if r.status_code == requests.codes.ok:
return r.text
else:
j = json.loads(r.text)
resp = {'error_code': r.status_code,
'message' : j['error']['message']}
return json.dumps(resp)
def getUser(self, user_id):
url = "https://%s/stream/0/users/%s" % (self.public_api_anchor,
user_id)
return self.getRequest(url)
def getUserPosts(self, user_id):
url = "https://%s/stream/0/users/%s/posts" % (self.public_api_anchor,
user_id)
return self.getRequest(url)
def getUserStars(self, user_id):
url = "https://%s/stream/0/users/%s/stars" % (self.public_api_anchor,
user_id)
return self.getRequest(url)
def getGlobalStream(self):
url = "https://%s/stream/0/posts/stream/global" % self.public_api_anchor
return self.getRequest(url)
def getUserStream(self):
url = "https://%s/stream/0/posts/stream" % self.public_api_anchor
return self.getRequest(url)
def getUserMentions(self, user_id):
url = "https://%s/stream/0/users/%s/mentions" % (self.public_api_anchor,user_id)
return self.getRequest(url)
def getPost(self, post_id):
url = "https://%s/stream/0/posts/%s" % (self.public_api_anchor,post_id)
return self.getRequest(url)
def getReposters(self, post_id):
url ="https://%s/stream/0/posts/%s/reposters" % (self.public_api_anchor,post_id)
return self.getRequest(url)
def getStars(self, post_id):
url ="https://%s/stream/0/posts/%s/stars" % (self.public_api_anchor,post_id)
return self.getRequest(url)
def getPostReplies(self, post_id):
url = "https://%s/stream/0/posts/%s/replies" % (self.public_api_anchor,post_id)
return self.getRequest(url)
def getPostsByTag(self, tag):
url = "https://%s/stream/0/posts/tag/%s" % (self.public_api_anchor, tag)
return self.getRequest(url)
def getUserFollowing(self, user_id, since_id=None, before_id=None):
url = "https://%s/stream/0/users/%s/following" % (self.public_api_anchor, user_id)
return self.getRequest(url, getParameters={
'since_id': since_id,
'before_id': before_id,
})
def getUserFollowingIds(self, user_id, since_id=None, before_id=None):
url = "https://%s/stream/0/users/%s/following/ids" % (self.public_api_anchor, user_id)
return self.getRequest(url, getParameters={
'since_id': since_id,
'before_id': before_id,
})
def getUserFollowers(self, user_id):
url = "https://%s/stream/0/users/%s/followers" % (self.public_api_anchor, user_id)
return self.getRequest(url)
def getMutedUsers(self):
url = "https://%s/stream/0/users/me/muted" % self.public_api_anchor
return self.getRequest(url)
def searchUsers(self,q):
url = "https://%s/stream/0/users/search" % (self.public_api_anchor)
return self.getRequest(url,getParameters={'q':q})
def getCurrentToken(self):
url = "https://%s/stream/0/token" % self.public_api_anchor
return self.getRequest(url)
#POST REQUESTS
def postRequest(self, url, data=None, headers=None):
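        # Authenticated POST: data is JSON-encoded into the request body and
        # the token is sent as a Bearer Authorization header. Unlike
        # getRequest, errors come back as a dict (or a plain string if the
        # error body cannot be parsed).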
if not data:
data = {}
if not headers:
headers = {}
headers['Authorization'] = 'Bearer %s' % self.access_token
url = url
r = requests.post(url,data=json.dumps(data),headers=headers)
if r.status_code == requests.codes.ok:
return r.text
else:
try:
j = json.loads(r.text)
resp = {'error_code': r.status_code,
'message' : j['error']['message']}
return resp
except: #generic error
print r.text
return "{'error':'There was an error'}"
def followUser(self,user_id):
url = "https://%s/stream/0/users/%s/follow" % (self.public_api_anchor, user_id)
return self.postRequest(url)
def repostPost(self,post_id):
url = "https://%s/stream/0/posts/%s/repost" % (self.public_api_anchor, post_id)
return self.postRequest(url)
def starPost(self,post_id):
url = "https://%s/stream/0/posts/%s/star" % (self.public_api_anchor, post_id)
return self.postRequest(url)
def muteUser(self,user_id):
url = "https://%s/stream/0/users/%s/mute" % (self.public_api_anchor, user_id)
return self.postRequest(url)
#requires: text
#optional: reply_to, annotations, links
def createPost(self, text, reply_to = None, annotations=None, links=None):
url = "https://%s/stream/0/posts" % self.public_api_anchor
if annotations != None:
url = url + "?include_annotations=1"
data = {'text':text}
if reply_to != None:
data['reply_to'] = reply_to
if annotations != None:
data['annotations'] = annotations
if links != None:
data['links'] = links
return self.postRequest(url,data,headers={'content-type':'application/json'})
#DELETE request
def deleteRequest(self, url):
url = url + "?access_token=" + self.access_token
r = requests.delete(url)
if r.status_code == requests.codes.ok:
return r.text
else:
try:
j = json.loads(r.text)
resp = {'error_code': r.status_code,
'message' : j['error']['message']}
return resp
except: #generic error
print r.text
return "{'error':'There was an error'}"
def deletePost(self, post_id):
url = "https://%s/stream/0/posts/%s" % (self.public_api_anchor,post_id)
return self.deleteRequest(url)
def unrepostPost(self, post_id):
url = "https://%s/stream/0/posts/%s/repost" % (self.public_api_anchor,post_id)
return self.deleteRequest(url)
def unstarPost(self, post_id):
url = "https://%s/stream/0/posts/%s/star" % (self.public_api_anchor,post_id)
return self.deleteRequest(url)
def unfollowUser(self, user_id):
url = "https://%s/stream/0/users/%s/follow" % (self.public_api_anchor,user_id)
return self.deleteRequest(url)
def unmuteUser(self, user_id):
url = "https://%s/stream/0/users/%s/mute" % (self.public_api_anchor,user_id)
return self.deleteRequest(url)
| mit | 6,203,805,073,119,702,000 | 34.762646 | 94 | 0.573278 | false |
fga-verival/2017-1Grupo2 | backend/game/tests/acceptance/test_13.py | 1 | 1980 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class TC13(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8000"
self.verificationErrors = []
self.accept_next_alert = True
def test_t_c13(self):
driver = self.driver
driver.get(self.base_url + "/admin/login/?next=/admin/")
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys("qwer1234")
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys("qwer1234")
driver.find_element_by_css_selector("input.btn.btn-info").click()
driver.find_element_by_css_selector("input.btn.btn-info").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 7,939,839,606,976,226,000 | 35.666667 | 73 | 0.640404 | false |
giggsey/SickRage | lib/github/Permissions.py | 74 | 3027 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class Permissions(github.GithubObject.NonCompletableGithubObject):
"""
    This class represents Permissions as returned for example by http://developer.github.com/v3/todo
"""
@property
def admin(self):
"""
:type: bool
"""
return self._admin.value
@property
def pull(self):
"""
:type: bool
"""
return self._pull.value
@property
def push(self):
"""
:type: bool
"""
return self._push.value
def _initAttributes(self):
self._admin = github.GithubObject.NotSet
self._pull = github.GithubObject.NotSet
self._push = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "admin" in attributes: # pragma no branch
self._admin = self._makeBoolAttribute(attributes["admin"])
if "pull" in attributes: # pragma no branch
self._pull = self._makeBoolAttribute(attributes["pull"])
if "push" in attributes: # pragma no branch
self._push = self._makeBoolAttribute(attributes["push"])
| gpl-3.0 | -2,401,500,693,479,580,000 | 44.179104 | 101 | 0.471093 | false |
Sh4kE/ofm_helper | core/tests/unit/views/ofm_views/test_ofm_finances_view.py | 2 | 4895 | import json
from django.core.urlresolvers import reverse
from django.test import TestCase
from core.factories.core_factories import MatchdayFactory, FinanceFactory
from users.models import OFMUser
class OFMFinancesViewTestCase(TestCase):
def setUp(self):
self.matchday = MatchdayFactory.create()
self.next_matchday = MatchdayFactory.create(number=1)
self.user1 = OFMUser.objects.create_user(
username='alice',
email='[email protected]',
password='alice',
ofm_username='alice',
ofm_password='alice'
)
self.finances = FinanceFactory.create(user=self.user1, matchday=self.matchday)
self.next_finances = FinanceFactory.create(
user=self.user1,
matchday=self.next_matchday,
balance=2000,
income_visitors_league=200,
expenses_player_salaries=200
)
self.client.login(username='alice', password='alice')
def test_user_can_see_his_finances(self):
response = self.client.get(reverse('core:ofm:finance_overview'))
self.assertEqual(response.status_code, 200)
self.assertTrue('matchdays' in response.context_data)
def test_user_can_choose_between_matchdays(self):
response = self.client.get(reverse('core:ofm:finance_overview'))
self.assertEqual(response.status_code, 200)
self.assertEqual(self.next_matchday, response.context_data['matchdays'][0])
self.assertEqual(self.matchday, response.context_data['matchdays'][1])
def test_user_can_see_his_latest_finances_when_given_no_matchday(self):
response = self.client.get(reverse('core:ofm:finances_json'))
self.assertEqual(response.status_code, 200)
returned_json_data = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(returned_json_data), 1)
self.assertEqual(returned_json_data[0]['account_balance'], 2000)
self.assertEqual(returned_json_data[0]['income_visitors_league'], 200)
self.assertEqual(returned_json_data[0]['expenses_player_salaries'], -200)
def test_user_can_see_his_finances_diff_when_given_both_matchdays(self):
third_matchday = MatchdayFactory.create(number=self.matchday.number + 2)
FinanceFactory.create(
user=self.user1,
matchday=third_matchday,
balance=2500,
income_visitors_league=250,
income_sponsoring=250,
expenses_player_salaries=250,
expenses_youth=100
)
response = self.client.get(reverse('core:ofm:finances_json'),
{'newer_matchday_season': third_matchday.season.number,
'newer_matchday': third_matchday.number,
'older_matchday_season': self.matchday.season.number,
'older_matchday': self.matchday.number
})
self.assertEqual(response.status_code, 200)
returned_json_data = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(returned_json_data), 1)
self.assertEqual(returned_json_data[0]['account_balance'], 2500)
self.assertEqual(returned_json_data[0]['balance'], 150)
self.assertEqual(returned_json_data[0]['sum_income'], 400)
self.assertEqual(returned_json_data[0]['sum_expenses'], -250)
self.assertEqual(returned_json_data[0]['income_visitors_league'], 150)
self.assertEqual(returned_json_data[0]['expenses_player_salaries'], -150)
def test_user_can_see_his_finances_diff_when_given_only_newer_matchday(self):
third_matchday = MatchdayFactory.create(number=self.matchday.number + 2)
FinanceFactory.create(
user=self.user1,
matchday=third_matchday,
balance=2500,
income_visitors_league=250,
expenses_player_salaries=250
)
response = self.client.get(reverse('core:ofm:finances_json'),
{'newer_matchday_season': third_matchday.season.number,
'newer_matchday': third_matchday.number
})
self.assertEqual(response.status_code, 200)
returned_json_data = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(returned_json_data), 1)
self.assertEqual(returned_json_data[0]['account_balance'], 2500)
self.assertEqual(returned_json_data[0]['balance'], 0)
self.assertEqual(returned_json_data[0]['sum_income'], 250)
self.assertEqual(returned_json_data[0]['sum_expenses'], -250)
self.assertEqual(returned_json_data[0]['income_visitors_league'], 250)
self.assertEqual(returned_json_data[0]['expenses_player_salaries'], -250)
| agpl-3.0 | 5,398,088,682,602,229,000 | 45.619048 | 90 | 0.630031 | false |
whip112/Whip112 | vendor/packages/lockfile/symlinklockfile.py | 487 | 2613 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class SymlinkLockFile(LockBase):
"""Lock access to a file using symlink(2)."""
def __init__(self, path, threaded=True, timeout=None):
# super(SymlinkLockFile).__init(...)
LockBase.__init__(self, path, threaded, timeout)
# split it back!
self.unique_name = os.path.split(self.unique_name)[1]
def acquire(self, timeout=None):
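        # Creating a symlink is atomic and fails if the name already exists,
        # so the link itself serves as the lock; its target records this
        # locker's unique name, which i_am_locking() inspects later.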
# Hopefully unnecessary for symlink.
#try:
# open(self.unique_name, "wb").close()
#except IOError:
# raise LockFailed("failed to create %s" % self.unique_name)
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a symbolic link to it.
try:
os.symlink(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
if self.i_am_locking():
# Linked to out unique name. Proceed.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout/10 if timeout is not None else 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.islink(self.lock_file)
def i_am_locking(self):
return os.path.islink(self.lock_file) and \
os.readlink(self.lock_file) == self.unique_name
def break_lock(self):
if os.path.islink(self.lock_file): # exists && link
os.unlink(self.lock_file)
| mpl-2.0 | 3,509,958,178,352,571,000 | 36.869565 | 74 | 0.52124 | false |
cedk/odoo | addons/website_sale_options/controllers/main.py | 236 | 3610 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website_sale.controllers.main import website_sale
class website_sale_options(website_sale):
@http.route(['/shop/product/<model("product.template"):product>'], type='http', auth="public", website=True)
def product(self, product, category='', search='', **kwargs):
r = super(website_sale_options, self).product(product, category, search, **kwargs)
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
template_obj = pool['product.template']
optional_product_ids = []
for p in product.optional_product_ids:
ctx = dict(context, active_id=p.id)
optional_product_ids.append(template_obj.browse(cr, uid, p.id, context=ctx))
r.qcontext['optional_product_ids'] = optional_product_ids
return r
@http.route(['/shop/cart/update_option'], type='http', auth="public", methods=['POST'], website=True, multilang=False)
def cart_options_update_json(self, product_id, add_qty=1, set_qty=0, goto_shop=None, lang=None, **kw):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
if lang:
context = dict(context, lang=lang)
request.website = request.website.with_context(context)
order = request.website.sale_get_order(force_create=1)
product = pool['product.product'].browse(cr, uid, int(product_id), context=context)
option_ids = [p.id for tmpl in product.optional_product_ids for p in tmpl.product_variant_ids]
optional_product_ids = []
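        # Each posted 'optional-product-*' field carries a variant id and its
        # matching 'optional-add-*' field the checkbox state; keep only the
        # checked entries that are valid option variants of this product.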
for k, v in kw.items():
if "optional-product-" in k and int(kw.get(k.replace("product", "add"))) and int(v) in option_ids:
optional_product_ids.append(int(v))
value = {}
if add_qty or set_qty:
value = order._cart_update(product_id=int(product_id),
add_qty=int(add_qty), set_qty=int(set_qty),
optional_product_ids=optional_product_ids)
# options have all time the same quantity
for option_id in optional_product_ids:
order._cart_update(product_id=option_id,
set_qty=value.get('quantity'),
linked_line_id=value.get('line_id'))
return str(order.cart_quantity)
@http.route(['/shop/modal'], type='json', auth="public", methods=['POST'], website=True)
def modal(self, product_id, **kw):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
pricelist = self.get_pricelist()
if not context.get('pricelist'):
context['pricelist'] = int(pricelist)
website_context = kw.get('kwargs', {}).get('context', {})
context = dict(context or {}, **website_context)
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = pricelist.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
product = pool['product.product'].browse(cr, uid, int(product_id), context=context)
request.website = request.website.with_context(context)
return request.website._render("website_sale_options.modal", {
'product': product,
'compute_currency': compute_currency,
'get_attribute_value_ids': self.get_attribute_value_ids,
})
| agpl-3.0 | -8,846,457,390,886,843,000 | 47.133333 | 131 | 0.634072 | false |
andreimacavei/coala | coalib/tests/bearlib/abstractions/SectionCreatableTest.py | 2 | 2504 | import sys
sys.path.insert(0, ".")
import unittest
from coalib.bearlib.abstractions.SectionCreatable import SectionCreatable
from coalib.settings.Section import Section, Setting
class TestObject(SectionCreatable):
def __init__(self,
setting_one: int,
raw_setting,
setting_two: bool=False,
setting_three: list=[1, 2],
opt_raw_set=5):
SectionCreatable.__init__(self)
assert isinstance(setting_one, int)
assert isinstance(raw_setting, Setting)
assert isinstance(setting_two, bool)
assert isinstance(setting_three, list)
assert isinstance(opt_raw_set, Setting) or isinstance(opt_raw_set, int)
self.setting_one = setting_one
self.raw_setting = raw_setting
self.setting_two = setting_two
self.setting_three = setting_three
self.opt_raw_set = opt_raw_set
class SectionCreatableTest(unittest.TestCase):
def test_api(self):
uut = SectionCreatable()
self.assertRaises(TypeError, uut.from_section, 5)
self.assertEqual(uut.get_non_optional_settings(), {})
self.assertEqual(uut.get_optional_settings(), {})
def test_needed_settings(self):
self.assertEqual(sorted(list(TestObject.get_non_optional_settings())),
sorted(["setting_one", "raw_setting"]))
self.assertEqual(
sorted(list(TestObject.get_optional_settings())),
sorted(["setting_two", "setting_three", "opt_raw_set"]))
def test_from_section(self):
section = Section("name")
section.append(Setting("setting_one", " 5"))
section.append(Setting("raw_setting", " 5s"))
uut = TestObject.from_section(section)
self.assertEqual(uut.setting_one, 5)
self.assertEqual(str(uut.raw_setting), "5s")
self.assertEqual(uut.setting_two, False)
self.assertEqual(uut.setting_three, [1, 2])
self.assertEqual(str(uut.opt_raw_set), "5")
section.append(Setting("setting_three", "2, 4"))
section.append(Setting("opt_raw_set", "tst ,"))
uut = TestObject.from_section(section)
self.assertEqual(uut.setting_one, 5)
self.assertEqual(str(uut.raw_setting), "5s")
self.assertEqual(uut.setting_two, False)
self.assertEqual(uut.setting_three, ["2", "4"])
self.assertEqual(str(uut.opt_raw_set), "tst ,")
if __name__ == '__main__':
unittest.main(verbosity=2)
| agpl-3.0 | 7,747,048,642,713,402,000 | 36.939394 | 79 | 0.622204 | false |
kingsamchen/Eureka | crack-data-structures-and-algorithms/leetcode/populating_next_right_pointers_in_each_node_II_q117.py | 1 | 1821 | """
# Definition for a Node.
class Node(object):
def __init__(self, val=0, left=None, right=None, next=None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
# Core idea:
# Recurse on each node; the problem statement says the recursion call stack
# does not count as extra space.
# For each node, first locate its rightmost non-null child: it becomes the
# left end of the connection on the next level.
# Then scan rightward along the current level for the first node that has at
# least one child; its leftmost child becomes the right end of the connection.
# If both ends are found, link them.
# Finally recurse into the node's children. Note: the right child must be
# handled before the left child, because the horizontal scan above moves from
# left to right.
class Solution(object):
def connect(self, root):
"""
:type root: Node
:rtype: Node
"""
if not root:
return root
# connect children, even root.right == None is well taken.
if root.left:
root.left.next = root.right
# locate the right part node of left children.
lc_right = root.right if root.right else root.left
# because a node may have no child, therefore
# scan from left to right, stop at the first node who has at least one child.
pn = root.next
while pn and not (pn.left or pn.right):
pn = pn.next
# connect if may.
if pn and lc_right:
next_left = pn.left if pn.left else pn.right
lc_right.next = next_left
# recursively.
# NOTE: must handle right part ahead of left part.
# because our horizontal scan is from left to right.
self.connect(root.right)
self.connect(root.left)
return root
| mit | -8,504,431,898,645,499,000 | 28.18 | 85 | 0.604524 | false |
Changaco/oh-mainline | vendor/packages/Django/django/contrib/admin/templatetags/admin_modify.py | 101 | 2428 | from django import template
register = template.Library()
@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
"""
Creates a list of prepopulated_fields that should render Javascript for
the prepopulated fields for both the admin form and inlines.
"""
prepopulated_fields = []
if context['add'] and 'adminform' in context:
prepopulated_fields.extend(context['adminform'].prepopulated_fields)
if 'inline_admin_formsets' in context:
for inline_admin_formset in context['inline_admin_formsets']:
for inline_admin_form in inline_admin_formset:
if inline_admin_form.original is None:
prepopulated_fields.extend(inline_admin_form.prepopulated_fields)
context.update({'prepopulated_fields': prepopulated_fields})
return context
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
ctx = {
'opts': opts,
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and change and context.get('show_delete', True)),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True
}
if context.get('original') is not None:
ctx['original'] = context['original']
return ctx
@register.filter
def cell_count(inline_admin_form):
"""Returns the number of cells used in a tabular inline"""
count = 1 # Hidden cell with hidden 'id' field
for fieldset in inline_admin_form:
# Loop through all the fields (one per cell)
for line in fieldset:
for field in line:
count += 1
if inline_admin_form.formset.can_delete:
# Delete checkbox
count += 1
return count
| agpl-3.0 | 6,413,520,125,397,554,000 | 39.466667 | 85 | 0.632208 | false |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/psycopg2/tests/test_extras_dictcursor.py | 62 | 17404 | #!/usr/bin/env python
#
# extras_dictcursor - test if DictCursor extension class works
#
# Copyright (C) 2004-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
from datetime import timedelta
import psycopg2
import psycopg2.extras
from testutils import unittest, ConnectingTestCase, skip_before_postgres
from testutils import skip_if_no_namedtuple
class ExtrasDictCursorTests(ConnectingTestCase):
"""Test if DictCursor extension class works."""
def setUp(self):
ConnectingTestCase.setUp(self)
curs = self.conn.cursor()
curs.execute("CREATE TEMPORARY TABLE ExtrasDictCursorTests (foo text)")
curs.execute("INSERT INTO ExtrasDictCursorTests VALUES ('bar')")
self.conn.commit()
def testDictConnCursorArgs(self):
self.conn.close()
self.conn = self.connect(connection_factory=psycopg2.extras.DictConnection)
cur = self.conn.cursor()
self.assert_(isinstance(cur, psycopg2.extras.DictCursor))
self.assertEqual(cur.name, None)
# overridable
cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.NamedTupleCursor)
self.assertEqual(cur.name, 'foo')
self.assert_(isinstance(cur, psycopg2.extras.NamedTupleCursor))
def testDictCursorWithPlainCursorFetchOne(self):
self._testWithPlainCursor(lambda curs: curs.fetchone())
def testDictCursorWithPlainCursorFetchMany(self):
self._testWithPlainCursor(lambda curs: curs.fetchmany(100)[0])
def testDictCursorWithPlainCursorFetchManyNoarg(self):
self._testWithPlainCursor(lambda curs: curs.fetchmany()[0])
def testDictCursorWithPlainCursorFetchAll(self):
self._testWithPlainCursor(lambda curs: curs.fetchall()[0])
def testDictCursorWithPlainCursorIter(self):
def getter(curs):
for row in curs:
return row
self._testWithPlainCursor(getter)
def testUpdateRow(self):
row = self._testWithPlainCursor(lambda curs: curs.fetchone())
row['foo'] = 'qux'
self.failUnless(row['foo'] == 'qux')
self.failUnless(row[0] == 'qux')
@skip_before_postgres(8, 0)
def testDictCursorWithPlainCursorIterRowNumber(self):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
self._testIterRowNumber(curs)
def _testWithPlainCursor(self, getter):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.failUnless(row['foo'] == 'bar')
self.failUnless(row[0] == 'bar')
return row
def testDictCursorWithPlainCursorRealFetchOne(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchone())
def testDictCursorWithPlainCursorRealFetchMany(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchmany(100)[0])
def testDictCursorWithPlainCursorRealFetchManyNoarg(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchmany()[0])
def testDictCursorWithPlainCursorRealFetchAll(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchall()[0])
def testDictCursorWithPlainCursorRealIter(self):
def getter(curs):
for row in curs:
return row
self._testWithPlainCursorReal(getter)
@skip_before_postgres(8, 0)
def testDictCursorWithPlainCursorRealIterRowNumber(self):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
self._testIterRowNumber(curs)
def _testWithPlainCursorReal(self, getter):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.failUnless(row['foo'] == 'bar')
def testDictCursorWithNamedCursorFetchOne(self):
self._testWithNamedCursor(lambda curs: curs.fetchone())
def testDictCursorWithNamedCursorFetchMany(self):
self._testWithNamedCursor(lambda curs: curs.fetchmany(100)[0])
def testDictCursorWithNamedCursorFetchManyNoarg(self):
self._testWithNamedCursor(lambda curs: curs.fetchmany()[0])
def testDictCursorWithNamedCursorFetchAll(self):
self._testWithNamedCursor(lambda curs: curs.fetchall()[0])
def testDictCursorWithNamedCursorIter(self):
def getter(curs):
for row in curs:
return row
self._testWithNamedCursor(getter)
@skip_before_postgres(8, 2)
def testDictCursorWithNamedCursorNotGreedy(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor)
self._testNamedCursorNotGreedy(curs)
@skip_before_postgres(8, 0)
def testDictCursorWithNamedCursorIterRowNumber(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor)
self._testIterRowNumber(curs)
def _testWithNamedCursor(self, getter):
curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.DictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.failUnless(row['foo'] == 'bar')
self.failUnless(row[0] == 'bar')
def testDictCursorRealWithNamedCursorFetchOne(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchone())
def testDictCursorRealWithNamedCursorFetchMany(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchmany(100)[0])
def testDictCursorRealWithNamedCursorFetchManyNoarg(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchmany()[0])
def testDictCursorRealWithNamedCursorFetchAll(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchall()[0])
def testDictCursorRealWithNamedCursorIter(self):
def getter(curs):
for row in curs:
return row
self._testWithNamedCursorReal(getter)
@skip_before_postgres(8, 2)
def testDictCursorRealWithNamedCursorNotGreedy(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor)
self._testNamedCursorNotGreedy(curs)
@skip_before_postgres(8, 0)
def testDictCursorRealWithNamedCursorIterRowNumber(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor)
self._testIterRowNumber(curs)
def _testWithNamedCursorReal(self, getter):
curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.failUnless(row['foo'] == 'bar')
def _testNamedCursorNotGreedy(self, curs):
curs.itersize = 2
curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""")
recs = []
for t in curs:
time.sleep(0.01)
recs.append(t)
# check that the dataset was not fetched in a single gulp
self.assert_(recs[1]['ts'] - recs[0]['ts'] < timedelta(seconds=0.005))
self.assert_(recs[2]['ts'] - recs[1]['ts'] > timedelta(seconds=0.0099))
def _testIterRowNumber(self, curs):
# Only checking for dataset < itersize:
# see CursorTests.test_iter_named_cursor_rownumber
curs.itersize = 20
curs.execute("""select * from generate_series(1,10)""")
for i, r in enumerate(curs):
self.assertEqual(i + 1, curs.rownumber)
def testPickleDictRow(self):
import pickle
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs.execute("select 10 as a, 20 as b")
r = curs.fetchone()
d = pickle.dumps(r)
r1 = pickle.loads(d)
self.assertEqual(r, r1)
self.assertEqual(r[0], r1[0])
self.assertEqual(r[1], r1[1])
self.assertEqual(r['a'], r1['a'])
self.assertEqual(r['b'], r1['b'])
self.assertEqual(r._index, r1._index)
def testPickleRealDictRow(self):
import pickle
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("select 10 as a, 20 as b")
r = curs.fetchone()
d = pickle.dumps(r)
r1 = pickle.loads(d)
self.assertEqual(r, r1)
self.assertEqual(r['a'], r1['a'])
self.assertEqual(r['b'], r1['b'])
self.assertEqual(r._column_mapping, r1._column_mapping)
class NamedTupleCursorTest(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
from psycopg2.extras import NamedTupleConnection
try:
from collections import namedtuple
except ImportError:
return
self.conn = self.connect(connection_factory=NamedTupleConnection)
curs = self.conn.cursor()
curs.execute("CREATE TEMPORARY TABLE nttest (i int, s text)")
curs.execute("INSERT INTO nttest VALUES (1, 'foo')")
curs.execute("INSERT INTO nttest VALUES (2, 'bar')")
curs.execute("INSERT INTO nttest VALUES (3, 'baz')")
self.conn.commit()
@skip_if_no_namedtuple
def test_cursor_args(self):
cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.DictCursor)
self.assertEqual(cur.name, 'foo')
self.assert_(isinstance(cur, psycopg2.extras.DictCursor))
@skip_if_no_namedtuple
def test_fetchone(self):
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
t = curs.fetchone()
self.assertEqual(t[0], 1)
self.assertEqual(t.i, 1)
self.assertEqual(t[1], 'foo')
self.assertEqual(t.s, 'foo')
self.assertEqual(curs.rownumber, 1)
self.assertEqual(curs.rowcount, 3)
@skip_if_no_namedtuple
def test_fetchmany_noarg(self):
curs = self.conn.cursor()
curs.arraysize = 2
curs.execute("select * from nttest order by 1")
res = curs.fetchmany()
self.assertEqual(2, len(res))
self.assertEqual(res[0].i, 1)
self.assertEqual(res[0].s, 'foo')
self.assertEqual(res[1].i, 2)
self.assertEqual(res[1].s, 'bar')
self.assertEqual(curs.rownumber, 2)
self.assertEqual(curs.rowcount, 3)
@skip_if_no_namedtuple
def test_fetchmany(self):
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
res = curs.fetchmany(2)
self.assertEqual(2, len(res))
self.assertEqual(res[0].i, 1)
self.assertEqual(res[0].s, 'foo')
self.assertEqual(res[1].i, 2)
self.assertEqual(res[1].s, 'bar')
self.assertEqual(curs.rownumber, 2)
self.assertEqual(curs.rowcount, 3)
@skip_if_no_namedtuple
def test_fetchall(self):
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
res = curs.fetchall()
self.assertEqual(3, len(res))
self.assertEqual(res[0].i, 1)
self.assertEqual(res[0].s, 'foo')
self.assertEqual(res[1].i, 2)
self.assertEqual(res[1].s, 'bar')
self.assertEqual(res[2].i, 3)
self.assertEqual(res[2].s, 'baz')
self.assertEqual(curs.rownumber, 3)
self.assertEqual(curs.rowcount, 3)
@skip_if_no_namedtuple
def test_executemany(self):
curs = self.conn.cursor()
curs.executemany("delete from nttest where i = %s",
[(1,), (2,)])
curs.execute("select * from nttest order by 1")
res = curs.fetchall()
self.assertEqual(1, len(res))
self.assertEqual(res[0].i, 3)
self.assertEqual(res[0].s, 'baz')
@skip_if_no_namedtuple
def test_iter(self):
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
i = iter(curs)
self.assertEqual(curs.rownumber, 0)
t = i.next()
self.assertEqual(t.i, 1)
self.assertEqual(t.s, 'foo')
self.assertEqual(curs.rownumber, 1)
self.assertEqual(curs.rowcount, 3)
t = i.next()
self.assertEqual(t.i, 2)
self.assertEqual(t.s, 'bar')
self.assertEqual(curs.rownumber, 2)
self.assertEqual(curs.rowcount, 3)
t = i.next()
self.assertEqual(t.i, 3)
self.assertEqual(t.s, 'baz')
self.assertRaises(StopIteration, i.next)
self.assertEqual(curs.rownumber, 3)
self.assertEqual(curs.rowcount, 3)
def test_error_message(self):
try:
from collections import namedtuple
except ImportError:
# an import error somewhere
from psycopg2.extras import NamedTupleConnection
try:
self.conn = self.connect(
connection_factory=NamedTupleConnection)
curs = self.conn.cursor()
curs.execute("select 1")
curs.fetchone()
except ImportError:
pass
else:
self.fail("expecting ImportError")
else:
return self.skipTest("namedtuple available")
@skip_if_no_namedtuple
def test_record_updated(self):
curs = self.conn.cursor()
curs.execute("select 1 as foo;")
r = curs.fetchone()
self.assertEqual(r.foo, 1)
curs.execute("select 2 as bar;")
r = curs.fetchone()
self.assertEqual(r.bar, 2)
self.assertRaises(AttributeError, getattr, r, 'foo')
@skip_if_no_namedtuple
def test_no_result_no_surprise(self):
curs = self.conn.cursor()
curs.execute("update nttest set s = s")
self.assertRaises(psycopg2.ProgrammingError, curs.fetchone)
curs.execute("update nttest set s = s")
self.assertRaises(psycopg2.ProgrammingError, curs.fetchall)
@skip_if_no_namedtuple
def test_minimal_generation(self):
# Instrument the class to verify it gets called the minimum number of times.
from psycopg2.extras import NamedTupleCursor
f_orig = NamedTupleCursor._make_nt
calls = [0]
def f_patched(self_):
calls[0] += 1
return f_orig(self_)
NamedTupleCursor._make_nt = f_patched
try:
curs = self.conn.cursor()
curs.execute("select * from nttest order by 1")
curs.fetchone()
curs.fetchone()
curs.fetchone()
self.assertEqual(1, calls[0])
curs.execute("select * from nttest order by 1")
curs.fetchone()
curs.fetchall()
self.assertEqual(2, calls[0])
curs.execute("select * from nttest order by 1")
curs.fetchone()
curs.fetchmany(1)
self.assertEqual(3, calls[0])
finally:
NamedTupleCursor._make_nt = f_orig
@skip_if_no_namedtuple
@skip_before_postgres(8, 0)
def test_named(self):
curs = self.conn.cursor('tmp')
curs.execute("""select i from generate_series(0,9) i""")
recs = []
recs.extend(curs.fetchmany(5))
recs.append(curs.fetchone())
recs.extend(curs.fetchall())
self.assertEqual(range(10), [t.i for t in recs])
@skip_if_no_namedtuple
def test_named_fetchone(self):
curs = self.conn.cursor('tmp')
curs.execute("""select 42 as i""")
t = curs.fetchone()
self.assertEqual(t.i, 42)
@skip_if_no_namedtuple
def test_named_fetchmany(self):
curs = self.conn.cursor('tmp')
curs.execute("""select 42 as i""")
recs = curs.fetchmany(10)
self.assertEqual(recs[0].i, 42)
@skip_if_no_namedtuple
def test_named_fetchall(self):
curs = self.conn.cursor('tmp')
curs.execute("""select 42 as i""")
recs = curs.fetchall()
self.assertEqual(recs[0].i, 42)
@skip_if_no_namedtuple
@skip_before_postgres(8, 2)
def test_not_greedy(self):
curs = self.conn.cursor('tmp')
curs.itersize = 2
curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""")
recs = []
for t in curs:
time.sleep(0.01)
recs.append(t)
# check that the dataset was not fetched in a single gulp
self.assert_(recs[1].ts - recs[0].ts < timedelta(seconds=0.005))
self.assert_(recs[2].ts - recs[1].ts > timedelta(seconds=0.0099))
@skip_if_no_namedtuple
@skip_before_postgres(8, 0)
def test_named_rownumber(self):
curs = self.conn.cursor('tmp')
# Only checking for dataset < itersize:
# see CursorTests.test_iter_named_cursor_rownumber
curs.itersize = 4
curs.execute("""select * from generate_series(1,3)""")
for i, t in enumerate(curs):
self.assertEqual(i + 1, curs.rownumber)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| mit | -5,147,085,961,778,336,000 | 35.033126 | 87 | 0.634969 | false |
MaxStrange/ArtieInfant | scripts/plotaudio/plotaudio.py | 1 | 2598 | """
This is code that I find I use a LOT while debugging or analyzing.
"""
import audiosegment
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
#################################################
#### These are the parameters I have been using #
#################################################
# ---- for long spectrograms ------
sample_rate_hz = 16000.0 # 16kHz sample rate
bytewidth = 2 # 16-bit samples
nchannels = 1 # mono
duration_s = 0.5 # Duration of each complete spectrogram
window_length_s = 0.03 # How long each FFT is
overlap = 0.2 # How much each FFT overlaps with each other one
# ---- for short spectrograms ------
#sample_rate_hz = 8000.0 # 8kHz sample rate
#bytewidth = 2 # 16-bit samples
#nchannels = 1 # mono
#duration_s = 0.3 # Duration of each complete spectrogram
#window_length_s = 0.02 # How long each FFT is
#overlap = 0.2 # How much each FFT overlaps with each other one
#################################################
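# For reference (derived from the settings above, and assuming `overlap` is
# the fraction of each window shared with the next): the long-spectrogram FFT
# window spans window_length_s * sample_rate_hz = 0.03 * 16000 = 480 samples,
# of which 0.2 * 480 = 96 samples overlap with the following window.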
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Need a path to a WAV file.")
exit(1)
seg = audiosegment.from_file(sys.argv[1])
print(seg)
print(" -> RMS:", seg.rms)
print(" -> SPL:", seg.spl)
print(" -> Length (s):", seg.duration_seconds)
print(" -> NChannels:", seg.channels)
print(" -> Frequency (Hz):", seg.frame_rate)
print(" -> Bytes per sample:", seg.sample_width)
print(" -> Human audible?", seg.human_audible())
name = os.path.basename(sys.argv[1])
name, _ext = os.path.splitext(name)
plt.title("Raw Values")
arr = seg.to_numpy_array()
times = np.linspace(0, len(arr) / seg.frame_rate, num=len(arr))
plt.plot(times, seg.to_numpy_array())
plt.xlabel("Time (s)")
plt.ylabel("PCM")
plt.savefig("{}-waveform.png".format(name))
plt.show()
plt.title("Histogram")
hist_bins, hist_vals = seg.fft()
hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals)
plt.plot(hist_bins/1000, hist_vals_real_normed)
plt.xlabel("kHz")
plt.ylabel("dB")
plt.savefig("{}-histogram.png".format(name))
plt.show()
plt.title("Spectrogram")
fs, ts, amps = seg.spectrogram(0, duration_s, window_length_s=window_length_s, overlap=overlap, window=('tukey', 0.5))
#amps = 10.0 * np.log10(amps)
plt.pcolormesh(ts, fs, amps)
plt.xlabel("Time (s)")
plt.ylabel("Hz")
plt.savefig("{}-spectrogram.png".format(name))
plt.show()
| mit | 4,521,299,774,523,878,000 | 33.64 | 122 | 0.566205 | false |
glaubitz/fs-uae-debian | arcade/launcher/ui/floppiesgroup.py | 2 | 4358 | import fsui
from launcher.cd_manager import CDManager
from launcher.floppy_manager import FloppyManager
from launcher.i18n import gettext
from launcher.option import Option
from launcher.ui.behaviors.platformbehavior import (
AMIGA_PLATFORMS,
CDEnableBehavior,
FloppyEnableBehavior,
)
from launcher.ui.floppyselector import FloppySelector
from launcher.ui.options import ConfigWidgetFactory
class FloppiesGroup(fsui.Group):
FLOPPY_MODE = FloppySelector.FLOPPY_MODE
CD_MODE = FloppySelector.CD_MODE
TAPE_MODE = FloppySelector.TAPE_MODE
CARTRIDGE_MODE = FloppySelector.CARTRIDGE_MODE
def __init__(self, parent, drives=2, cd_mode=False, removable_media=False):
fsui.Group.__init__(self, parent)
self.layout = fsui.VerticalLayout()
self.cd_mode = cd_mode
self.num_drives = drives
hori_layout = fsui.HorizontalLayout()
self.layout.add(hori_layout, fill=True)
self.mode = self.FLOPPY_MODE
if cd_mode:
self.mode = self.CD_MODE
if self.mode == self.CD_MODE:
title = gettext("CD-ROM Drive")
drive_count_option = Option.CDROM_DRIVE_COUNT
behavior_class = CDEnableBehavior
elif self.mode == self.TAPE_MODE:
title = gettext("Tape Drive")
drive_count_option = None
behavior_class = None
elif self.mode == self.CARTRIDGE_MODE:
title = gettext("Cartridge")
drive_count_option = None
behavior_class = None
else:
title = gettext("Floppy Drives")
drive_count_option = Option.FLOPPY_DRIVE_COUNT
behavior_class = FloppyEnableBehavior
if removable_media:
# Removable media group will change type dynamically
behavior_class = None
self.label = fsui.HeadingLabel(self, title)
hori_layout.add(self.label, margin=10)
hori_layout.add_spacer(0, expand=True)
if drive_count_option and not removable_media:
# FIXME: Drive count option does not work on the main page when
# changing to CD mode. Workaround for now is to not include it.
hori_layout.add(
ConfigWidgetFactory().create(
self,
drive_count_option,
text=gettext("Drive Count"),
platforms=AMIGA_PLATFORMS,
),
fill=True,
margin_right=20,
)
self.multi_select_button = fsui.Button(
self, gettext("Multi-Select...")
)
if self.cd_mode:
self.multi_select_button.set_tooltip(
gettext("Add Multiple CD-ROMs at Once")
)
else:
self.multi_select_button.set_tooltip(
gettext("Add Multiple Floppies at Once")
)
if behavior_class:
behavior_class(self.multi_select_button)
self.multi_select_button.activated.connect(self.on_multi_select_button)
hori_layout.add(self.multi_select_button, margin_right=10)
self.layout.add_spacer(0)
self.selectors = []
for i in range(drives):
selector = FloppySelector(parent, i, show_path=not removable_media)
if behavior_class:
behavior_class(selector)
selector.set_mode(self.mode)
self.selectors.append(selector)
self.layout.add(selector, fill=True, margin=10, margin_bottom=0)
def on_multi_select_button(self):
if self.cd_mode:
CDManager.multi_select(self.get_window())
else:
FloppyManager.multi_select(self.get_window())
def update_heading_label(self):
if self.mode == self.CD_MODE:
if self.num_drives > 1:
self.label.set_text(gettext("CD-ROM Drives"))
else:
self.label.set_text(gettext("CD-ROM Drive"))
elif self.mode == self.TAPE_MODE:
self.label.set_text(gettext("Tape Drive"))
elif self.mode == self.CARTRIDGE_MODE:
self.label.set_text(gettext("Cartridge"))
else:
self.label.set_text(gettext("Floppy Drives"))
# Need to update the layout to account for label widget size change.
self.layout.update()
| gpl-2.0 | -6,323,017,166,057,155,000 | 35.016529 | 79 | 0.59844 | false |
kalvdans/scipy | scipy/io/matlab/tests/test_mio.py | 14 | 42133 | # -*- coding: latin-1 -*-
''' Nose test generators
Need function load / save / roundtrip tests
'''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
from glob import glob
from io import BytesIO
from tempfile import mkdtemp
from scipy._lib.six import u, text_type, string_types
import warnings
import shutil
import gzip
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_raises, run_module_suite,
assert_)
import numpy as np
from numpy import array
import scipy.sparse as SP
import scipy.io.matlab.byteordercodes as boc
from scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError
from scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat)
from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader,
MatlabFunction, varmats_from_mat,
to_writeable, EmptyStructMarker)
from scipy.io.matlab import mio5_params as mio5p
test_data_path = pjoin(dirname(__file__), 'data')
def mlarr(*args, **kwargs):
"""Convenience function to return matlab-compatible 2D array."""
arr = np.array(*args, **kwargs)
arr.shape = matdims(arr)
return arr
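# Editorial note (illustrative): mlarr normalises input to the 2-D shapes
# MATLAB expects, e.g. mlarr(1).shape == (1, 1); 1-D input gains a second
# axis according to matdims' oned_as default.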
# Define cases to test
theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9)
case_table4 = [
{'name': 'double',
'classes': {'testdouble': 'double'},
'expected': {'testdouble': theta}
}]
case_table4.append(
{'name': 'string',
'classes': {'teststring': 'char'},
'expected': {'teststring':
array([u('"Do nine men interpret?" "Nine men," I nod.')])}
})
case_table4.append(
{'name': 'complex',
'classes': {'testcomplex': 'double'},
'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)}
})
A = np.zeros((3,5))
A[0] = list(range(1,6))
A[:,0] = list(range(1,4))
case_table4.append(
{'name': 'matrix',
'classes': {'testmatrix': 'double'},
'expected': {'testmatrix': A},
})
case_table4.append(
{'name': 'sparse',
'classes': {'testsparse': 'sparse'},
'expected': {'testsparse': SP.coo_matrix(A)},
})
B = A.astype(complex)
B[0,0] += 1j
case_table4.append(
{'name': 'sparsecomplex',
'classes': {'testsparsecomplex': 'sparse'},
'expected': {'testsparsecomplex': SP.coo_matrix(B)},
})
case_table4.append(
{'name': 'multi',
'classes': {'theta': 'double', 'a': 'double'},
'expected': {'theta': theta, 'a': A},
})
case_table4.append(
{'name': 'minus',
'classes': {'testminus': 'double'},
'expected': {'testminus': mlarr(-1)},
})
case_table4.append(
{'name': 'onechar',
'classes': {'testonechar': 'char'},
'expected': {'testonechar': array([u('r')])},
})
# Cell arrays stored as object arrays
CA = mlarr(( # tuple for object array creation
[],
mlarr([1]),
mlarr([[1,2]]),
mlarr([[1,2,3]])), dtype=object).reshape(1,-1)
CA[0,0] = array(
[u('This cell contains this string and 3 arrays of increasing length')])
case_table5 = [
{'name': 'cell',
'classes': {'testcell': 'cell'},
'expected': {'testcell': CA}}]
CAE = mlarr(( # tuple for object array creation
mlarr(1),
mlarr(2),
mlarr([]),
mlarr([]),
mlarr(3)), dtype=object).reshape(1,-1)
objarr = np.empty((1,1),dtype=object)
objarr[0,0] = mlarr(1)
case_table5.append(
{'name': 'scalarcell',
'classes': {'testscalarcell': 'cell'},
'expected': {'testscalarcell': objarr}
})
case_table5.append(
{'name': 'emptycell',
'classes': {'testemptycell': 'cell'},
'expected': {'testemptycell': CAE}})
case_table5.append(
{'name': 'stringarray',
'classes': {'teststringarray': 'char'},
'expected': {'teststringarray': array(
[u('one '), u('two '), u('three')])},
})
case_table5.append(
{'name': '3dmatrix',
'classes': {'test3dmatrix': 'double'},
'expected': {
'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))}
})
st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3)
dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']]
st1 = np.zeros((1,1), dtype)
st1['stringfield'][0,0] = array([u('Rats live on no evil star.')])
st1['doublefield'][0,0] = st_sub_arr
st1['complexfield'][0,0] = st_sub_arr * (1 + 1j)
case_table5.append(
{'name': 'struct',
'classes': {'teststruct': 'struct'},
'expected': {'teststruct': st1}
})
CN = np.zeros((1,2), dtype=object)
CN[0,0] = mlarr(1)
CN[0,1] = np.zeros((1,3), dtype=object)
CN[0,1][0,0] = mlarr(2, dtype=np.uint8)
CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8)
CN[0,1][0,2] = np.zeros((1,2), dtype=object)
CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8)
CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8)
case_table5.append(
{'name': 'cellnest',
'classes': {'testcellnest': 'cell'},
'expected': {'testcellnest': CN},
})
st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']])
st2[0,0]['one'] = mlarr(1)
st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)])
st2[0,0]['two'][0,0]['three'] = array([u('number 3')])
case_table5.append(
{'name': 'structnest',
'classes': {'teststructnest': 'struct'},
'expected': {'teststructnest': st2}
})
a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']])
a[0,0]['one'] = mlarr(1)
a[0,0]['two'] = mlarr(2)
a[0,1]['one'] = array([u('number 1')])
a[0,1]['two'] = array([u('number 2')])
case_table5.append(
{'name': 'structarr',
'classes': {'teststructarr': 'struct'},
'expected': {'teststructarr': a}
})
ODT = np.dtype([(n, object) for n in
['expr', 'inputExpr', 'args',
'isEmpty', 'numArgs', 'version']])
MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline')
m0 = MO[0,0]
m0['expr'] = array([u('x')])
m0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')])
m0['args'] = array([u('x')])
m0['isEmpty'] = mlarr(0)
m0['numArgs'] = mlarr(1)
m0['version'] = mlarr(1)
case_table5.append(
{'name': 'object',
'classes': {'testobject': 'object'},
'expected': {'testobject': MO}
})
fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb')
u_str = fp_u_str.read().decode('utf-8')
fp_u_str.close()
case_table5.append(
{'name': 'unicode',
'classes': {'testunicode': 'char'},
'expected': {'testunicode': array([u_str])}
})
case_table5.append(
{'name': 'sparse',
'classes': {'testsparse': 'sparse'},
'expected': {'testsparse': SP.coo_matrix(A)},
})
case_table5.append(
{'name': 'sparsecomplex',
'classes': {'testsparsecomplex': 'sparse'},
'expected': {'testsparsecomplex': SP.coo_matrix(B)},
})
case_table5.append(
{'name': 'bool',
'classes': {'testbools': 'logical'},
'expected': {'testbools':
array([[True], [False]])},
})
case_table5_rt = case_table5[:]
# Inline functions can't be concatenated in matlab, so RT only
case_table5_rt.append(
{'name': 'objectarray',
'classes': {'testobjectarray': 'object'},
'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}})
def types_compatible(var1, var2):
"""Check if types are same or compatible.
0-D numpy scalars are compatible with bare python scalars.
"""
type1 = type(var1)
type2 = type(var2)
if type1 is type2:
return True
if type1 is np.ndarray and var1.shape == ():
return type(var1.item()) is type2
if type2 is np.ndarray and var2.shape == ():
return type(var2.item()) is type1
return False
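# For example (illustrative): types_compatible(np.array(1.0), 2.0) is True,
# because the 0-D array unwraps via item() to a Python float, whereas
# types_compatible(np.array([1.0]), 2.0) is False -- a 1-D array has shape
# (1,), not (), so it is not treated as a bare scalar.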
def _check_level(label, expected, actual):
""" Check one level of a potentially nested array """
if SP.issparse(expected): # allow different types of sparse matrices
assert_(SP.issparse(actual))
assert_array_almost_equal(actual.todense(),
expected.todense(),
err_msg=label,
decimal=5)
return
# Check types are as expected
assert_(types_compatible(expected, actual),
"Expected type %s, got %s at %s" %
(type(expected), type(actual), label))
# A field in a record array may not be an ndarray
# A scalar from a record array will be type np.void
if not isinstance(expected,
(np.void, np.ndarray, MatlabObject)):
assert_equal(expected, actual)
return
# This is an ndarray-like thing
assert_(expected.shape == actual.shape,
msg='Expected shape %s, got %s at %s' % (expected.shape,
actual.shape,
label))
ex_dtype = expected.dtype
if ex_dtype.hasobject: # array of objects
if isinstance(expected, MatlabObject):
assert_equal(expected.classname, actual.classname)
for i, ev in enumerate(expected):
level_label = "%s, [%d], " % (label, i)
_check_level(level_label, ev, actual[i])
return
if ex_dtype.fields: # probably recarray
for fn in ex_dtype.fields:
level_label = "%s, field %s, " % (label, fn)
_check_level(level_label,
expected[fn], actual[fn])
return
if ex_dtype.type in (text_type, # string or bool
np.unicode_,
np.bool_):
assert_equal(actual, expected, err_msg=label)
return
# Something numeric
assert_array_almost_equal(actual, expected, err_msg=label, decimal=5)
def _load_check_case(name, files, case):
for file_name in files:
matdict = loadmat(file_name, struct_as_record=True)
label = "test %s; file %s" % (name, file_name)
for k, expected in case.items():
k_label = "%s, variable %s" % (label, k)
assert_(k in matdict, "Missing key at %s" % k_label)
_check_level(k_label, expected, matdict[k])
def _whos_check_case(name, files, case, classes):
for file_name in files:
label = "test %s; file %s" % (name, file_name)
whos = whosmat(file_name)
expected_whos = []
for k, expected in case.items():
expected_whos.append((k, expected.shape, classes[k]))
whos.sort()
expected_whos.sort()
assert_equal(whos, expected_whos,
"%s: %r != %r" % (label, whos, expected_whos)
)
# Round trip tests
def _rt_check_case(name, expected, format):
mat_stream = BytesIO()
savemat(mat_stream, expected, format=format)
mat_stream.seek(0)
_load_check_case(name, [mat_stream], expected)
# generator for load tests
def test_load():
for case in case_table4 + case_table5:
name = case['name']
expected = case['expected']
filt = pjoin(test_data_path, 'test%s_*.mat' % name)
files = glob(filt)
assert_(len(files) > 0,
"No files for test %s using filter %s" % (name, filt))
yield _load_check_case, name, files, expected
# generator for whos tests
def test_whos():
for case in case_table4 + case_table5:
name = case['name']
expected = case['expected']
classes = case['classes']
filt = pjoin(test_data_path, 'test%s_*.mat' % name)
files = glob(filt)
assert_(len(files) > 0,
"No files for test %s using filter %s" % (name, filt))
yield _whos_check_case, name, files, expected, classes
# generator for round trip tests
def test_round_trip():
    # Hoisted out of the loop: in Python 2 a list comprehension leaks its
    # loop variable, so building this list inside the loop clobbered `case`.
    case_table4_names = [c['name'] for c in case_table4]
    for case in case_table4 + case_table5_rt:
        name = case['name'] + '_round_trip'
expected = case['expected']
for format in (['4', '5'] if case['name'] in case_table4_names else ['5']):
yield _rt_check_case, name, expected, format
def test_gzip_simple():
xdense = np.zeros((20,20))
xdense[2,3] = 2.3
xdense[4,5] = 4.5
x = SP.csc_matrix(xdense)
name = 'gzip_test'
expected = {'x':x}
format = '4'
tmpdir = mkdtemp()
try:
fname = pjoin(tmpdir,name)
mat_stream = gzip.open(fname,mode='wb')
savemat(mat_stream, expected, format=format)
mat_stream.close()
mat_stream = gzip.open(fname,mode='rb')
actual = loadmat(mat_stream, struct_as_record=True)
mat_stream.close()
finally:
shutil.rmtree(tmpdir)
assert_array_almost_equal(actual['x'].todense(),
expected['x'].todense(),
err_msg=repr(actual))
def test_multiple_open():
# Ticket #1039, on Windows: check that files are not left open
tmpdir = mkdtemp()
try:
x = dict(x=np.zeros((2, 2)))
fname = pjoin(tmpdir, "a.mat")
# Check that file is not left open
savemat(fname, x)
os.unlink(fname)
savemat(fname, x)
loadmat(fname)
os.unlink(fname)
# Check that stream is left open
f = open(fname, 'wb')
savemat(f, x)
f.seek(0)
f.close()
f = open(fname, 'rb')
loadmat(f)
f.seek(0)
f.close()
finally:
shutil.rmtree(tmpdir)
def test_mat73():
# Check any hdf5 files raise an error
filenames = glob(
pjoin(test_data_path, 'testhdf5*.mat'))
assert_(len(filenames) > 0)
for filename in filenames:
fp = open(filename, 'rb')
assert_raises(NotImplementedError,
loadmat,
fp,
struct_as_record=True)
fp.close()
def test_warnings():
# This test is an echo of the previous behavior, which was to raise a
# warning if the user triggered a search for mat files on the Python system
# path. We can remove the test in the next version after upcoming (0.13)
fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat')
with warnings.catch_warnings():
warnings.simplefilter('error')
# This should not generate a warning
mres = loadmat(fname, struct_as_record=True)
# This neither
mres = loadmat(fname, struct_as_record=False)
def test_regression_653():
# Saving a dictionary with only invalid keys used to raise an error. Now we
# save this as an empty struct in matlab space.
sio = BytesIO()
savemat(sio, {'d':{1:2}}, format='5')
back = loadmat(sio)['d']
# Check we got an empty struct equivalent
assert_equal(back.shape, (1,1))
assert_equal(back.dtype, np.dtype(object))
assert_(back[0,0] is None)
def test_structname_len():
# Test limit for length of field names in structs
lim = 31
fldname = 'a' * lim
st1 = np.zeros((1,1), dtype=[(fldname, object)])
savemat(BytesIO(), {'longstruct': st1}, format='5')
fldname = 'a' * (lim+1)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': st1}, format='5')
def test_4_and_long_field_names_incompatible():
# Long field names option not supported in 4
my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)])
assert_raises(ValueError, savemat, BytesIO(),
{'my_struct':my_struct}, format='4', long_field_names=True)
def test_long_field_names():
# Test limit for length of field names in structs
lim = 63
fldname = 'a' * lim
st1 = np.zeros((1,1), dtype=[(fldname, object)])
savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True)
fldname = 'a' * (lim+1)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': st1}, format='5',long_field_names=True)
def test_long_field_names_in_struct():
# Regression test - long_field_names was erased if you passed a struct
# within a struct
lim = 63
fldname = 'a' * lim
cell = np.ndarray((1,2),dtype=object)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
cell[0,0] = st1
cell[0,1] = st1
savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True)
#
# Check to make sure it fails with long field names off
#
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': cell}, format='5', long_field_names=False)
def test_cell_with_one_thing_in_it():
# Regression test - make a cell array that's 1 x 2 and put two
# strings in it. It works. Make a cell array that's 1 x 1 and put
# a string in it. It should work but, in the old days, it didn't.
cells = np.ndarray((1,2),dtype=object)
cells[0,0] = 'Hello'
cells[0,1] = 'World'
savemat(BytesIO(), {'x': cells}, format='5')
cells = np.ndarray((1,1),dtype=object)
cells[0,0] = 'Hello, world'
savemat(BytesIO(), {'x': cells}, format='5')
def test_writer_properties():
# Tests getting, setting of properties of matrix writer
mfw = MatFile5Writer(BytesIO())
yield assert_equal, mfw.global_vars, []
mfw.global_vars = ['avar']
yield assert_equal, mfw.global_vars, ['avar']
yield assert_equal, mfw.unicode_strings, False
mfw.unicode_strings = True
yield assert_equal, mfw.unicode_strings, True
yield assert_equal, mfw.long_field_names, False
mfw.long_field_names = True
yield assert_equal, mfw.long_field_names, True
def test_use_small_element():
# Test whether we're using small data element or not
sio = BytesIO()
wtr = MatFile5Writer(sio)
# First check size for no sde for name
arr = np.zeros(10)
wtr.put_variables({'aaaaa': arr})
w_sz = len(sio.getvalue())
# Check small name results in largish difference in size
sio.truncate(0)
sio.seek(0)
wtr.put_variables({'aaaa': arr})
yield assert_, w_sz - len(sio.getvalue()) > 4
# Whereas increasing name size makes less difference
sio.truncate(0)
sio.seek(0)
wtr.put_variables({'aaaaaa': arr})
yield assert_, len(sio.getvalue()) - w_sz < 4
def test_save_dict():
# Test that dict can be saved (as recarray), loaded as matstruct
dict_types = ((dict, False),)
try:
from collections import OrderedDict
except ImportError:
pass
else:
dict_types += ((OrderedDict, True),)
ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
for dict_type, is_ordered in dict_types:
# Initialize with tuples to keep order for OrderedDict
d = dict_type([('a', 1), ('b', 2)])
stream = BytesIO()
savemat(stream, {'dict': d})
stream.seek(0)
vals = loadmat(stream)['dict']
assert_equal(set(vals.dtype.names), set(['a', 'b']))
if is_ordered: # Input was ordered, output in ab order
assert_array_equal(vals, ab_exp)
else: # Not ordered input, either order output
if vals.dtype.names[0] == 'a':
assert_array_equal(vals, ab_exp)
else:
assert_array_equal(vals, ba_exp)
def test_1d_shape():
# New 5 behavior is 1D -> row vector
arr = np.arange(5)
for format in ('4', '5'):
# Column is the default
stream = BytesIO()
savemat(stream, {'oned': arr}, format=format)
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (1, 5))
# can be explicitly 'column' for oned_as
stream = BytesIO()
savemat(stream, {'oned':arr},
format=format,
oned_as='column')
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (5,1))
# but different from 'row'
stream = BytesIO()
savemat(stream, {'oned':arr},
format=format,
oned_as='row')
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (1,5))
def test_compression():
arr = np.zeros(100).reshape((5,20))
arr[2,10] = 1
stream = BytesIO()
savemat(stream, {'arr':arr})
raw_len = len(stream.getvalue())
vals = loadmat(stream)
yield assert_array_equal, vals['arr'], arr
stream = BytesIO()
savemat(stream, {'arr':arr}, do_compression=True)
compressed_len = len(stream.getvalue())
vals = loadmat(stream)
yield assert_array_equal, vals['arr'], arr
yield assert_, raw_len > compressed_len
# Concatenate, test later
arr2 = arr.copy()
arr2[0,0] = 1
stream = BytesIO()
savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False)
vals = loadmat(stream)
yield assert_array_equal, vals['arr2'], arr2
stream = BytesIO()
savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True)
vals = loadmat(stream)
yield assert_array_equal, vals['arr2'], arr2
def test_single_object():
stream = BytesIO()
savemat(stream, {'A':np.array(1, dtype=object)})
def test_skip_variable():
# Test skipping over the first of two variables in a MAT file
# using mat_reader_factory and put_variables to read them in.
#
# This is a regression test of a problem that's caused by
# using the compressed file reader seek instead of the raw file
# I/O seek when skipping over a compressed chunk.
#
# The problem arises when the chunk is large: this file has
# a 256x256 array of random (uncompressible) doubles.
#
filename = pjoin(test_data_path,'test_skip_variable.mat')
#
# Prove that it loads with loadmat
#
d = loadmat(filename, struct_as_record=True)
yield assert_, 'first' in d
yield assert_, 'second' in d
#
# Make the factory
#
factory = mat_reader_factory(filename, struct_as_record=True)
#
# This is where the factory breaks with an error in MatMatrixGetter.to_next
#
d = factory.get_variables('second')
yield assert_, 'second' in d
factory.mat_stream.close()
def test_empty_struct():
# ticket 885
filename = pjoin(test_data_path,'test_empty_struct.mat')
# before ticket fix, this would crash with ValueError, empty data
# type
d = loadmat(filename, struct_as_record=True)
a = d['a']
assert_equal(a.shape, (1,1))
assert_equal(a.dtype, np.dtype(object))
assert_(a[0,0] is None)
stream = BytesIO()
arr = np.array((), dtype='U')
# before ticket fix, this used to give data type not understood
savemat(stream, {'arr':arr})
d = loadmat(stream)
a2 = d['arr']
assert_array_equal(a2, arr)
def test_save_empty_dict():
# saving empty dict also gives empty struct
stream = BytesIO()
savemat(stream, {'arr': {}})
d = loadmat(stream)
a = d['arr']
assert_equal(a.shape, (1,1))
assert_equal(a.dtype, np.dtype(object))
assert_(a[0,0] is None)
def assert_any_equal(output, alternatives):
""" Assert `output` is equal to at least one element in `alternatives`
"""
one_equal = False
for expected in alternatives:
if np.all(output == expected):
one_equal = True
break
assert_(one_equal)
def test_to_writeable():
# Test to_writeable function
res = to_writeable(np.array([1])) # pass through ndarrays
assert_equal(res.shape, (1,))
assert_array_equal(res, 1)
# Dict fields can be written in any order
expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')])
expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')])
alternatives = (expected1, expected2)
assert_any_equal(to_writeable({'a':1,'b':2}), alternatives)
# Fields with underscores discarded
assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives)
# Not-string fields discarded
assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives)
    # String fields that are not valid Python identifiers discarded
assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives)
# Object with field names is equivalent
class klass(object):
pass
c = klass
c.a = 1
c.b = 2
assert_any_equal(to_writeable(c), alternatives)
# empty list and tuple go to empty array
res = to_writeable([])
assert_equal(res.shape, (0,))
assert_equal(res.dtype.type, np.float64)
res = to_writeable(())
assert_equal(res.shape, (0,))
assert_equal(res.dtype.type, np.float64)
# None -> None
assert_(to_writeable(None) is None)
# String to strings
assert_equal(to_writeable('a string').dtype.type, np.str_)
# Scalars to numpy to numpy scalars
res = to_writeable(1)
assert_equal(res.shape, ())
assert_equal(res.dtype.type, np.array(1).dtype.type)
assert_array_equal(res, 1)
# Empty dict returns EmptyStructMarker
assert_(to_writeable({}) is EmptyStructMarker)
# Object does not have (even empty) __dict__
assert_(to_writeable(object()) is None)
# Custom object does have empty __dict__, returns EmptyStructMarker
class C(object):
pass
    assert_(to_writeable(C()) is EmptyStructMarker)
# dict keys with legal characters are convertible
res = to_writeable({'a': 1})['a']
assert_equal(res.shape, (1,))
assert_equal(res.dtype.type, np.object_)
    # If every field name is illegal, falls back to EmptyStructMarker
assert_(to_writeable({'1':1}) is EmptyStructMarker)
assert_(to_writeable({'_a':1}) is EmptyStructMarker)
# Unless there are valid fields, in which case structured array
assert_equal(to_writeable({'1':1, 'f': 2}),
np.array([(2,)], dtype=[('f', '|O8')]))
def test_recarray():
# check roundtrip of structured array
dt = [('f1', 'f8'),
('f2', 'S10')]
arr = np.zeros((2,), dtype=dt)
arr[0]['f1'] = 0.5
arr[0]['f2'] = 'python'
arr[1]['f1'] = 99
arr[1]['f2'] = 'not perl'
stream = BytesIO()
savemat(stream, {'arr': arr})
d = loadmat(stream, struct_as_record=False)
a20 = d['arr'][0,0]
yield assert_equal, a20.f1, 0.5
yield assert_equal, a20.f2, 'python'
d = loadmat(stream, struct_as_record=True)
a20 = d['arr'][0,0]
yield assert_equal, a20['f1'], 0.5
yield assert_equal, a20['f2'], 'python'
# structs always come back as object types
yield assert_equal, a20.dtype, np.dtype([('f1', 'O'),
('f2', 'O')])
a21 = d['arr'].flat[1]
yield assert_equal, a21['f1'], 99
yield assert_equal, a21['f2'], 'not perl'
def test_save_object():
class C(object):
pass
c = C()
c.field1 = 1
c.field2 = 'a string'
stream = BytesIO()
savemat(stream, {'c': c})
d = loadmat(stream, struct_as_record=False)
c2 = d['c'][0,0]
assert_equal(c2.field1, 1)
assert_equal(c2.field2, 'a string')
d = loadmat(stream, struct_as_record=True)
c2 = d['c'][0,0]
assert_equal(c2['field1'], 1)
assert_equal(c2['field2'], 'a string')
def test_read_opts():
# tests if read is seeing option sets, at initialization and after
# initialization
arr = np.arange(6).reshape(1,6)
stream = BytesIO()
savemat(stream, {'a': arr})
rdr = MatFile5Reader(stream)
back_dict = rdr.get_variables()
rarr = back_dict['a']
assert_array_equal(rarr, arr)
rdr = MatFile5Reader(stream, squeeze_me=True)
assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
rdr.squeeze_me = False
assert_array_equal(rarr, arr)
rdr = MatFile5Reader(stream, byte_order=boc.native_code)
assert_array_equal(rdr.get_variables()['a'], arr)
# inverted byte code leads to error on read because of swapped
# header etc
rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
assert_raises(Exception, rdr.get_variables)
rdr.byte_order = boc.native_code
assert_array_equal(rdr.get_variables()['a'], arr)
arr = np.array(['a string'])
stream.truncate(0)
stream.seek(0)
savemat(stream, {'a': arr})
rdr = MatFile5Reader(stream)
assert_array_equal(rdr.get_variables()['a'], arr)
rdr = MatFile5Reader(stream, chars_as_strings=False)
carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
assert_array_equal(rdr.get_variables()['a'], carr)
rdr.chars_as_strings = True
assert_array_equal(rdr.get_variables()['a'], arr)
def test_empty_string():
# make sure reading empty string does not raise error
estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
fp = open(estring_fname, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_array_equal(d['a'], np.array([], dtype='U1'))
    # empty string round trip.  Matlab cannot distinguish
# between a string array that is empty, and a string array
# containing a single empty string, because it stores strings as
# arrays of char. There is no way of having an array of char that
# is not empty, but contains an empty string.
stream = BytesIO()
savemat(stream, {'a': np.array([''])})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['a'], np.array([], dtype='U1'))
stream.truncate(0)
stream.seek(0)
savemat(stream, {'a': np.array([], dtype='U1')})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['a'], np.array([], dtype='U1'))
stream.close()
def test_corrupted_data():
import zlib
for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),
(zlib.error, 'corrupted_zlib_checksum.mat')]:
with open(pjoin(test_data_path, fname), 'rb') as fp:
rdr = MatFile5Reader(fp)
assert_raises(exc, rdr.get_variables)
def test_corrupted_data_check_can_be_disabled():
with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp:
rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False)
rdr.get_variables()
def test_read_both_endian():
# make sure big- and little- endian data is read correctly
for fname in ('big_endian.mat', 'little_endian.mat'):
fp = open(pjoin(test_data_path, fname), 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_array_equal(d['strings'],
np.array([['hello'],
['world']], dtype=object))
assert_array_equal(d['floats'],
np.array([[2., 3.],
[3., 4.]], dtype=np.float32))
def test_write_opposite_endian():
# We don't support writing opposite endian .mat files, but we need to behave
# correctly if the user supplies an other-endian numpy array to write out
float_arr = np.array([[2., 3.],
[3., 4.]])
int_arr = np.arange(6).reshape((2, 3))
uni_arr = np.array(['hello', 'world'], dtype='U')
stream = BytesIO()
savemat(stream, {'floats': float_arr.byteswap().newbyteorder(),
'ints': int_arr.byteswap().newbyteorder(),
'uni_arr': uni_arr.byteswap().newbyteorder()})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['floats'], float_arr)
assert_array_equal(d['ints'], int_arr)
assert_array_equal(d['uni_arr'], uni_arr)
stream.close()
def test_logical_array():
# The roundtrip test doesn't verify that we load the data up with the
# correct (bool) dtype
with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj:
rdr = MatFile5Reader(fobj, mat_dtype=True)
d = rdr.get_variables()
x = np.array([[True], [False]], dtype=np.bool_)
assert_array_equal(d['testbools'], x)
assert_equal(d['testbools'].dtype, x.dtype)
def test_logical_out_type():
# Confirm that bool type written as uint8, uint8 class
# See gh-4022
stream = BytesIO()
barr = np.array([False, True, False])
savemat(stream, {'barray': barr})
stream.seek(0)
reader = MatFile5Reader(stream)
reader.initialize_read()
reader.read_file_header()
hdr, _ = reader.read_var_header()
assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS)
assert_equal(hdr.is_logical, True)
var = reader.read_var_array(hdr, False)
assert_equal(var.dtype.type, np.uint8)
def test_mat4_3d():
# test behavior when writing 3D arrays to matlab 4 files
stream = BytesIO()
arr = np.arange(24).reshape((2,3,4))
assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4')
def test_func_read():
func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat')
fp = open(func_eg, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_(isinstance(d['testfunc'], MatlabFunction))
stream = BytesIO()
wtr = MatFile5Writer(stream)
assert_raises(MatWriteError, wtr.put_variables, d)
def test_mat_dtype():
double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat')
fp = open(double_eg, 'rb')
rdr = MatFile5Reader(fp, mat_dtype=False)
d = rdr.get_variables()
fp.close()
yield assert_equal, d['testmatrix'].dtype.kind, 'u'
fp = open(double_eg, 'rb')
rdr = MatFile5Reader(fp, mat_dtype=True)
d = rdr.get_variables()
fp.close()
yield assert_equal, d['testmatrix'].dtype.kind, 'f'
def test_sparse_in_struct():
# reproduces bug found by DC where Cython code was insisting on
# ndarray return type, but getting sparse matrix
st = {'sparsefield': SP.coo_matrix(np.eye(4))}
stream = BytesIO()
savemat(stream, {'a':st})
d = loadmat(stream, struct_as_record=True)
yield assert_array_equal, d['a'][0,0]['sparsefield'].todense(), np.eye(4)
def test_mat_struct_squeeze():
stream = BytesIO()
in_d = {'st':{'one':1, 'two':2}}
savemat(stream, in_d)
# no error without squeeze
out_d = loadmat(stream, struct_as_record=False)
# previous error was with squeeze, with mat_struct
out_d = loadmat(stream,
struct_as_record=False,
squeeze_me=True,
)
def test_scalar_squeeze():
stream = BytesIO()
in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}
savemat(stream, in_d)
out_d = loadmat(stream, squeeze_me=True)
assert_(isinstance(out_d['scalar'], float))
assert_(isinstance(out_d['string'], string_types))
assert_(isinstance(out_d['st'], np.ndarray))
def test_str_round():
# from report by Angus McMorland on mailing list 3 May 2010
stream = BytesIO()
in_arr = np.array(['Hello', 'Foob'])
out_arr = np.array(['Hello', 'Foob '])
savemat(stream, dict(a=in_arr))
res = loadmat(stream)
# resulted in ['HloolFoa', 'elWrdobr']
assert_array_equal(res['a'], out_arr)
stream.truncate(0)
stream.seek(0)
# Make Fortran ordered version of string
in_str = in_arr.tostring(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
# unicode save did lead to buffer too small error
stream.truncate(0)
stream.seek(0)
in_arr_u = in_arr.astype('U')
out_arr_u = out_arr.astype('U')
savemat(stream, {'a': in_arr_u})
res = loadmat(stream)
assert_array_equal(res['a'], out_arr_u)
def test_fieldnames():
# Check that field names are as expected
stream = BytesIO()
savemat(stream, {'a': {'a':1, 'b':2}})
res = loadmat(stream)
field_names = res['a'].dtype.names
assert_equal(set(field_names), set(('a', 'b')))
def test_loadmat_varnames():
# Test that we can get just one variable from a mat file using loadmat
mat5_sys_names = ['__globals__',
'__header__',
'__version__']
    for eg_file, sys_v_names in (
            (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []),
            (pjoin(test_data_path, 'testmulti_7.4_GLNX86.mat'),
             mat5_sys_names)):
vars = loadmat(eg_file)
assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))
vars = loadmat(eg_file, variable_names='a')
assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
vars = loadmat(eg_file, variable_names=['a'])
assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
vars = loadmat(eg_file, variable_names=['theta'])
assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
vars = loadmat(eg_file, variable_names=('theta',))
assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
vars = loadmat(eg_file, variable_names=[])
assert_equal(set(vars.keys()), set(sys_v_names))
vnames = ['theta']
vars = loadmat(eg_file, variable_names=vnames)
assert_equal(vnames, ['theta'])
def test_round_types():
# Check that saving, loading preserves dtype in most cases
arr = np.arange(10)
stream = BytesIO()
for dts in ('f8','f4','i8','i4','i2','i1',
'u8','u4','u2','u1','c16','c8'):
stream.truncate(0)
stream.seek(0) # needed for BytesIO in python 3
savemat(stream, {'arr': arr.astype(dts)})
vars = loadmat(stream)
assert_equal(np.dtype(dts), vars['arr'].dtype)
def test_varmats_from_mat():
# Make a mat file with several variables, write it, read it back
names_vars = (('arr', mlarr(np.arange(10))),
('mystr', mlarr('a string')),
('mynum', mlarr(10)))
# Dict like thing to give variables in defined order
class C(object):
def items(self):
return names_vars
stream = BytesIO()
savemat(stream, C())
varmats = varmats_from_mat(stream)
assert_equal(len(varmats), 3)
for i in range(3):
name, var_stream = varmats[i]
exp_name, exp_res = names_vars[i]
assert_equal(name, exp_name)
res = loadmat(var_stream)
assert_array_equal(res[name], exp_res)
def test_one_by_zero():
# Test 1x0 chars get read correctly
func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')
fp = open(func_eg, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_equal(d['var'].shape, (0,))
def test_load_mat4_le():
    # We were getting byte order wrong when reading little-endian float64 dense
# matrices on big-endian platforms
mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')
vars = loadmat(mat4_fname)
assert_array_equal(vars['a'], [[0.1, 1.2]])
def test_unicode_mat4():
# Mat4 should save unicode as latin1
bio = BytesIO()
var = {'second_cat': u('Schrödinger')}
savemat(bio, var, format='4')
var_back = loadmat(bio)
assert_equal(var_back['second_cat'], var['second_cat'])
def test_logical_sparse():
# Test we can read logical sparse stored in mat file as bytes.
# See https://github.com/scipy/scipy/issues/3539.
# In some files saved by MATLAB, the sparse data elements (Real Part
# Subelement in MATLAB speak) are stored with apparent type double
# (miDOUBLE) but are in fact single bytes.
filename = pjoin(test_data_path,'logical_sparse.mat')
# Before fix, this would crash with:
# ValueError: indices and data should have the same size
d = loadmat(filename, struct_as_record=True)
log_sp = d['sp_log_5_4']
assert_(isinstance(log_sp, SP.csc_matrix))
assert_equal(log_sp.dtype.type, np.bool_)
assert_array_equal(log_sp.toarray(),
[[True, True, True, False],
[False, False, True, False],
[False, False, True, False],
[False, False, False, False],
[False, False, False, False]])
def test_empty_sparse():
# Can we read empty sparse matrices?
sio = BytesIO()
import scipy.sparse
empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])
savemat(sio, dict(x=empty_sparse))
sio.seek(0)
res = loadmat(sio)
assert_array_equal(res['x'].shape, empty_sparse.shape)
assert_array_equal(res['x'].todense(), 0)
# Do empty sparse matrices get written with max nnz 1?
# See https://github.com/scipy/scipy/issues/4208
sio.seek(0)
reader = MatFile5Reader(sio)
reader.initialize_read()
reader.read_file_header()
hdr, _ = reader.read_var_header()
assert_equal(hdr.nzmax, 1)
def test_empty_mat_error():
# Test we get a specific warning for an empty mat file
sio = BytesIO()
assert_raises(MatReadError, loadmat, sio)
def test_miuint32_compromise():
# Reader should accept miUINT32 for miINT32, but check signs
# mat file with miUINT32 for miINT32, but OK values
filename = pjoin(test_data_path, 'miuint32_for_miint32.mat')
res = loadmat(filename)
assert_equal(res['an_array'], np.arange(10)[None, :])
# mat file with miUINT32 for miINT32, with negative value
filename = pjoin(test_data_path, 'bad_miuint32.mat')
with warnings.catch_warnings(record=True): # Py3k ResourceWarning
assert_raises(ValueError, loadmat, filename)
def test_miutf8_for_miint8_compromise():
# Check reader accepts ascii as miUTF8 for array names
filename = pjoin(test_data_path, 'miutf8_array_name.mat')
res = loadmat(filename)
assert_equal(res['array_name'], [[1]])
# mat file with non-ascii utf8 name raises error
filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat')
with warnings.catch_warnings(record=True): # Py3k ResourceWarning
assert_raises(ValueError, loadmat, filename)
def test_bad_utf8():
# Check that reader reads bad UTF with 'replace' option
filename = pjoin(test_data_path,'broken_utf8.mat')
res = loadmat(filename)
assert_equal(res['bad_string'],
b'\x80 am broken'.decode('utf8', 'replace'))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -5,561,465,797,958,377,000 | 33.225833 | 83 | 0.596649 | false |
sschnug/pyeda | pyeda/parsing/pla.py | 5 | 4136 | """
PLA
This is a partial implementation of the Berkeley PLA format.
See extension/espresso/html/espresso.5.html for details.
Exceptions:
Error
Interface Functions:
parse
"""
# Disable 'no-name-in-module', b/c pylint can't look into C extensions
# pylint: disable=E0611
import re
from pyeda.boolalg.espresso import FTYPE, DTYPE, RTYPE
_COMMENT = re.compile(r"^#.*$")
_NINS = re.compile(r"^.i\s+(\d+)$")
_NOUTS = re.compile(r"^.o\s+(\d+)$")
_PROD = re.compile(r"^.p\s+(\d+)$")
_ILB = re.compile(r"^.ilb\s+(\w+(?:\s+\w+)*)$")
_OB = re.compile(r"^.ob\s+(\w+(?:\s+\w+)*)$")
_TYPE = re.compile(r"^.type\s+(f|r|fd|fr|dr|fdr)$")
_CUBE = re.compile(r"^([01-]+)\s+([01-]+)$")
_END = re.compile(r"^.e(?:nd)?$")
_TYPES = {
'f': FTYPE,
'r': RTYPE,
'fd': FTYPE | DTYPE,
'fr': FTYPE | RTYPE,
'dr': DTYPE | RTYPE,
'fdr': FTYPE | DTYPE | RTYPE,
}
_INCODE = {'0': 1, '1': 2, '-': 3}
_OUTCODE = {'0': 0, '1': 1, '-': 2}
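# Editorial note: the two-bit input encoding (0 -> 01b, 1 -> 10b, '-' -> 11b)
# mirrors espresso's internal cube representation; output columns use plain
# 0/1, with 2 marking a don't-care.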
class Error(Exception):
"""An error happened during parsing a PLA file"""
def parse(s):
"""
Parse an input string in PLA format,
and return an intermediate representation dict.
Parameters
----------
s : str
String containing a PLA.
Returns
-------
A dict with all PLA information:
=============== ============ =================================
Key Value type Value description
=============== ============ =================================
ninputs int Number of inputs
noutputs int Number of outputs
input_labels list Input variable names
output_labels list Output function names
intype int Cover type: {F, R, FD, FR, DR, FDR}
cover set Implicant table
=============== ============ =================================
"""
d = dict(ninputs=None, noutputs=None,
input_labels=None, output_labels=None,
intype=None, cover=set())
lines = [line.strip() for line in s.splitlines()]
for i, line in enumerate(lines, start=1):
# skip comments
if not line or _COMMENT.match(line):
continue
# .i
m_in = _NINS.match(line)
if m_in:
if d['ninputs'] is None:
d['ninputs'] = int(m_in.group(1))
continue
else:
raise Error(".i declared more than once")
# .o
m_out = _NOUTS.match(line)
if m_out:
if d['noutputs'] is None:
d['noutputs'] = int(m_out.group(1))
continue
else:
raise Error(".o declared more than once")
# ignore .p
m_prod = _PROD.match(line)
if m_prod:
continue
# .ilb
m_ilb = _ILB.match(line)
if m_ilb:
if d['input_labels'] is None:
d['input_labels'] = m_ilb.group(1).split()
continue
else:
raise Error(".ilb declared more than once")
# .ob
m_ob = _OB.match(line)
if m_ob:
if d['output_labels'] is None:
d['output_labels'] = m_ob.group(1).split()
continue
else:
raise Error(".ob declared more than once")
# .type
m_type = _TYPE.match(line)
if m_type:
if d['intype'] is None:
d['intype'] = _TYPES[m_type.group(1)]
continue
else:
raise Error(".type declared more tha once")
# cube
m_cube = _CUBE.match(line)
if m_cube:
inputs, outputs = m_cube.groups()
invec = tuple(_INCODE[c] for c in inputs)
outvec = tuple(_OUTCODE[c] for c in outputs)
d['cover'].add((invec, outvec))
continue
# ignore .e
m_end = _END.match(line)
if m_end:
continue
raise Error("syntax error on line {}: {}".format(i, line))
return d
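# Minimal usage sketch (illustrative only -- the labels and cubes below are
# invented, not taken from a real PLA file):
#
#     _EXAMPLE = "\n".join([
#         ".i 2", ".o 1",
#         ".ilb a b", ".ob f",
#         "1- 1", "01 1",
#         ".e",
#     ])
#     d = parse(_EXAMPLE)
#     # d['ninputs'] == 2, d['noutputs'] == 1
#     # d['cover'] == {((2, 3), (1,)), ((1, 2), (1,))}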
| bsd-2-clause | 2,750,904,232,134,338,600 | 26.210526 | 75 | 0.465909 | false |
watonyweng/horizon | openstack_dashboard/dashboards/project/images/images/urls.py | 65 | 1264 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.images.images import views
VIEWS_MOD = 'openstack_dashboard.dashboards.project.images.images.views'
urlpatterns = patterns(
VIEWS_MOD,
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^(?P<image_id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^(?P<image_id>[^/]+)/$', views.DetailView.as_view(), name='detail'),
)
| apache-2.0 | -7,291,472,935,780,299,000 | 36.176471 | 78 | 0.718354 | false |
janeloveless/mechanics-of-exploration | neuromech/util.py | 1 | 11756 | #! /usr/bin/env python
import os
import itertools as it
import sys
import textwrap
#import gtk
import numpy as np
import sympy as sy
import sympy.stats
import odespy as ode
import matplotlib
import matplotlib.pyplot as plt
import sympy.physics.mechanics as mech
"""
Pretty plotting code.
"""
_all_spines = ["top", "right", "bottom", "left"]
def hide_spines(s=["top", "right"]):
"""Hides the top and rightmost axis spines from view for all active
figures and their respective axes."""
global _all_spines
# Retrieve a list of all current figures.
figures = [x for x in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for figure in figures:
# Get all Axis instances related to the figure.
for ax in figure.canvas.figure.get_axes():
for spine in _all_spines :
if spine in s :
ax.spines[spine].set_color('none')
if "top" in s and "bottom" in s :
ax.xaxis.set_ticks_position('none')
elif "top" in s :
ax.xaxis.set_ticks_position('bottom')
elif "bottom" in s :
ax.xaxis.set_ticks_position('top')
else :
ax.xaxis.set_ticks_position('both')
if "left" in s and "right" in s :
ax.yaxis.set_ticks_position('none')
elif "left" in s :
ax.yaxis.set_ticks_position('right')
elif "right" in s :
ax.yaxis.set_ticks_position('left')
else :
ax.yaxis.set_ticks_position('both')
"""
FORTRAN compilation code.
"""
def find_matching_parentheses(s, popen="(", pclose=")") :
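    """Return (i_start, i_end) such that s[i_start:i_end] is the first
    balanced popen...pclose group in s, delimiters included (i_end stays
    -1 if the group never closes)."""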
i_start = s.find(popen)
i_end = -1
count = 0
s_frame = s[i_start:]
for i in xrange(len(s_frame)) :
char = s_frame[i]
if char == popen :
count += 1
elif char == pclose :
count -= 1
if count == 0 :
i_end = i + i_start + 1
break
return i_start, i_end
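# For example (illustrative): find_matching_parentheses("merge(a,b,c)+x")
# returns (5, 12), and "merge(a,b,c)+x"[5:12] == "(a,b,c)".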
def parse_merge(H, s) :
"""
Parse the first FORTRAN merge statement found within s.
H is the name of a hidden variable which will be used to store the value of
the piecewise function defined by the merge statement.
"""
# extract bracketed code in merge statement from s
# m_statement is of form "(expr1,expr2,cond)"
i_merge_start = s.find("merge")
ms = s[i_merge_start:]
i_start, i_end = find_matching_parentheses(ms)
m_statement = ms[i_start:i_end]
# print m_statement
# extract expr1, expr2, and conditional
i1 = m_statement.find(",")
i2 = m_statement.rfind(",")
expr1 = m_statement[1:i1]
expr2 = m_statement[i1 + 1:i2]
cond = m_statement[i2 + 1:-1]
# if expr1, expr2, or cond are merge statements, recursively call this
# function otherwise, set the hidden switch variable to take the value of
# the relevant expr
if expr1.find("merge") != -1 :
expr1_str = parse_merge(H, expr1)[-1]
expr1_str = "".join([" " + s + "\n" for s in expr1_str.splitlines()])
else :
expr1_str = " " + H + "=" + expr1
if expr2.find("merge") != -1 :
expr2_str = parse_merge(H, expr2)[-1]
expr2_str = "".join([" " + s + "\n" for s in expr2_str.splitlines()])
else :
expr2_str = " " + H + "=" + expr2
# format expr1_str, expr2_str, and cond into a correct FORTRAN IF-THEN-ELSE
# statement
f_code = " IF (" + cond.strip() + ") THEN \n" + expr1_str + "\n" + \
" ELSE \n" + expr2_str + "\n" + \
" ENDIF \n"
return i_merge_start, i_merge_start + i_end, f_code
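# Illustrative example: with H = "H(1)" and s = "merge(x,y,t>0)", parse_merge
# returns (0, 14, code), where code is the FORTRAN block
#     IF (t>0) THEN
#       H(1)=x
#     ELSE
#       H(1)=y
#     ENDIF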
def FORTRAN_f(x, f, parameters=[], verbose=False) :
"""
Produce FORTRAN function for evaluating a vector-valued SymPy expression f
given a state vector x.
    The FORTRAN function will have the signature f_f77(neq, t, X, Y), where
    neq is hidden (handled by f2py) and Y is the output array of derivatives.
"""
# TODO remove code for dealing with stochastic systems -- it is not used in
# this paper
x = list(x) + list(parameters)
f = list(f) + [0]*len(parameters)
rv = list(set((np.concatenate([sy.stats.random_symbols(f_i) for f_i in f]))))
NR = len(rv)
if NR > 0 :
x += [sy.symbols("dt"), sy.symbols("seed")]
f += [0, 0]
NX = len(x)
NY = len(f)
if NX != NY :
raise Exception("System is not square!")
if verbose : print "generating FORTRAN matrices..."
_X = sy.tensor.IndexedBase("X", shape=(NX, ))
X = [_X[i + 1] for i in xrange(NX)]
_R = sy.tensor.IndexedBase("R", shape=(NR, ))
R = [_R[i + 1] for i in xrange(NR)]
if type(f) != sy.Matrix : f = sy.Matrix(f)
# WARNING : These substitution steps are VERY SLOW!!! It might be wise to
# parallelise them in the future, or at least substitute into one dynamical
# equation at a time so that progress can be monitored.
if verbose : print "substituting matrix elements for original state variables and parameters (WARNING: SLOW)..."
f_sub = f.subs(zip(x, X))
if verbose : print "substituting matrix elements for random variables (WARNING: SLOW)..."
f_sub = f_sub.subs(zip(rv, R))
# generate FORTRAN code
if verbose : print "generating FORTRAN code from dynamics equations..."
fstrs = [sy.fcode(fi, standard=95) for fi in f_sub]
# remove whitespace and newlines
if verbose : print "removing whitespace and newlines..."
fstrs = ["".join(fi.split()) for fi in fstrs]
# remove all @ (FORTRAN line continuation indicator)
if verbose : print "removing line continuations..."
fstrs = [fi.replace("@", "") for fi in fstrs]
# find FORTRAN inline merge statements and replace with a hidden "switch"
# variable whose value is set by a full IF statement at the start of the
# function call.
# -- this is needed because FORTRAN77 doesn't support inline merge statements
Hstrs = [] # to hold hidden switch expressions
if verbose : print "formatting piecewise functions..."
for i in xrange(len(fstrs)) :
while fstrs[i].find("merge") != -1 :
H = "H(" + str(len(Hstrs) + 1) + ")"
i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i])
fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:]
Hstrs.append(Hstr)
NH = len(Hstrs)
# format the fstrs
wrapper = textwrap.TextWrapper(expand_tabs=True,
replace_whitespace=True,
initial_indent=" ",
subsequent_indent=" @ ",
width=60)
if verbose : print "formatting state equations..."
for i in xrange(len(fstrs)) :
fstrs[i] = wrapper.fill("Y(" + str(i + 1) + ")=" + fstrs[i]) + "\n"
# put the above elements together into a FORTRAN subroutine
if verbose : print "formatting preamble..."
hdr = " subroutine f_f77(neq, t, X, Y) \n" +\
"Cf2py intent(hide) neq \n" +\
"Cf2py intent(out) Y \n" +\
" integer neq \n" +\
" double precision t, X, Y \n" +\
" dimension X(neq), Y(neq) \n"
if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n"
# TODO fix the following -- assumes dt = 0.01
# NOTE this is only important when dealing with stochastic systems
if NR > 0 : hdr += " real, dimension(" + str(NR) + ") :: R \n" +\
" integer :: SEED \n" +\
" real :: RTRASH \n" +\
" SEED = INT((t/" + sy.fcode(X[-2]).strip() +\
") + " + sy.fcode(X[-1]).strip() + ") \n" +\
" CALL SRAND(SEED) \n" +\
" DO i=1,4 \n" +\
" RTRASH=RAND(0) \n" +\
" END DO \n"
R_block = "".join([sy.fcode(R_i) + "=RAND(0) \n" for R_i in R])
H_block = "".join(Hstrs)
Y_block = "".join(fstrs)
if verbose : print "assembling source code blocks..."
fcode = hdr + R_block + H_block + Y_block + " return \n" + " end \n"
# final formatting
if verbose : print "final source code formatting..."
wrapper = textwrap.TextWrapper(expand_tabs=True, replace_whitespace=True,
initial_indent="", subsequent_indent=" @ ", width=60)
fcode = "".join([wrapper.fill(src) + "\n" for src in fcode.split("\n")])
return fcode
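# A hedged usage sketch (assumed SymPy symbols; FORTRAN_compile is defined
# below, and parameters ride along as constant extra states):
#
#   import sympy as sy
#   x, v, k = sy.symbols("x v k")
#   src = FORTRAN_f([x, v], [v, -k*x], parameters=[k])
#   f_f77 = FORTRAN_compile(src)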
def FORTRAN_jacobian(x, jac, parameters=[]) :
# TODO document
# TODO remove this function if unused in paper
NX = len(x)
NP = len(parameters)
Nrowpd = jac.shape[0]
Ncolpd = jac.shape[1]
    if not (NX == Nrowpd == Ncolpd) :
        raise Exception("System is not square!")
_X = sy.tensor.IndexedBase("X", shape=(NX, ))
X = [_X[i + 1] for i in xrange(NX)]
X = X + [_X[NX + i + 1] for i in xrange(NP)]
    if type(jac) != sy.Matrix : jac = sy.Matrix(jac)
jac_sub = jac.subs(zip(list(x) + list(parameters), X))
ijs = [i for i in it.product(xrange(Nrowpd), xrange(Ncolpd))]
# generate FORTRAN code
fstrs = [sy.fcode(jac_ij) for jac_ij in jac_sub]
# remove whitespace and newlines
fstrs = ["".join(jac_ij.split()) for jac_ij in fstrs]
# remove all @ (FORTRAN line continuation indicator)
fstrs = [jac_ij.replace("@", "") for jac_ij in fstrs]
# find FORTRAN inline merge statements and replace with a hidden "switch"
# variable whose value is set by a full IF statement at the start of the
# function call.
# -- this is needed because FORTRAN77 doesn't support inline merge statements
Hstrs = [] # to hold hidden switch expressions
for i in xrange(len(fstrs)) :
while fstrs[i].find("merge") != -1 :
H = "H(" + str(len(Hstrs) + 1) + ")"
i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i])
fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:]
Hstrs.append(Hstr)
NH = len(Hstrs)
# format the fstrs
wrapper = textwrap.TextWrapper(expand_tabs=True,
replace_whitespace=True,
initial_indent=" ",
subsequent_indent=" @ ",
width=60)
for k in xrange(len(fstrs)) :
i, j = ijs[k]
fstrs[k] = wrapper.fill("pd(" + str(i + 1) + "," + str(j + 1) + ")=" + fstrs[k]) + "\n"
# put the above elements together into a FORTRAN subroutine
hdr = " subroutine jac_f77(neq, t, X, ml, mu, pd, nrowpd) \n" +\
"Cf2py intent(hide) neq, ml, mu, nrowpd \n" +\
"Cf2py intent(out) pd \n" +\
" integer neq, ml, mu, nrowpd \n" +\
" double precision t, X, pd \n" +\
" dimension X(neq), pd(neq, neq) \n"
if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n"
H_block = "".join(Hstrs)
pd_block = "".join(fstrs)
fcode = hdr + H_block + pd_block + " return \n" + " end \n"
return fcode
def FORTRAN_compile(fcode) :
f_f77 = ode.compile_f77(fcode)
os.remove("tmp_callback.so")
# reload(ode)
return f_f77
"""
Numerical integration code.
"""
def FORTRAN_integrate(t, x0, f, p0=[], jac=None, rtol=0.0001, atol=0.0001) :
solver = ode.Lsodes(f=None, f_f77=f, jac_f77=jac, rtol=rtol, atol=atol)
solver.set_initial_condition(list(x0) + list(p0))
x, _ = solver.solve(t)
return x
| unlicense | 7,497,102,598,660,154,000 | 35.396285 | 116 | 0.544233 | false |
ms-iot/python | cpython/Lib/test/test_file.py | 83 | 11367 | import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.support import TESTFN, run_unittest
from collections import UserList
class AutoFileTests:
# file tests for which a test file is automatically set up
def setUp(self):
self.f = self.open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(b'teststring')
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
def testReadinto(self):
# verify readinto
self.f.write(b'12')
self.f.close()
a = array('b', b'x'*10)
self.f = self.open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEqual(b'12', a.tobytes()[:n])
def testReadinto_text(self):
# verify readinto refuses text files
a = array('b', b'x'*10)
self.f.close()
self.f = self.open(TESTFN, 'r')
if hasattr(self.f, "readinto"):
self.assertRaises(TypeError, self.f.readinto, a)
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList([b'1', b'2'])
self.f.writelines(l)
self.f.close()
self.f = self.open(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, b'12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testErrors(self):
f = self.f
self.assertEqual(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
if hasattr(f, "readinto"):
self.assertRaises((OSError, TypeError), f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = [('fileno', ()),
('flush', ()),
('isatty', ()),
('__next__', ()),
('read', ()),
('write', (b"",)),
('readline', ()),
('readlines', ()),
('seek', (0,)),
('tell', ()),
('write', (b"",)),
('writelines', ([],)),
('__iter__', ()),
]
methods.append(('truncate', ()))
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname, args in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method, *args)
# file is closed, __exit__ shouldn't do anything
self.assertEqual(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1/0
except:
self.assertEqual(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(OSError, self.f.read)
class CAutoFileTests(AutoFileTests, unittest.TestCase):
open = io.open
class PyAutoFileTests(AutoFileTests, unittest.TestCase):
open = staticmethod(pyio.open)
class OtherFileTests:
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = self.open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = self.open(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = self.open(TESTFN, 'wb', s)
f.write(str(s).encode("ascii"))
f.close()
f.close()
f = self.open(TESTFN, 'rb', s)
d = int(f.read().decode("ascii"))
f.close()
f.close()
except OSError as msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEqual(d, s)
def testTruncateOnWindows(self):
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
os.unlink(TESTFN)
f = self.open(TESTFN, 'wb')
try:
f.write(b'12345678901') # 11 bytes
f.close()
f = self.open(TESTFN,'rb+')
data = f.read(5)
if data != b'12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
finally:
f.close()
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods.
dataoffset = 16384
filler = b"ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
b"spam, spam and eggs\n",
b"eggs, spam, ham and spam\n",
b"saussages, spam, spam and eggs\n",
b"spam, ham, spam and eggs\n",
b"spam, spam, spam, spam, spam, ham, spam\n",
b"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("b", b" "*100),))]
try:
# Prepare the testfile
bag = self.open(TESTFN, "wb")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = self.open(TESTFN, 'rb')
if next(f) != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
meth(*args) # This simply shouldn't fail
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = self.open(TESTFN, 'rb')
for i in range(nchunks):
next(f)
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("b", b"\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tobytes()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
f.close()
# Reading after iteration hit EOF shouldn't hurt either
f = self.open(TESTFN, 'rb')
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class COtherFileTests(OtherFileTests, unittest.TestCase):
open = io.open
class PyOtherFileTests(OtherFileTests, unittest.TestCase):
open = staticmethod(pyio.open)
def tearDownModule():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 3,760,448,264,455,755,000 | 33.550152 | 79 | 0.512888 | false |
vv1133/home_web | django/contrib/gis/tests/relatedapp/tests.py | 58 | 14918 | from __future__ import absolute_import
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
from django.utils.unittest import skipUnless
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
        # union that is returned. Each point corresponds to City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
# Looks like PostGIS points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertTrue(isinstance(d['point'], Geometry))
self.assertTrue(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
sql = str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
| bsd-3-clause | -6,670,540,781,343,956,000 | 47.911475 | 155 | 0.639362 | false |
google/contentbox | third_party/modeltranslation/manager.py | 5 | 13076 | # -*- coding: utf-8 -*-
"""
The idea of MultilingualManager is taken from
django-linguo by Zach Mathew
https://github.com/zmathew/django-linguo
"""
from django.db import models
from django.db.models import FieldDoesNotExist
from django.db.models.fields.related import RelatedField, RelatedObject
from django.db.models.sql.where import Constraint
from django.utils.tree import Node
from modeltranslation import settings
from modeltranslation.fields import TranslationField
from modeltranslation.utils import (build_localized_fieldname, get_language,
auto_populate)
def get_translatable_fields_for_model(model):
from modeltranslation.translator import NotRegistered, translator
try:
return translator.get_options_for_model(model).get_field_names()
except NotRegistered:
return None
def rewrite_lookup_key(model, lookup_key):
pieces = lookup_key.split('__', 1)
original_key = pieces[0]
translatable_fields = get_translatable_fields_for_model(model)
if translatable_fields is not None:
# If we are doing a lookup on a translatable field,
# we want to rewrite it to the actual field name
# For example, we want to rewrite "name__startswith" to "name_fr__startswith"
if pieces[0] in translatable_fields:
pieces[0] = build_localized_fieldname(pieces[0], get_language())
if len(pieces) > 1:
# Check if we are doing a lookup to a related trans model
fields_to_trans_models = get_fields_to_translatable_models(model)
for field_to_trans, transmodel in fields_to_trans_models:
# Check ``original key``, as pieces[0] may have been already rewritten.
if original_key == field_to_trans:
pieces[1] = rewrite_lookup_key(transmodel, pieces[1])
break
return '__'.join(pieces)
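# Hedged illustration (hypothetical registered model; active language "fr"):
# if model News has a translatable field "title", then
#   rewrite_lookup_key(News, "title__startswith") -> "title_fr__startswith"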
def rewrite_order_lookup_key(model, lookup_key):
if lookup_key.startswith('-'):
return '-' + rewrite_lookup_key(model, lookup_key[1:])
else:
return rewrite_lookup_key(model, lookup_key)
_F2TM_CACHE = {}
def get_fields_to_translatable_models(model):
if model not in _F2TM_CACHE:
results = []
for field_name in model._meta.get_all_field_names():
field_object, modelclass, direct, m2m = model._meta.get_field_by_name(field_name)
# Direct relationship
if direct and isinstance(field_object, RelatedField):
if get_translatable_fields_for_model(field_object.related.parent_model) is not None:
results.append((field_name, field_object.related.parent_model))
# Reverse relationship
if isinstance(field_object, RelatedObject):
if get_translatable_fields_for_model(field_object.model) is not None:
results.append((field_name, field_object.model))
_F2TM_CACHE[model] = results
return _F2TM_CACHE[model]
_C2F_CACHE = {}
def get_field_by_column_name(model, col):
# First, try field with the column name
try:
field = model._meta.get_field(col)
if field.column == col:
return field
except FieldDoesNotExist:
pass
field = _C2F_CACHE.get((model, col), None)
if field:
return field
# D'oh, need to search through all of them.
for field in model._meta.fields:
if field.column == col:
_C2F_CACHE[(model, col)] = field
return field
assert False, "No field found for column %s" % col
class MultilingualQuerySet(models.query.QuerySet):
def __init__(self, *args, **kwargs):
super(MultilingualQuerySet, self).__init__(*args, **kwargs)
self._post_init()
def _post_init(self):
self._rewrite = True
self._populate = None
if self.model and (not self.query.order_by):
if self.model._meta.ordering:
# If we have default ordering specified on the model, set it now so that
# it can be rewritten. Otherwise sql.compiler will grab it directly from _meta
ordering = []
for key in self.model._meta.ordering:
ordering.append(rewrite_order_lookup_key(self.model, key))
self.query.add_ordering(*ordering)
# This method was not present in django-linguo
def _clone(self, klass=None, *args, **kwargs):
if klass is not None and not issubclass(klass, MultilingualQuerySet):
class NewClass(klass, MultilingualQuerySet):
pass
NewClass.__name__ = 'Multilingual%s' % klass.__name__
klass = NewClass
kwargs.setdefault('_rewrite', self._rewrite)
kwargs.setdefault('_populate', self._populate)
return super(MultilingualQuerySet, self)._clone(klass, *args, **kwargs)
# This method was not present in django-linguo
def rewrite(self, mode=True):
return self._clone(_rewrite=mode)
# This method was not present in django-linguo
def populate(self, mode='all'):
"""
Overrides the translation fields population mode for this query set.
"""
return self._clone(_populate=mode)
def _rewrite_applied_operations(self):
"""
Rewrite fields in already applied filters/ordering.
Useful when converting any QuerySet into MultilingualQuerySet.
"""
self._rewrite_where(self.query.where)
self._rewrite_where(self.query.having)
self._rewrite_order()
def _rewrite_where(self, q):
"""
Rewrite field names inside WHERE tree.
"""
if isinstance(q, tuple) and isinstance(q[0], Constraint):
c = q[0]
if c.field is None:
                c.field = get_field_by_column_name(self.model, c.col)
new_name = rewrite_lookup_key(self.model, c.field.name)
if c.field.name != new_name:
c.field = self.model._meta.get_field(new_name)
c.col = c.field.column
if isinstance(q, Node):
for child in q.children:
self._rewrite_where(child)
def _rewrite_order(self):
self.query.order_by = [rewrite_order_lookup_key(self.model, field_name)
for field_name in self.query.order_by]
# This method was not present in django-linguo
def _rewrite_q(self, q):
"""Rewrite field names inside Q call."""
if isinstance(q, tuple) and len(q) == 2:
return rewrite_lookup_key(self.model, q[0]), q[1]
if isinstance(q, Node):
q.children = list(map(self._rewrite_q, q.children))
return q
# This method was not present in django-linguo
def _rewrite_f(self, q):
"""
Rewrite field names inside F call.
"""
if isinstance(q, models.F):
q.name = rewrite_lookup_key(self.model, q.name)
return q
if isinstance(q, Node):
q.children = list(map(self._rewrite_f, q.children))
return q
def _filter_or_exclude(self, negate, *args, **kwargs):
if not self._rewrite:
return super(MultilingualQuerySet, self)._filter_or_exclude(negate, *args, **kwargs)
args = map(self._rewrite_q, args)
for key, val in kwargs.items():
new_key = rewrite_lookup_key(self.model, key)
del kwargs[key]
kwargs[new_key] = self._rewrite_f(val)
return super(MultilingualQuerySet, self)._filter_or_exclude(negate, *args, **kwargs)
def _get_original_fields(self):
return [f.attname for f in self.model._meta.fields if not isinstance(f, TranslationField)]
def order_by(self, *field_names):
"""
Change translatable field names in an ``order_by`` argument
to translation fields for the current language.
"""
if not self._rewrite:
return super(MultilingualQuerySet, self).order_by(*field_names)
new_args = []
for key in field_names:
new_args.append(rewrite_order_lookup_key(self.model, key))
return super(MultilingualQuerySet, self).order_by(*new_args)
def update(self, **kwargs):
if not self._rewrite:
return super(MultilingualQuerySet, self).update(**kwargs)
for key, val in kwargs.items():
new_key = rewrite_lookup_key(self.model, key)
del kwargs[key]
kwargs[new_key] = self._rewrite_f(val)
return super(MultilingualQuerySet, self).update(**kwargs)
update.alters_data = True
# This method was not present in django-linguo
@property
def _populate_mode(self):
# Populate can be set using a global setting or a manager method.
if self._populate is None:
return settings.AUTO_POPULATE
return self._populate
# This method was not present in django-linguo
def create(self, **kwargs):
"""
Allows to override population mode with a ``populate`` method.
"""
with auto_populate(self._populate_mode):
return super(MultilingualQuerySet, self).create(**kwargs)
# This method was not present in django-linguo
def get_or_create(self, **kwargs):
"""
Allows to override population mode with a ``populate`` method.
"""
with auto_populate(self._populate_mode):
return super(MultilingualQuerySet, self).get_or_create(**kwargs)
def _append_translated(self, fields):
"If translated field is encountered, add also all its translation fields."
fields = set(fields)
from modeltranslation.translator import translator
opts = translator.get_options_for_model(self.model)
for key, translated in opts.fields.items():
if key in fields:
fields = fields.union(f.name for f in translated)
return fields
# This method was not present in django-linguo
def defer(self, *fields):
fields = self._append_translated(fields)
return super(MultilingualQuerySet, self).defer(*fields)
# This method was not present in django-linguo
def only(self, *fields):
fields = self._append_translated(fields)
return super(MultilingualQuerySet, self).only(*fields)
# This method was not present in django-linguo
def raw_values(self, *fields):
return super(MultilingualQuerySet, self).values(*fields)
# This method was not present in django-linguo
def values(self, *fields):
if not self._rewrite:
return super(MultilingualQuerySet, self).values(*fields)
if not fields:
# Emulate original queryset behaviour: get all fields that are not translation fields
fields = self._get_original_fields()
new_args = []
for key in fields:
new_args.append(rewrite_lookup_key(self.model, key))
vqs = super(MultilingualQuerySet, self).values(*new_args)
vqs.field_names = list(fields)
return vqs
# This method was not present in django-linguo
def values_list(self, *fields, **kwargs):
if not self._rewrite:
return super(MultilingualQuerySet, self).values_list(*fields, **kwargs)
if not fields:
# Emulate original queryset behaviour: get all fields that are not translation fields
fields = self._get_original_fields()
new_args = []
for key in fields:
new_args.append(rewrite_lookup_key(self.model, key))
return super(MultilingualQuerySet, self).values_list(*new_args, **kwargs)
# This method was not present in django-linguo
def dates(self, field_name, *args, **kwargs):
if not self._rewrite:
return super(MultilingualQuerySet, self).dates(field_name, *args, **kwargs)
new_key = rewrite_lookup_key(self.model, field_name)
return super(MultilingualQuerySet, self).dates(new_key, *args, **kwargs)
class MultilingualManager(models.Manager):
use_for_related_fields = True
def rewrite(self, *args, **kwargs):
return self.get_queryset().rewrite(*args, **kwargs)
def populate(self, *args, **kwargs):
return self.get_queryset().populate(*args, **kwargs)
def raw_values(self, *args, **kwargs):
return self.get_queryset().raw_values(*args, **kwargs)
def get_queryset(self):
if hasattr(super(MultilingualManager, self), 'get_queryset'):
qs = super(MultilingualManager, self).get_queryset()
else: # Django 1.4 / 1.5 compat
qs = super(MultilingualManager, self).get_query_set()
if qs.__class__ == models.query.QuerySet:
qs.__class__ = MultilingualQuerySet
else:
class NewClass(qs.__class__, MultilingualQuerySet):
pass
NewClass.__name__ = 'Multilingual%s' % qs.__class__.__name__
qs.__class__ = NewClass
qs._post_init()
qs._rewrite_applied_operations()
return qs
get_query_set = get_queryset
| apache-2.0 | 5,549,695,872,246,741,000 | 38.267267 | 100 | 0.62282 | false |
payjp/payjp-python | payjp/test/helper.py | 1 | 3831 | import datetime
import json
import os
import random
import re
import string
import unittest
from mock import patch, Mock
from six import string_types
import payjp
NOW = datetime.datetime.now()
DUMMY_CARD = {
'number': '4242424242424242',
'exp_month': NOW.month,
'exp_year': NOW.year + 4
}
DUMMY_CHARGE = {
'amount': 100,
'currency': 'jpy',
'card': DUMMY_CARD
}
DUMMY_PLAN = {
'amount': 2000,
'interval': 'month',
'name': 'Amazing Gold Plan',
'currency': 'jpy',
'id': ('payjp-test-gold-' +
''.join(random.choice(string.ascii_lowercase) for x in range(10)))
}
DUMMY_TRANSFER = {
'amount': 400,
'currency': 'jpy',
'recipient': 'self'
}
class PayjpTestCase(unittest.TestCase):
RESTORE_ATTRIBUTES = ('api_version', 'api_key', 'max_retry', 'retry_initial_delay', 'retry_max_delay')
def setUp(self):
super(PayjpTestCase, self).setUp()
self._payjp_original_attributes = {}
for attr in self.RESTORE_ATTRIBUTES:
self._payjp_original_attributes[attr] = getattr(payjp, attr)
api_base = os.environ.get('PAYJP_API_BASE')
if api_base:
payjp.api_base = api_base
payjp.api_key = os.environ.get(
'PAYJP_API_KEY', 'sk_test_c62fade9d045b54cd76d7036')
def tearDown(self):
super(PayjpTestCase, self).tearDown()
for attr in self.RESTORE_ATTRIBUTES:
setattr(payjp, attr, self._payjp_original_attributes[attr])
# Python < 2.7 compatibility
def assertRaisesRegexp(self, exception, regexp, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except exception as err:
if regexp is None:
return True
if isinstance(regexp, string_types):
regexp = re.compile(regexp)
if not regexp.search(str(err)):
raise self.failureException('"%s" does not match "%s"' %
(regexp.pattern, str(err)))
else:
raise self.failureException(
'%s was not raised' % (exception.__name__,))
class PayjpUnitTestCase(PayjpTestCase):
REQUEST_LIBRARIES = ['requests']
def setUp(self):
super(PayjpUnitTestCase, self).setUp()
self.request_patchers = {}
self.request_mocks = {}
for lib in self.REQUEST_LIBRARIES:
patcher = patch("payjp.http_client.%s" % (lib,))
self.request_mocks[lib] = patcher.start()
self.request_patchers[lib] = patcher
def tearDown(self):
super(PayjpUnitTestCase, self).tearDown()
for patcher in self.request_patchers.values():
patcher.stop()
class PayjpApiTestCase(PayjpTestCase):
def setUp(self):
super(PayjpApiTestCase, self).setUp()
self.requestor_patcher = patch('payjp.api_requestor.APIRequestor')
self.requestor_class_mock = self.requestor_patcher.start()
self.requestor_mock = self.requestor_class_mock.return_value
def tearDown(self):
super(PayjpApiTestCase, self).tearDown()
self.requestor_patcher.stop()
def mock_response(self, res):
self.requestor_mock.request = Mock(return_value=(res, 'reskey'))
class MyResource(payjp.resource.APIResource):
pass
class MyListable(payjp.resource.ListableAPIResource):
pass
class MyCreatable(payjp.resource.CreateableAPIResource):
pass
class MyUpdateable(payjp.resource.UpdateableAPIResource):
pass
class MyDeletable(payjp.resource.DeletableAPIResource):
pass
class MyComposite(payjp.resource.ListableAPIResource,
payjp.resource.CreateableAPIResource,
payjp.resource.UpdateableAPIResource,
payjp.resource.DeletableAPIResource):
pass
| mit | 9,127,578,366,974,042,000 | 24.885135 | 106 | 0.623858 | false |
millaguie/Vernam | vernam/message.py | 1 | 4778 | # -*- coding: utf-8 -*-
"""
Message module holds all methods to work with message files
"""
import sys
import os
import array
from struct import pack
from struct import unpack
import uuid
import hashlib
import keymanagement
import yaml
import ownbase32
from util import hashSum
L2RHEADER = bytearray([222, 210, 7, 163, 100])
R2LHEADER = bytearray([222, 210, 7, 163, 101])
def readMessage(keyPath, messagePath):
"""
This function reads a message (envelope) in the defined format, and
returns the data inside the file, offset in key file and the reading mode
for the key.
    It checks that the message key UUID matches the configured key UUID, and
    also verifies message consistency via SHA-512.
Args:
* keyPath: path to the file used as key
* messagePath: path to the file used to read the message
Returns:
        A tuple: offset in the key, key reading direction (L2R), and the data
        inside the envelope
"""
keyUUID = keymanagement.getCatalogUUID(keyPath)
with open(messagePath, "rb") as file:
header = bytearray(unpack(">iiiii", file.read(5*4)))
if header == L2RHEADER:
L2R = True
elif header == R2LHEADER:
L2R = False
else:
raise ValueError("File format unknown")
msgSize = unpack(">Q", file.read(8))[0]
        offsetInKey = unpack(">Q", file.read(8))[0]
msgKeyUUID1, msgKeyUUID2 = unpack(">QQ", file.read(16))
msgKeyUUID = (msgKeyUUID1 << 64) | msgKeyUUID2
if keyUUID.int != msgKeyUUID:
raise ValueError("Bad Key UUID")
message = unpack(">{}s".format(msgSize), file.read(msgSize))[0]
msgFileHash = file.read()
if hashSum(message) != msgFileHash.encode("hex"):
raise ValueError("Failed to hash message ")
return offsetInKey, L2R, message
def writeHumanMessage(outputPath, message, seek):
"""
This function writes a message in the human friendly format.
Format of the message is as follows:
offset#message
Args:
* outputPath: path to the new message file
* message: message to write in the file
* seek: offset in key to decrypt message
Returns:
None
"""
with open(outputPath, "w") as f:
f.write("{}#{}".format(seek, ownbase32.ba2ob32string(message)))
def readHumanMessage(inputPath):
"""
This function reads a message in the human friendly format.
Function will return two elements, key offset and the
encrypted message.
Args:
* inputPath: path to the message file to read
Returns:
An array:
* Offset in the key
* Encrypted message
"""
with open(inputPath, "r") as f:
s = f.read()
s = s.split("#")
return int(s[0]), s[1]
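# A hedged round-trip sketch (hypothetical path; message is a bytearray
# accepted by ownbase32.ba2ob32string):
#
#   writeHumanMessage("msg.txt", message, 1024)
#   offset, encoded = readHumanMessage("msg.txt")  # -> 1024, "<ob32 string>"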
def writeMessage(keyPath, messagePath, ciphered, offsetInKey, l2r=True):
"""
    This function writes a message in the defined envelope format:
    * Header, 20 bytes as defined, in two variants: one for L2R keys and one for R2L
    * Message size, an 8-byte (64-bit) integer
    * Offset in the key, an 8-byte (64-bit) integer
    * Key UUID used for the message, 16 bytes
    * The message itself; its size is given by the size field
    * Hash of the message, a 64-byte SHA-512 digest
Args:
* keyPath: path to the file used as key
* messagePath: path to the file used to store the message
* ciphered: ciphered data to write in the file (envelope)
* offsetInKey: need to jump to this byte in key to decrypt
* l2r: Indicates if the key will need to be readed R2L or L2R (True)
Returns:
None
"""
keyUUID = keymanagement.getCatalogUUID(keyPath)
msgSize = len(ciphered)
with open(messagePath, "wb") as file:
max_int64 = 0xFFFFFFFFFFFFFFFF
# Write file header right to left or left to right
if l2r is True:
file.write(pack(">iiiii", *L2RHEADER))
else:
file.write(pack(">iiiii", *R2LHEADER))
offsetInKey = offsetInKey + msgSize
# Write menssage size in bytes
file.write(pack(">Q", msgSize))
# Write offset in key to decrypt message
file.write(pack(">Q", offsetInKey))
# Write Key UUID for easy key management
file.write(pack('>QQ', (keyUUID.int >> 64) & max_int64,
keyUUID.int & max_int64))
#write message it's self
ciphered = str(ciphered)
file.write(pack(">{}s".format(msgSize), ciphered))
# Get hash for the message
msgHash = hashSum(ciphered)
msgHashint = msgHash.decode("hex")
msgHashArray = bytearray(msgHashint)
        # Debug trace: report where the message was written.
        print("WRITE -> offset: {}, L2R: {}".format(offsetInKey, l2r))
file.write(msgHashArray)
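# A hedged envelope round trip (hypothetical paths; the key's catalog UUID
# must match the UUID stored in the envelope):
#
#   writeMessage("key.bin", "msg.env", ciphered, offset, l2r=True)
#   offset2, l2r2, data = readMessage("key.bin", "msg.env")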
| bsd-3-clause | 4,168,946,275,354,322,000 | 32.412587 | 101 | 0.626831 | false |
dhruvagarwal/django | django/utils/_os.py | 502 | 3581 | from __future__ import unicode_literals
import os
import sys
import tempfile
from os.path import abspath, dirname, isabs, join, normcase, normpath, sep
from django.core.exceptions import SuspiciousFileOperation
from django.utils import six
from django.utils.encoding import force_text
if six.PY2:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
# Under Python 2, define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII characters
# in it. This isn't necessary on Windows since the Windows version of abspath
# handles this correctly. It also handles drive letters differently than the
# pure Python implementation, so it's best not to replace it.
if six.PY3 or os.name == 'nt':
abspathu = abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not isabs(path):
path = join(os.getcwdu(), path)
return normpath(path)
def upath(path):
"""
Always return a unicode path.
"""
if six.PY2 and not isinstance(path, six.text_type):
return path.decode(fs_encoding)
return path
def npath(path):
"""
Always return a native path, that is unicode on Python 3 and bytestring on
Python 2.
"""
if six.PY2 and not isinstance(path, bytes):
return path.encode(fs_encoding)
return path
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
base = force_text(base)
paths = [force_text(p) for p in paths]
final_path = abspathu(join(base, *paths))
base_path = abspathu(base)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows),
# further, one of the following conditions must be true:
# a) The next character is the path separator (to prevent conditions like
# safe_join("/dir", "/../d"))
# b) The final path must be the same as the base path.
# c) The base path must be the most root path (meaning either "/" or "C:\\")
if (not normcase(final_path).startswith(normcase(base_path + sep)) and
normcase(final_path) != normcase(base_path) and
dirname(normcase(base_path)) != normcase(base_path)):
raise SuspiciousFileOperation(
'The joined path ({}) is located outside of the base path '
'component ({})'.format(final_path, base_path))
return final_path
def symlinks_supported():
"""
A function to check if creating symlinks are supported in the
host platform and/or if they are allowed to be created (e.g.
on Windows it requires admin permissions).
"""
tmpdir = tempfile.mkdtemp()
original_path = os.path.join(tmpdir, 'original')
symlink_path = os.path.join(tmpdir, 'symlink')
os.makedirs(original_path)
try:
os.symlink(original_path, symlink_path)
supported = True
except (OSError, NotImplementedError, AttributeError):
supported = False
else:
os.remove(symlink_path)
finally:
os.rmdir(original_path)
os.rmdir(tmpdir)
return supported
| bsd-3-clause | 6,270,224,937,032,313,000 | 34.107843 | 81 | 0.677464 | false |
leejir/darkforce | juggle/codegen/deletenote.py | 3 | 1137 | # 2014-12-17
# built by qianqians
# deletenote
def deletenote(filestr):
    """Strip C/C++ '//' and '/* ... */' comments from a list of source lines.

    Returns the surviving non-empty lines; raises if a '/*' is never closed.
    """
    genfilestr = []
    count = 0
    errornote = ""
    for i in xrange(len(filestr)):
        str = filestr[i]
        while(1):
            if count == 1:
                # Currently inside a multi-line /* ... */ block.
                indexafter = str.find("*/")
                if indexafter != -1:
                    str = str[indexafter+2:]
                    count = 0
                else:
                    break
            index = str.find('//')
            if index != -1:
                str = str[0:index]
            else:
                indexbegin = str.find("/*")
                if indexbegin != -1:
                    errornote = str
                    indexafter = str.find("*/")
                    if indexafter != -1:
                        str = str[0:indexbegin] + str[indexafter+2:]
                    else:
                        count = 1
                        break
            if str != "":
                genfilestr.append(str)
            break
    if count == 1:
        raise Exception("c/c++ coding error unpaired /* ", errornote)
    return genfilestr
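# A minimal usage sketch (hypothetical input lines):
#
#   deletenote(["int a; // counter", "/* block", "comment */ int b;"])
#   -> ["int a; ", " int b;"]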
| gpl-3.0 | 7,870,676,413,077,104,000 | 24.840909 | 69 | 0.40985 | false |
Ivoz/pip | pip/_vendor/requests/packages/urllib3/util.py | 248 | 21407 | # urllib3/util.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
from binascii import hexlify, unhexlify
from collections import namedtuple
from hashlib import md5, sha1
from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT
import time
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError, SSLError, TimeoutStateError
_Default = object()
# The default timeout to use for socket connections. This is the attribute used
# by httplib to define the default timeout
def current_time():
"""
Retrieve the current time, this function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
"""
Utility object for storing timeout values.
Example usage:
.. code-block:: python
timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
pool.request(...) # Etc, etc
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response. Specifically, Python's DNS resolver does not obey the
timeout specified on the socket. Other factors that can affect total
request time include high CPU load, high swap, the program running at a
low priority level, or other behaviors. The observed running time for
urllib3 to return a response may be greater than the value passed to
`total`.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not ever trigger, even though the request will
take several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is used
for clear error messages
:return: the value
:raises ValueError: if the type is not an integer or a float, or if it
is a numeric value less than zero
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value passed
to this function.
:param timeout: The legacy timeout value
:type timeout: integer, float, sentinel default object, or None
:return: a Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: the elapsed time
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: the connect timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: the value to use for the read timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# in case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
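# Sketch of how `total` caps the read budget (editor's addition; values are
# hypothetical). With Timeout(total=10.0, read=7.0), if connect took 4
# seconds then read_timeout returns min(10.0 - 4.0, 7.0) == 6.0, and it
# never goes below 0 thanks to the max(0, ...) guard above.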
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
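# Example of the derived properties (editor's addition; outputs follow
# directly from the property definitions above):
#
#     u = Url(scheme='http', host='example.com', port=8080,
#             path='/a', query='b=1')
#     u.netloc       # 'example.com:8080'
#     u.request_uri  # '/a?b=1'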
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example: ::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example: ::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementation does silly things to be optimal
# on CPython.
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
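# Migration sketch (editor's addition): get_host('http://example.com:8080/x')
# returns ('http', 'example.com', 8080); new code should instead call
# parse_url('http://example.com:8080/x') and read .scheme/.host/.port.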
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(six.b(proxy_basic_auth)).decode('utf-8')
return headers
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if not sock: # Platform-specific: AppEngine
return False
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except SocketError:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
return False  # no readable events: the connection is still alive
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation (so you can specify `REQUIRED`
instead of `CERT_REQUIRED`).
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
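# Resolution examples (editor's addition; constants come from the ssl
# module imported elsewhere in this package):
#
#     resolve_cert_reqs(None)            # ssl.CERT_NONE
#     resolve_cert_reqs('REQUIRED')      # ssl.CERT_REQUIRED
#     resolve_cert_reqs('CERT_REQUIRED') # ssl.CERT_REQUIRED
#     resolve_cert_reqs(ssl.CERT_NONE)   # passed through unchanged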
def resolve_ssl_version(candidate):
"""
like resolve_cert_reqs
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, rest = divmod(len(fingerprint), 2)
if rest or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
# We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
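# Usage note (editor's addition): `cert` is the DER-encoded certificate
# bytes and `fingerprint` is 32 hex digits for md5 or 40 for sha1,
# optionally colon-separated; any other digest length raises SSLError
# via the hashfunc_map lookup above.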
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
if hasattr(obj, 'fp'):
# Object is a container for another file-like object that gets released
# on exhaustion (e.g. HTTPResponse)
return obj.fp is None
return obj.closed
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
"""
All arguments except `server_hostname` have the same meaning as for
:func:`ssl.wrap_socket`
:param server_hostname:
Hostname of the expected certificate
"""
context = SSLContext(ssl_version)
context.verify_mode = cert_reqs
# Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
context.options |= OP_NO_COMPRESSION
if ca_certs:
try:
context.load_verify_locations(ca_certs)
# Py32 raises IOError
# Py33 raises FileNotFoundError
except Exception as e: # Reraise as SSLError
raise SSLError(e)
if certfile:
# FIXME: This block needs a test.
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
else: # Python 3.1 and earlier
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl_version)
| mit | 4,341,497,358,461,269,500 | 32.035494 | 105 | 0.614332 | false |
mohamed--abdel-maksoud/chromium.src | mojo/public/third_party/jinja2/environment.py | 614 | 47244 | # -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.nodes import EvalContext
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound, TemplateRuntimeError
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode
from jinja2._compat import imap, ifilter, string_types, iteritems, \
text_type, reraise, implements_iterator, implements_to_string, \
get_next, encode_filename, PY2, PYPY
from functools import reduce
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instantiated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, string_types):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
The string marking the begin of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the begin of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the begin of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on whether autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time; if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (i.e. file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.lstrip_blocks = lstrip_blocks
self.newline_sequence = newline_sequence
self.keep_trailing_newline = keep_trailing_newline
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def add_extension(self, extension):
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in iteritems(attributes):
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, lstrip_blocks=missing,
extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except for the cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in iteritems(args):
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in iteritems(self.extensions):
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
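# Overlay usage sketch (editor's addition; attribute names match the
# constructor parameters documented above):
#
#     env = Environment(trim_blocks=False)
#     strict_env = env.overlay(trim_blocks=True)
#     # strict_env shares filters/globals with env but trims blocks.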
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
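# Lookup-precedence sketch (editor's addition): for {{ foo.bar }} the
# compiler uses getattr() (attribute first, then item), while
# {{ foo['bar'] }} uses getitem() (item first, then attribute), so a dict
# key and an attribute of the same name resolve differently per syntax.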
def call_filter(self, name, value, args=None, kwargs=None,
context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.filters.get(name)
if func is None:
raise TemplateRuntimeError('no filter named %r' % name)
args = [value] + list(args or ())
if getattr(func, 'contextfilter', False):
if context is None:
raise TemplateRuntimeError('Attempted to invoke context '
'filter without context')
args.insert(0, context)
elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(*args, **(kwargs or {}))
def call_test(self, name, value, args=None, kwargs=None):
"""Invokes a test on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.tests.get(name)
if func is None:
raise TemplateRuntimeError('no test named %r' % name)
return func(value, *(args or ()), **(kwargs or {}))
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = text_type(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
def _generate(self, source, name, filename, defer_init=False):
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is used internally to aid the module code generator. It
allows the generated code to be imported without the global
environment variable being set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, the templates will be
stored in a directory instead of in a zipfile.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files. This flag does nothing on PyPy
and Python 3, where pyc files are not picked up automatically and
don't give much benefit.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
from warnings import warn
warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
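# Minimal end-to-end sketch (editor's addition; mirrors the doctest style
# used elsewhere in this file):
#
#     env = Environment()
#     tmpl = env.from_string(u'Hello {{ name }}!')
#     tmpl.render(name=u'World')   # -> u'Hello World!'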
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However it's important that a template object should be
considered immutable. Modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
lstrip_blocks, newline_sequence, keep_trailing_newline,
frozenset(extensions), optimized, undefined, finalize, autoescape,
None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as-is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(imap(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
converting it into a unicode or byte string renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
@implements_iterator
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one unicode string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
Per default unicode strings are written, if you want to encode
before writing specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, string_types):
fp = open(fp, encoding is None and 'w' or 'wb')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = get_next(self._gen)
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = get_next(generator(get_next(self._gen)))
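# Buffering sketch (editor's addition): with enable_buffering(size=5) the
# stream concatenates five non-empty events per yielded string, e.g.
#
#     s = Template(u'{% for i in seq %}{{ i }} {% endfor %}').stream(seq=range(20))
#     s.enable_buffering(5)
#     # each next(s) now yields roughly five template chunks joined together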
def __iter__(self):
return self
def __next__(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
| bsd-3-clause | 7,780,591,161,596,931,000 | 38.667506 | 82 | 0.604881 | false |
R4stl1n/allianceauth | allianceauth/services/modules/smf/views.py | 5 | 5057 | import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.shortcuts import render, redirect
from allianceauth.services.forms import ServicePasswordForm
from .manager import SmfManager
from .models import SmfUser
from .tasks import SmfTasks
logger = logging.getLogger(__name__)
ACCESS_PERM = 'smf.access_smf'
@login_required
@permission_required(ACCESS_PERM)
def activate_smf(request):
logger.debug("activate_smf called by user %s" % request.user)
# Validated; now get the user's main character
character = request.user.profile.main_character
logger.debug("Adding smf user for user %s with main character %s" % (request.user, character))
result = SmfManager.add_user(SmfTasks.get_username(request.user), request.user.email, ['Member'],
character.character_id)
# if empty we failed
if result[0] != "":
SmfUser.objects.update_or_create(user=request.user, defaults={'username': result[0]})
logger.debug("Updated authserviceinfo for user %s with smf credentials. Updating groups." % request.user)
SmfTasks.update_groups.delay(request.user.pk)
logger.info("Successfully activated smf for user %s" % request.user)
messages.success(request, 'Activated SMF account.')
credentials = {
'username': result[0],
'password': result[1],
}
return render(request, 'services/service_credentials.html',
context={'credentials': credentials, 'service': 'SMF'})
else:
logger.error("Unsuccessful attempt to activate smf for user %s" % request.user)
messages.error(request, 'An error occurred while processing your SMF account.')
return redirect("services:services")
@login_required
@permission_required(ACCESS_PERM)
def deactivate_smf(request):
logger.debug("deactivate_smf called by user %s" % request.user)
result = SmfTasks.delete_user(request.user)
# a falsy result means the deletion failed
if result:
logger.info("Successfully deactivated smf for user %s" % request.user)
messages.success(request, 'Deactivated SMF account.')
else:
logger.error("Unsuccessful attempt to activate smf for user %s" % request.user)
messages.error(request, 'An error occurred while processing your SMF account.')
return redirect("services:services")
@login_required
@permission_required(ACCESS_PERM)
def reset_smf_password(request):
logger.debug("reset_smf_password called by user %s" % request.user)
character = request.user.profile.main_character
if SmfTasks.has_account(request.user) and character is not None:
result = SmfManager.update_user_password(request.user.smf.username, character.character_id)
# an empty result string means the reset failed
if result != "":
logger.info("Successfully reset smf password for user %s" % request.user)
messages.success(request, 'Reset SMF password.')
credentials = {
'username': request.user.smf.username,
'password': result,
}
return render(request, 'services/service_credentials.html',
context={'credentials': credentials, 'service': 'SMF'})
logger.error("Unsuccessful attempt to reset smf password for user %s" % request.user)
messages.error(request, 'An error occurred while processing your SMF account.')
return redirect("services:services")
@login_required
@permission_required(ACCESS_PERM)
def set_smf_password(request):
logger.debug("set_smf_password called by user %s" % request.user)
if request.method == 'POST':
logger.debug("Received POST request with form.")
form = ServicePasswordForm(request.POST)
logger.debug("Form is valid: %s" % form.is_valid())
character = request.user.profile.main_character
if form.is_valid() and SmfTasks.has_account(request.user) and character is not None:
password = form.cleaned_data['password']
logger.debug("Form contains password of length %s" % len(password))
result = SmfManager.update_user_password(request.user.smf.username, character.character_id,
password=password)
if result != "":
logger.info("Successfully set smf password for user %s" % request.user)
messages.success(request, 'Set SMF password.')
else:
logger.error("Failed to install custom smf password for user %s" % request.user)
messages.error(request, 'An error occurred while processing your SMF account.')
return redirect("services:services")
else:
logger.debug("Request is not type POST - providing empty form.")
form = ServicePasswordForm()
logger.debug("Rendering form for user %s" % request.user)
context = {'form': form, 'service': 'SMF'}
return render(request, 'services/service_password.html', context=context)
| gpl-2.0 | 1,110,023,437,999,826,400 | 45.394495 | 113 | 0.667985 | false |
pmoulon/TheiaSfM | docs/make_docs.py | 19 | 2872 | #!/usr/bin/python
#
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2013 Google Inc. All rights reserved.
# http://code.google.com/p/ceres-solver/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: [email protected] (Sameer Agarwal)
#
# Note: You will need Sphinx and Pygments installed for this to work.
import glob
import os
import sys
# Number of arguments
N = len(sys.argv)
if N < 3:
print "make_docs.py src_root destination_root"
sys.exit(1)
src_dir = sys.argv[1] + "/docs/source"
build_root = sys.argv[2]
cache_dir = build_root + "/doctrees"
html_dir = build_root + "/html"
# Called from Command Line
if N == 3:
sphinx_exe = "sphinx-build"
# Called from CMake (using the SPHINX_EXECUTABLE found)
elif N == 4:
sphinx_exe = sys.argv[3]
# Run Sphinx to build the documentation.
os.system("%s -b html -d %s %s %s" %(sphinx_exe, cache_dir, src_dir, html_dir))
input_pattern = """config=TeX-AMS-MML_HTMLorMML"></script>"""
output_pattern = """config=TeX-AMS_HTML">
MathJax.Hub.Config({
"HTML-CSS": {
availableFonts: ["TeX"]
}
});
</script>"""
# By default MathJax uses does not use TeX fonts. This simple search
# and replace fixes that.
for name in glob.glob("%s/*.html" % html_dir):
print "Postprocessing: ", name
fptr = open(name)
out = fptr.read().replace(input_pattern, output_pattern)
fptr.close()
fptr = open(name, "w")
fptr.write(out)
fptr.close()
| bsd-3-clause | -9,084,123,006,244,607,000 | 34.45679 | 79 | 0.731198 | false |
cloudify-cosmo/softlayer-python | SoftLayer/managers/sshkey.py | 5 | 2631 | """
SoftLayer.sshkey
~~~~~~~~~~~~~~~~
SSH Key Manager/helpers
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import utils
class SshKeyManager(utils.IdentifierMixin, object):
"""Manages account SSH keys.
:param SoftLayer.API.Client client: an API client instance
"""
def __init__(self, client):
self.client = client
self.sshkey = client['Security_Ssh_Key']
self.resolvers = [self._get_ids_from_label]
def add_key(self, key, label, notes=None):
"""Adds a new SSH key to the account.
:param string key: The SSH key to add
:param string label: The label for the key
:param string notes: Optional notes to store with the key
:returns: A dictionary of the new key's information.
"""
order = {
'key': key,
'label': label,
'notes': notes,
}
return self.sshkey.createObject(order)
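# Usage sketch (editor's addition; the client object and key material are
# hypothetical placeholders):
#
#     mgr = SshKeyManager(client)
#     new_key = mgr.add_key('ssh-rsa AAAA... user@host', 'laptop',
#                           notes='issued 2015')
#     mgr.list_keys(label='laptop')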
def delete_key(self, key_id):
"""Permanently deletes an SSH key from the account.
:param int key_id: The ID of the key to delete
"""
return self.sshkey.deleteObject(id=key_id)
def edit_key(self, key_id, label=None, notes=None):
"""Edits information about an SSH key.
:param int key_id: The ID of the key to edit
:param string label: The new label for the key
:param string notes: Notes to set or change on the key
:returns: A Boolean indicating success or failure
"""
data = {}
if label:
data['label'] = label
if notes:
data['notes'] = notes
return self.sshkey.editObject(data, id=key_id)
def get_key(self, key_id):
"""Returns full information about a single SSH key.
:param int key_id: The ID of the key to retrieve
:returns: A dictionary of information about the key
"""
return self.sshkey.getObject(id=key_id)
def list_keys(self, label=None):
"""Lists all SSH keys on the account.
:param string label: Filter list based on SSH key label
:returns: A list of dictionaries with information about each key
"""
_filter = utils.NestedDict({})
if label:
_filter['sshKeys']['label'] = utils.query_filter(label)
return self.client['Account'].getSshKeys(filter=_filter.to_dict())
def _get_ids_from_label(self, label):
"""Return sshkey IDs which match the given label."""
keys = self.list_keys()
results = []
for key in keys:
if key['label'] == label:
results.append(key['id'])
return results
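if __name__ == '__main__':
    # Minimal usage sketch (not part of the manager itself; assumes valid
    # SoftLayer credentials -- the username and api_key values here are
    # placeholders).
    import SoftLayer
    client = SoftLayer.Client(username='set-me', api_key='set-me')
    manager = SshKeyManager(client)
    for key in manager.list_keys():
        print('%s: %s' % (key['id'], key['label']))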
| mit | -4,295,229,147,732,616,000 | 27.912088 | 74 | 0.582668 | false |
TheMOOCAgency/edx-platform | openedx/core/djangoapps/course_groups/tests/test_partition_scheme.py | 7 | 16239 | """
Test the partitions and partitions service
"""
import json
from django.conf import settings
import django.test
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipUnless
from courseware.masquerade import handle_ajax, setup_masquerade
from courseware.tests.test_masquerade import StaffMasqueradeTestCase
from student.tests.factories import UserFactory
from xmodule.partitions.partitions import Group, UserPartition, UserPartitionError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.factories import ToyCourseFactory
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from ..partition_scheme import CohortPartitionScheme, get_cohorted_user_partition
from ..models import CourseUserGroupPartitionGroup
from ..views import link_cohort_to_partition_group, unlink_cohort_partition_group
from ..cohorts import add_user_to_cohort, remove_user_from_cohort, get_course_cohorts
from .helpers import CohortFactory, config_course_cohorts
@attr(shard=2)
class TestCohortPartitionScheme(ModuleStoreTestCase):
"""
Test the logic for linking a user to a partition group based on their cohort.
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Regenerate a course with cohort configuration, partition and groups,
and a student for each test.
"""
super(TestCohortPartitionScheme, self).setUp()
self.course_key = ToyCourseFactory.create().id
self.course = modulestore().get_course(self.course_key)
config_course_cohorts(self.course, is_cohorted=True)
self.groups = [Group(10, 'Group 10'), Group(20, 'Group 20')]
self.user_partition = UserPartition(
0,
'Test Partition',
'for testing purposes',
self.groups,
scheme=CohortPartitionScheme
)
self.student = UserFactory.create()
def assert_student_in_group(self, group, partition=None):
"""
Utility for checking that our test student comes up as assigned to the
specified partition (or, if None, no partition at all)
"""
self.assertEqual(
CohortPartitionScheme.get_group_for_user(
self.course_key,
self.student,
partition or self.user_partition,
use_cached=False
),
group
)
def test_student_cohort_assignment(self):
"""
Test that the CohortPartitionScheme continues to return the correct
group for a student as the student is moved in and out of different
cohorts.
"""
first_cohort, second_cohort = [
CohortFactory(course_id=self.course_key) for _ in range(2)
]
        # place the student into the first cohort
add_user_to_cohort(first_cohort, self.student.username)
self.assert_student_in_group(None)
# link first cohort to group 0 in the partition
link_cohort_to_partition_group(
first_cohort,
self.user_partition.id,
self.groups[0].id,
)
        # link second cohort to group 1 in the partition
link_cohort_to_partition_group(
second_cohort,
self.user_partition.id,
self.groups[1].id,
)
self.assert_student_in_group(self.groups[0])
# move student from first cohort to second cohort
add_user_to_cohort(second_cohort, self.student.username)
self.assert_student_in_group(self.groups[1])
# move the student out of the cohort
remove_user_from_cohort(second_cohort, self.student.username)
self.assert_student_in_group(None)
def test_cohort_partition_group_assignment(self):
"""
Test that the CohortPartitionScheme returns the correct group for a
student in a cohort when the cohort link is created / moved / deleted.
"""
test_cohort = CohortFactory(course_id=self.course_key)
# assign user to cohort (but cohort isn't linked to a partition group yet)
add_user_to_cohort(test_cohort, self.student.username)
# scheme should not yet find any link
self.assert_student_in_group(None)
# link cohort to group 0
link_cohort_to_partition_group(
test_cohort,
self.user_partition.id,
self.groups[0].id,
)
# now the scheme should find a link
self.assert_student_in_group(self.groups[0])
# link cohort to group 1 (first unlink it from group 0)
unlink_cohort_partition_group(test_cohort)
link_cohort_to_partition_group(
test_cohort,
self.user_partition.id,
self.groups[1].id,
)
# scheme should pick up the link
self.assert_student_in_group(self.groups[1])
# unlink cohort from anywhere
unlink_cohort_partition_group(
test_cohort,
)
# scheme should now return nothing
self.assert_student_in_group(None)
def test_student_lazily_assigned(self):
"""
Test that the lazy assignment of students to cohorts works
properly when accessed via the CohortPartitionScheme.
"""
# don't assign the student to any cohort initially
self.assert_student_in_group(None)
# get the default cohort, which is automatically created
# during the `get_course_cohorts` API call if it doesn't yet exist
cohort = get_course_cohorts(self.course)[0]
# map that cohort to a group in our partition
link_cohort_to_partition_group(
cohort,
self.user_partition.id,
self.groups[0].id,
)
# The student will be lazily assigned to the default cohort
# when CohortPartitionScheme.get_group_for_user makes its internal
# call to cohorts.get_cohort.
self.assert_student_in_group(self.groups[0])
def setup_student_in_group_0(self):
"""
Utility to set up a cohort, add our student to the cohort, and link
the cohort to self.groups[0]
"""
test_cohort = CohortFactory(course_id=self.course_key)
# link cohort to group 0
link_cohort_to_partition_group(
test_cohort,
self.user_partition.id,
self.groups[0].id,
)
# place student into cohort
add_user_to_cohort(test_cohort, self.student.username)
# check link is correct
self.assert_student_in_group(self.groups[0])
def test_partition_changes_nondestructive(self):
"""
If the name of a user partition is changed, or a group is added to the
partition, links from cohorts do not break.
If the name of a group is changed, links from cohorts do not break.
"""
self.setup_student_in_group_0()
# to simulate a non-destructive configuration change on the course, create
# a new partition with the same id and scheme but with groups renamed and
# a group added
new_groups = [Group(10, 'New Group 10'), Group(20, 'New Group 20'), Group(30, 'New Group 30')]
new_user_partition = UserPartition(
0, # same id
'Different Partition',
'dummy',
new_groups,
scheme=CohortPartitionScheme,
)
# the link should still work
self.assert_student_in_group(new_groups[0], new_user_partition)
def test_missing_group(self):
"""
If the group is deleted (or its id is changed), there's no referential
integrity enforced, so any references from cohorts to that group will be
lost. A warning should be logged when links are found from cohorts to
groups that no longer exist.
"""
self.setup_student_in_group_0()
# to simulate a destructive change on the course, create a new partition
# with the same id, but different group ids.
new_user_partition = UserPartition(
0, # same id
'Another Partition',
'dummy',
[Group(11, 'Not Group 10'), Group(21, 'Not Group 20')], # different ids
scheme=CohortPartitionScheme,
)
# the partition will be found since it has the same id, but the group
# ids aren't present anymore, so the scheme returns None (and logs a
# warning)
with patch('openedx.core.djangoapps.course_groups.partition_scheme.log') as mock_log:
self.assert_student_in_group(None, new_user_partition)
self.assertTrue(mock_log.warn.called)
self.assertRegexpMatches(mock_log.warn.call_args[0][0], 'group not found')
def test_missing_partition(self):
"""
If the user partition is deleted (or its id is changed), there's no
referential integrity enforced, so any references from cohorts to that
partition's groups will be lost. A warning should be logged when links
are found from cohorts to partitions that do not exist.
"""
self.setup_student_in_group_0()
# to simulate another destructive change on the course, create a new
# partition with a different id, but using the same groups.
new_user_partition = UserPartition(
1, # different id
'Moved Partition',
'dummy',
[Group(10, 'Group 10'), Group(20, 'Group 20')], # same ids
scheme=CohortPartitionScheme,
)
# the partition will not be found even though the group ids match, so the
# scheme returns None (and logs a warning).
with patch('openedx.core.djangoapps.course_groups.partition_scheme.log') as mock_log:
self.assert_student_in_group(None, new_user_partition)
self.assertTrue(mock_log.warn.called)
self.assertRegexpMatches(mock_log.warn.call_args[0][0], 'partition mismatch')
@attr(shard=2)
class TestExtension(django.test.TestCase):
"""
Ensure that the scheme extension is correctly plugged in (via entry point
in setup.py)
"""
def test_get_scheme(self):
self.assertEqual(UserPartition.get_scheme('cohort'), CohortPartitionScheme)
with self.assertRaisesRegexp(UserPartitionError, 'Unrecognized scheme'):
UserPartition.get_scheme('other')
@attr(shard=2)
class TestGetCohortedUserPartition(ModuleStoreTestCase):
"""
Test that `get_cohorted_user_partition` returns the first user_partition with scheme `CohortPartitionScheme`.
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Regenerate a course with cohort configuration, partition and groups,
and a student for each test.
"""
super(TestGetCohortedUserPartition, self).setUp()
self.course_key = ToyCourseFactory.create().id
self.course = modulestore().get_course(self.course_key)
self.student = UserFactory.create()
self.random_user_partition = UserPartition(
1,
'Random Partition',
'Should not be returned',
[Group(0, 'Group 0'), Group(1, 'Group 1')],
scheme=RandomUserPartitionScheme
)
self.cohort_user_partition = UserPartition(
0,
'Cohort Partition 1',
'Should be returned',
[Group(10, 'Group 10'), Group(20, 'Group 20')],
scheme=CohortPartitionScheme
)
self.second_cohort_user_partition = UserPartition(
2,
'Cohort Partition 2',
'Should not be returned',
[Group(10, 'Group 10'), Group(1, 'Group 1')],
scheme=CohortPartitionScheme
)
def test_returns_first_cohort_user_partition(self):
"""
Test get_cohorted_user_partition returns first user_partition with scheme `CohortPartitionScheme`.
"""
self.course.user_partitions.append(self.random_user_partition)
self.course.user_partitions.append(self.cohort_user_partition)
self.course.user_partitions.append(self.second_cohort_user_partition)
self.assertEqual(self.cohort_user_partition, get_cohorted_user_partition(self.course))
def test_no_cohort_user_partitions(self):
"""
Test get_cohorted_user_partition returns None when there are no cohorted user partitions.
"""
self.course.user_partitions.append(self.random_user_partition)
self.assertIsNone(get_cohorted_user_partition(self.course))
@attr(shard=2)
class TestMasqueradedGroup(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as belonging to a group.
"""
def setUp(self):
super(TestMasqueradedGroup, self).setUp()
self.user_partition = UserPartition(
0, 'Test User Partition', '',
[Group(0, 'Group 1'), Group(1, 'Group 2')],
scheme_id='cohort'
)
self.course.user_partitions.append(self.user_partition)
self.session = {}
modulestore().update_item(self.course, self.test_user.id)
def _verify_masquerade_for_group(self, group):
"""
Verify that the masquerade works for the specified group id.
"""
# Send the request to set the masquerade
request_json = {
"role": "student",
"user_partition_id": self.user_partition.id,
"group_id": group.id if group is not None else None
}
request = self._create_mock_json_request(
self.test_user,
data=request_json,
session=self.session
)
response = handle_ajax(request, unicode(self.course.id))
# pylint has issues analyzing this class (maybe due to circular imports?)
self.assertEquals(response.status_code, 200) # pylint: disable=no-member
# Now setup the masquerade for the test user
setup_masquerade(request, self.course.id, True)
scheme = self.user_partition.scheme
self.assertEqual(
scheme.get_group_for_user(self.course.id, self.test_user, self.user_partition),
group
)
def _verify_masquerade_for_all_groups(self):
"""
Verify that the staff user can masquerade as being in all groups
as well as no group.
"""
self._verify_masquerade_for_group(self.user_partition.groups[0])
self._verify_masquerade_for_group(self.user_partition.groups[1])
self._verify_masquerade_for_group(None)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_group_masquerade(self):
"""
Tests that a staff member can masquerade as being in a particular group.
"""
self._verify_masquerade_for_all_groups()
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_group_masquerade_with_cohort(self):
"""
Tests that a staff member can masquerade as being in a particular group
when that staff member also belongs to a cohort with a corresponding
group.
"""
self.course.cohort_config = {'cohorted': True}
modulestore().update_item(self.course, self.test_user.id) # pylint: disable=no-member
cohort = CohortFactory.create(course_id=self.course.id, users=[self.test_user])
CourseUserGroupPartitionGroup(
course_user_group=cohort,
partition_id=self.user_partition.id,
group_id=self.user_partition.groups[0].id
).save()
# When the staff user is masquerading as being in a None group
# (within an existent UserPartition), we should treat that as
# an explicit None, not defaulting to the user's cohort's
# partition group.
self._verify_masquerade_for_all_groups()
| agpl-3.0 | 7,696,015,459,569,116,000 | 38.13012 | 113 | 0.636985 | false |
coberger/DIRAC | DataManagementSystem/scripts/dirac-dms-show-se-status.py | 7 | 1713 | #!/usr/bin/env python
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Get status of the available Storage Elements
Usage:
%s [<options>]
""" % Script.scriptName )
Script.parseCommandLine()
import DIRAC
from DIRAC import gConfig,gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Utilities.List import sortList
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
if __name__ == "__main__":
result = getVOfromProxyGroup()
if not result['OK']:
gLogger.notice( 'Error:', result['Message'] )
DIRAC.exit( 1 )
vo = result['Value']
resources = Resources( vo = vo )
result = resources.getEligibleStorageElements()
if not result['OK']:
gLogger.notice( 'Error:', result['Message'] )
DIRAC.exit( 2 )
seList = sortList( result[ 'Value' ] )
resourceStatus = ResourceStatus()
result = resourceStatus.getStorageStatus( seList )
if not result['OK']:
gLogger.notice( 'Error:', result['Message'] )
DIRAC.exit( 3 )
  for se, stateDict in result[ 'Value' ].items():
    readState = stateDict.get( 'ReadAccess', 'Active' )
    writeState = stateDict.get( 'WriteAccess', 'Active' )
    gLogger.notice( "%s %s %s" % ( se.ljust( 25 ), readState.rjust( 15 ), writeState.rjust( 15 ) ) )
DIRAC.exit(0)
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 | 1,564,461,439,718,285,800 | 29.589286 | 89 | 0.614711 | false |
nafraf/spreads | spreadsplug/intervaltrigger.py | 5 | 2788 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Johannes Baiter <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Trigger plugin that triggers in a configurable interval. """
from __future__ import unicode_literals
import logging
import threading
import time
from spreads.config import OptionTemplate
from spreads.plugin import HookPlugin, TriggerHooksMixin
logger = logging.getLogger('spreadsplug.intervaltrigger')
class IntervalTrigger(HookPlugin, TriggerHooksMixin):
__name__ = 'intervaltrigger'
_loop_thread = None
_exit_event = None
@classmethod
def configuration_template(cls):
return {'interval': OptionTemplate(5.0, "Interval between captures"
" (in seconds)")}
def start_trigger_loop(self, capture_callback):
""" Launch the triggering loop in a background thread.
:param capture_callback: Callback for triggering a capture
:type capture_callback: function
"""
logger.debug("Starting event loop")
self._exit_event = threading.Event()
self._loop_thread = threading.Thread(target=self._trigger_loop,
args=(capture_callback, ))
self._loop_thread.start()
def stop_trigger_loop(self):
""" Stop the triggering loop and its thread. """
if self._exit_event:
logger.debug("Stopping event loop")
self._exit_event.set()
if self._loop_thread:
self._loop_thread.join()
def _trigger_loop(self, capture_func):
""" Read interval from configuration and run a loop that captures every
time the interval has elapsed.
:param capture_func: Callback for triggering a capture
:type capture_func: function
"""
interval = self.config['interval'].get(float)
        while interval > 0.0:
sleep_time = 0
while sleep_time < interval:
if self._exit_event.is_set():
return
time.sleep(0.01)
sleep_time += 0.01
capture_func()
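if __name__ == '__main__':
    # Standalone sketch of the same loop pattern (demo code, not part of
    # spreads): sleep in 10ms slices so the loop can react to an exit
    # event promptly instead of blocking for a whole interval.
    stop = threading.Event()

    def demo_loop(interval=0.5, captures=3):
        done = 0
        while done < captures and not stop.is_set():
            slept = 0.0
            while slept < interval:
                if stop.is_set():
                    return
                time.sleep(0.01)
                slept += 0.01
            print('capture %d' % (done + 1))
            done += 1

    worker = threading.Thread(target=demo_loop)
    worker.start()
    time.sleep(2.0)
    stop.set()
    worker.join()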
| agpl-3.0 | 3,905,821,402,085,475,300 | 34.74359 | 79 | 0.639885 | false |
alexalemi/battleship | players/util.py | 1 | 3217 | """
Author: Alex Alemi
Some utility routines for python players
"""
import logging
import socket
import os
import sys
from random import randrange
ship_sizes = {"A": 5, "B": 4, "D": 3, "S": 3, "P": 2}
def board_str(board):
""" Return the many lined string for a board """
boardstr = ""
for i in xrange(10):
for j in xrange(10):
if (i,j) in board:
boardstr += board[(i,j)]
else:
boardstr += '0'
boardstr += '\n'
return boardstr
def gen_random_board():
""" Generate a random board """
def place_ship(board, ship):
size = ship_sizes[ship]
orientation = randrange(2)
if orientation:
# if we are trying to place it horizontally
xpos = randrange(10-size)
ypos = randrange(10)
for i in xrange(size):
loc = (xpos+i, ypos)
if board.get(loc):
# we have a collision
raise IndexError
else:
board[loc] = ship
else:
# if we are trying to place it vertically
xpos = randrange(10)
ypos = randrange(10-size)
for i in xrange(size):
loc = (xpos, ypos+i)
if board.get(loc):
# we have a collision
raise IndexError
else:
board[loc] = ship
return board
done = False
while not done:
# Generate boards until we manage to not fail
board = {}
        for ship in ship_sizes:
try:
board = place_ship(board, ship)
except IndexError:
break
else:
done = True
return board
def gen_random_board_str():
return board_str(gen_random_board())
class LocalCommunication(object):
""" A very simple local communication thing
which can be used to locally test your
program
"""
def readline(self):
msg = raw_input()
return msg
def sendline(self,msg):
print(msg)
class Communication(object):
""" A simple communication wrapper, use
comm = Communication()
at which point you can use comm.readline() to read a line
and comm.sendline(msg) to send a line, sendline
will automatically add the newline at the end.
"""
def __init__(self):
self.port = int(sys.argv[1])
logging.debug("Got port %d", self.port)
# Create the socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_address = ('localhost', self.port)
logging.debug("Connection to %r", self.server_address)
self.sock.connect(self.server_address)
self.sock_file = self.sock.makefile("rw")
logging.debug("Connected")
def readline(self):
msg = self.sock_file.readline()
logging.debug("Read line %s", msg.strip())
return msg
def sendline(self,msg):
logging.debug("Sending line %s", msg.strip())
self.sock_file.write(msg + '\n')
self.sock_file.flush()
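if __name__ == '__main__':
    # Quick self-check (demo only): print one randomly generated board.
    print(gen_random_board_str())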
| mit | -2,937,155,664,503,200,300 | 27.219298 | 69 | 0.535903 | false |
GorK-ChO/selenium | py/test/selenium/webdriver/chrome/chrome_network_emulation_tests.py | 29 | 1252 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver import Chrome
def test_network_conditions_emulation():
    driver = Chrome()
    try:
        driver.set_network_conditions(
            offline=False,
            latency=56,  # additional latency (ms)
            throughput=789)
        conditions = driver.get_network_conditions()
        assert conditions['offline'] is False
        assert conditions['latency'] == 56
        assert conditions['download_throughput'] == 789
        assert conditions['upload_throughput'] == 789
    finally:
        # Always shut the browser down, even if an assertion fails.
        driver.quit()
| apache-2.0 | -2,553,425,591,509,324,000 | 39.387097 | 62 | 0.742013 | false |
OpenPymeMx/account-financial-reporting | account_financial_report_webkit/report/partner_balance.py | 29 | 4238 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright Camptocamp SA 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import pooler
from openerp.report import report_sxw
from openerp.tools.translate import _
from .common_partner_balance_reports \
import CommonPartnerBalanceReportHeaderWebkit
from .webkit_parser_header_fix import HeaderFooterTextWebKitParser
class PartnerBalanceWebkit(report_sxw.rml_parse,
CommonPartnerBalanceReportHeaderWebkit):
def __init__(self, cursor, uid, name, context):
super(PartnerBalanceWebkit, self).__init__(
cursor, uid, name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
company = self.pool.get('res.users').browse(
self.cr, uid, uid, context=context).company_id
header_report_name = ' - '.join((_('PARTNER BALANCE'),
company.name,
company.currency_id.name))
footer_date_time = self.formatLang(
str(datetime.today()), date_time=True)
self.localcontext.update({
'cr': cursor,
'uid': uid,
'report_name': _('Partner Balance'),
'display_account': self._get_display_account,
'display_account_raw': self._get_display_account_raw,
'filter_form': self._get_filter,
'target_move': self._get_target_move,
'display_target_move': self._get_display_target_move,
'display_partner_account': self._get_display_partner_account,
'accounts': self._get_accounts_br,
'additional_args': [
('--header-font-name', 'Helvetica'),
('--footer-font-name', 'Helvetica'),
('--header-font-size', '10'),
('--footer-font-size', '6'),
('--header-left', header_report_name),
('--header-spacing', '2'),
('--footer-left', footer_date_time),
('--footer-right',
' '.join((_('Page'), '[page]', _('of'), '[topage]'))),
('--footer-line',),
],
})
def _get_initial_balance_mode(self, start_period):
""" Force computing of initial balance for the partner balance,
because we cannot use the entries generated by
OpenERP in the opening period.
OpenERP allows to reconcile move lines between different partners,
so the generated entries in the opening period are unreliable.
"""
return 'initial_balance'
def set_context(self, objects, data, ids, report_type=None):
"""Populate a ledger_lines attribute on each browse record that will
be used by mako template"""
objects, new_ids, context_report_values = self.\
compute_partner_balance_data(data)
self.localcontext.update(context_report_values)
return super(PartnerBalanceWebkit, self).set_context(
objects, data, new_ids, report_type=report_type)
HeaderFooterTextWebKitParser(
'report.account.account_report_partner_balance_webkit',
'account.account',
'addons/account_financial_report_webkit/report/templates/\
account_report_partner_balance.mako',
parser=PartnerBalanceWebkit)
| agpl-3.0 | -8,863,846,583,735,864,000 | 40.960396 | 78 | 0.589429 | false |
itkinside/ufs | itkufs/common/views/display.py | 1 | 3418 | from operator import itemgetter
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render
from itkufs.common.decorators import limit_to_group, limit_to_owner
from itkufs.accounting.models import Account, Group
@login_required
@limit_to_group
def group_summary(request, group, is_admin=False):
"""Show group summary"""
return render(
request,
"common/group_summary.html",
{
"is_admin": is_admin,
"all": "all" in request.GET,
"group": Group.objects.select_related().get(id=group.id),
},
)
@login_required
@limit_to_owner
def account_summary(request, group, account, is_admin=False, is_owner=False):
"""Show account summary"""
if is_owner:
# Set active account in session
request.session["my_account"] = {
"group_slug": account.group.slug,
"account_slug": account.slug,
}
# Warn owner of account about a low balance
if account.is_blocked():
messages.error(
request,
"The account balance is below the block limit, please "
"contact the group admin or deposit enough to pass the "
"limit.",
)
elif account.needs_warning():
messages.warning(
request, "The account balance is below the warning limit."
)
return render(
request,
"common/account_summary.html",
{
"is_admin": is_admin,
"is_owner": is_owner,
"group": group,
"account": Account.objects.select_related().get(id=account.id),
"balance_data": _generate_gchart_data(
account.get_balance_history_set()
),
},
)
@login_required
@limit_to_group
def group_balance_graph(request, group, is_admin=False):
accounts = (
Account.objects.all()
.filter(group_id=group.id, active=True, group_account=False)
.order_by("name")
)
data = []
for a in accounts:
data.append([a.short_name, a.normal_balance()])
graph_data = ['[ "%s", %d ]' % (a[0], a[1]) for a in data]
data = sorted(data, key=itemgetter(1), reverse=True)
graph_data_sorted = ['[ "%s", %d ]' % (a[0], a[1]) for a in data]
graph_data_positive = []
graph_data_negative = []
for a in data:
if a[1] >= 0:
graph_data_positive.append('[ "%s", %d ]' % (a[0], a[1]))
else:
graph_data_negative.append('[ "%s", %d ]' % (a[0], -a[1]))
return render(
request,
"common/group_balance_graph.html",
{
"group": Group.objects.select_related().get(id=group.id),
"graph_data": ",\n".join(graph_data),
"graph_data_sorted": ",\n".join(graph_data_sorted),
"graph_data_positive": ",\n".join(graph_data_positive),
"graph_data_negative": ",\n".join(graph_data_negative),
},
)
def _generate_gchart_data(dataset):
# aggregate data
agg = 0.0
history = []
    for entry in dataset:
        saldo = float(entry.saldo)
        history.append((entry.date, saldo + agg))
        agg += saldo
items = [f"[ new Date({date}), {balance:.2f}]" for date, balance in history]
return ",\n".join(items)
| gpl-2.0 | -8,872,298,342,209,795,000 | 27.722689 | 80 | 0.559391 | false |
bbbenja/SickRage | lib/sqlalchemy/dialects/oracle/zxjdbc.py | 79 | 7744 | # oracle/zxjdbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
:driverurl: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
"""
import decimal
import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
from sqlalchemy.engine import result as _result
from sqlalchemy.sql import expression
import collections
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
else:
return value.date()
return process
class _ZxJDBCNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
#XXX: does the dialect return Decimal or not???
# if it does (in all cases), we could use a None processor as well as
# the to_float generic processor
if self.asdecimal:
def process(value):
if isinstance(value, decimal.Decimal):
return value
else:
return decimal.Decimal(str(value))
else:
def process(value):
if isinstance(value, decimal.Decimal):
return float(value)
else:
return value
return process
class OracleCompiler_zxjdbc(OracleCompiler):
def returning_clause(self, stmt, returning_cols):
self.returning_cols = list(expression._select_iterables(returning_cols))
# within_columns_clause=False so that labels (foo AS bar) don't render
columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
for c in self.returning_cols]
if not hasattr(self, 'returning_parameters'):
self.returning_parameters = []
binds = []
for i, col in enumerate(self.returning_cols):
dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
self.returning_parameters.append((i + 1, dbtype))
bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
self.binds[bindparam.key] = bindparam
binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
def pre_exec(self):
if hasattr(self.compiled, 'returning_parameters'):
# prepare a zxJDBC statement so we can grab its underlying
# OraclePreparedStatement's getReturnResultSet later
self.statement = self.cursor.prepare(self.statement)
def get_result_proxy(self):
if hasattr(self.compiled, 'returning_parameters'):
rrs = None
try:
try:
rrs = self.statement.__statement__.getReturnResultSet()
next(rrs)
except SQLException as sqle:
msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
if sqle.getSQLState() is not None:
msg += ' [SQLState: %s]' % sqle.getSQLState()
raise zxJDBC.Error(msg)
else:
row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
for index, dbtype in self.compiled.returning_parameters)
return ReturningResultProxy(self, row)
finally:
if rrs is not None:
try:
rrs.close()
except SQLException:
pass
self.statement.close()
return _result.ResultProxy(self)
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""ResultProxy backed by the RETURNING ResultSet results."""
def __init__(self, context, returning_row):
self._returning_row = returning_row
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
ret = []
for c in self.context.compiled.returning_cols:
if hasattr(c, 'name'):
ret.append((c.name, c.type))
else:
ret.append((c.anon_label, c.type))
return ret
def _buffer_rows(self):
return collections.deque([self._returning_row])
class ReturningParam(object):
"""A bindparam value representing a RETURNING parameter.
Specially handled by OracleReturningDataHandler.
"""
def __init__(self, type):
self.type = type
def __eq__(self, other):
if isinstance(other, ReturningParam):
return self.type == other.type
return NotImplemented
def __ne__(self, other):
if isinstance(other, ReturningParam):
return self.type != other.type
return NotImplemented
def __repr__(self):
kls = self.__class__
return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self),
self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
jdbc_db_name = 'oracle'
jdbc_driver_name = 'oracle.jdbc.OracleDriver'
statement_compiler = OracleCompiler_zxjdbc
execution_ctx_cls = OracleExecutionContext_zxjdbc
colspecs = util.update_copy(
OracleDialect.colspecs,
{
sqltypes.Date: _ZxJDBCDate,
sqltypes.Numeric: _ZxJDBCNumeric
}
)
def __init__(self, *args, **kwargs):
super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
global SQLException, zxJDBC
from java.sql import SQLException
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql.handler import OracleDataHandler
class OracleReturningDataHandler(OracleDataHandler):
"""zxJDBC DataHandler that specially handles ReturningParam."""
def setJDBCObject(self, statement, index, object, dbtype=None):
if type(object) is ReturningParam:
statement.registerReturnParameter(index, object.type)
elif dbtype is None:
OracleDataHandler.setJDBCObject(
self, statement, index, object)
else:
OracleDataHandler.setJDBCObject(
self, statement, index, object, dbtype)
self.DataHandler = OracleReturningDataHandler
def initialize(self, connection):
super(OracleDialect_zxjdbc, self).initialize(connection)
self.implicit_returning = connection.connection.driverversion >= '10.2'
def _create_jdbc_url(self, url):
return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)
def _get_server_version_info(self, connection):
version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
return tuple(int(x) for x in version.split('.'))
dialect = OracleDialect_zxjdbc
| gpl-3.0 | -1,441,098,352,703,462,700 | 34.522936 | 97 | 0.608471 | false |
sleepinghungry/wwif | students/simone/aiy yiy yiy.py | 1 | 1028 | Python 3.5.2 (v3.5.2:4def2a2901a5, Jun 25 2016, 22:18:55) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> file
Traceback (most recent call last):
File "<pyshell#0>", line 1, in <module>
file
NameError: name 'file' is not defined
>>>
RESTART: Z:\Coding Classes\Python Text Adventures\wwif\students\simone\hangman.py
Warning (from warnings module):
File "C:\Users\wwclc\AppData\Local\Programs\Python\Python35\lib\getpass.py", line 101
return fallback_getpass(prompt, stream)
GetPassWarning: Can not control echo on the terminal.
Warning: Password input may be echoed.
Player 1, enter a word
RESTART: Z:\Coding Classes\Python Text Adventures\wwif\students\simone\hangman.py
Warning (from warnings module):
File "C:\Users\wwclc\AppData\Local\Programs\Python\Python35\lib\getpass.py", line 101
return fallback_getpass(prompt, stream)
GetPassWarning: Can not control echo on the terminal.
Warning: Password input may be echoed.
Player 1, enter a word
| mit | 4,861,723,265,608,576,000 | 41.833333 | 94 | 0.751946 | false |
ganxueliang88/idracserver | idrac/log_api.py | 3 | 2265 | # coding: utf-8
from contextlib import closing
from io import open as copen
from json import dumps
from math import ceil
import re
from os.path import basename, dirname, join
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from jumpserver.api import BASE_DIR
DEFAULT_TEMPLATE = join(BASE_DIR, 'templates', 'jlog', 'static.jinja2')
rz_pat = re.compile(r'\x18B\w+\r\x8a(\x11)?')
def escapeString(string):
string = rz_pat.sub('', string)
try:
string = string.encode('unicode_escape').decode('utf-8', 'ignore')
except (UnicodeEncodeError, UnicodeDecodeError):
string = string.decode('utf-8', 'ignore')
string = string.replace("'", "\\'")
string = '\'' + string + '\''
return string
def getTiming(timef):
timing = None
with closing(timef):
timing = [l.strip().split(' ') for l in timef]
timing = [(int(ceil(float(r[0]) * 1000)), int(r[1])) for r in timing]
return timing
def scriptToJSON(scriptf, timing=None):
ret = []
with closing(scriptf):
scriptf.readline() # ignore first header line from script file
offset = 0
for t in timing:
dt = scriptf.read(t[1])
data = escapeString(dt)
# print ('###### (%s, %s)' % (t[1], repr(data)))
offset += t[0]
ret.append((data, offset))
return dumps(ret)
def renderTemplate(script_path, time_file_path, dimensions=(24, 80), templatename=DEFAULT_TEMPLATE):
with copen(script_path, encoding='utf-8', errors='replace', newline='\r\n') as scriptf:
with open(time_file_path) as timef:
timing = getTiming(timef)
json = scriptToJSON(scriptf, timing)
fsl = FileSystemLoader(dirname(templatename), 'utf-8')
e = Environment()
e.loader = fsl
templatename = basename(templatename)
rendered = e.get_template(templatename).render(json=json,
dimensions=dimensions)
return rendered
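if __name__ == '__main__':
    # Minimal usage sketch (assumed file names): render a `script`/`timing`
    # pair recorded by script(1) into a static HTML playback page.
    html = renderTemplate('typescript', 'timing')
    with copen('playback.html', 'w', encoding='utf-8') as out:
        out.write(html)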
| gpl-2.0 | -1,840,176,926,045,570,000 | 28.415584 | 100 | 0.640177 | false |
LIS/lis-tempest | tempest/tests/cmd/test_tempest_init.py | 3 | 4205 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import fixtures
from tempest.cmd import init
from tempest.tests import base
class TestTempestInit(base.TestCase):
def test_generate_testr_conf(self):
# Create fake conf dir
conf_dir = self.useFixture(fixtures.TempDir())
init_cmd = init.TempestInit(None, None)
init_cmd.generate_testr_conf(conf_dir.path)
# Generate expected file contents
top_level_path = os.path.dirname(os.path.dirname(init.__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
testr_conf_file = init.TESTR_CONF % (top_level_path, discover_path)
conf_path = conf_dir.join('.testr.conf')
with open(conf_path, 'r') as conf_file:
self.assertEqual(conf_file.read(), testr_conf_file)
def test_generate_sample_config(self):
local_dir = self.useFixture(fixtures.TempDir())
etc_dir_path = os.path.join(local_dir.path, 'etc/')
os.mkdir(etc_dir_path)
tmp_dir = self.useFixture(fixtures.TempDir())
config_dir = os.path.join(tmp_dir.path, 'config/')
shutil.copytree('etc/', config_dir)
init_cmd = init.TempestInit(None, None)
local_sample_conf_file = os.path.join(etc_dir_path,
'tempest.conf.sample')
# Verify no sample config file exist
self.assertFalse(os.path.isfile(local_sample_conf_file))
init_cmd.generate_sample_config(local_dir.path, config_dir)
# Verify sample config file exist with some content
self.assertTrue(os.path.isfile(local_sample_conf_file))
self.assertGreater(os.path.getsize(local_sample_conf_file), 0)
def test_create_working_dir_with_existing_local_dir_non_empty(self):
fake_local_dir = self.useFixture(fixtures.TempDir())
fake_local_conf_dir = self.useFixture(fixtures.TempDir())
open("%s/foo" % fake_local_dir.path, 'w').close()
_init = init.TempestInit(None, None)
self.assertRaises(OSError,
_init.create_working_dir,
fake_local_dir.path,
fake_local_conf_dir.path)
def test_create_working_dir(self):
fake_local_dir = self.useFixture(fixtures.TempDir())
fake_local_conf_dir = self.useFixture(fixtures.TempDir())
os.rmdir(fake_local_dir.path)
# Create a fake conf file
fake_file = fake_local_conf_dir.join('conf_file.conf')
open(fake_file, 'w').close()
init_cmd = init.TempestInit(None, None)
init_cmd.create_working_dir(fake_local_dir.path,
fake_local_conf_dir.path)
# Assert directories are created
lock_path = os.path.join(fake_local_dir.path, 'tempest_lock')
etc_dir = os.path.join(fake_local_dir.path, 'etc')
log_dir = os.path.join(fake_local_dir.path, 'logs')
testr_dir = os.path.join(fake_local_dir.path, '.testrepository')
self.assertTrue(os.path.isdir(lock_path))
self.assertTrue(os.path.isdir(etc_dir))
self.assertTrue(os.path.isdir(log_dir))
self.assertTrue(os.path.isdir(testr_dir))
# Assert file creation
fake_file_moved = os.path.join(etc_dir, 'conf_file.conf')
local_conf_file = os.path.join(etc_dir, 'tempest.conf')
local_testr_conf = os.path.join(fake_local_dir.path, '.testr.conf')
self.assertTrue(os.path.isfile(fake_file_moved))
self.assertTrue(os.path.isfile(local_conf_file))
self.assertTrue(os.path.isfile(local_testr_conf))
| apache-2.0 | 1,012,579,020,108,612,700 | 42.802083 | 75 | 0.645898 | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/metrics/histograms/update_bad_message_reasons.py | 31 | 1275 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the various BadMessage enums in histograms.xml file with values read
from the corresponding bad_message.h files.
If the file was pretty-printed, the updated version is pretty-printed too.
"""
import sys
from update_histogram_enum import UpdateHistogramEnum
if __name__ == '__main__':
if len(sys.argv) > 1:
print >>sys.stderr, 'No arguments expected!'
sys.stderr.write(__doc__)
sys.exit(1)
histograms = {
'chrome/browser/bad_message.h': 'BadMessageReasonChrome',
'content/browser/bad_message.h': 'BadMessageReasonContent',
'components/nacl/browser/bad_message.h': 'BadMessageReasonNaCl',
'components/password_manager/content/browser/bad_message.h':
'BadMessageReasonPasswordManager',
'extensions/browser/bad_message.h': 'BadMessageReasonExtensions',
}
for header_file, histogram_name in histograms.items():
UpdateHistogramEnum(histogram_enum_name=histogram_name,
source_enum_path=header_file,
start_marker='^enum (class )?BadMessageReason {',
end_marker='^BAD_MESSAGE_MAX')
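# Typical invocation (run from the Chromium source root, no arguments):
#
#   python tools/metrics/histograms/update_bad_message_reasons.py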
| mit | 3,903,276,838,310,042,600 | 36.5 | 79 | 0.697255 | false |
autosportlabs/kivy | kivy/core/clipboard/__init__.py | 9 | 4563 | '''
Clipboard
=========
Core class for accessing the Clipboard. If we are not able to access the
system clipboard, a fake one will be used.
Usage example:
.. code-block:: kv
#:import Clipboard kivy.core.clipboard.Clipboard
Button:
on_release:
self.text = Clipboard.paste()
Clipboard.copy('Data')
'''
__all__ = ('ClipboardBase', 'Clipboard')
from kivy import Logger
from kivy.core import core_select_lib
from kivy.utils import platform
from kivy.setupconfig import USE_SDL2
class ClipboardBase(object):
def get(self, mimetype):
        '''Get the current data in the clipboard, using the mimetype if possible.
        You should not use this method directly. Use :meth:`paste` instead.
'''
return None
def put(self, data, mimetype):
'''Put data on the clipboard, and attach a mimetype.
You should not use this method directly. Use :meth:`copy` instead.
'''
pass
def get_types(self):
'''Return a list of supported mimetypes
'''
return []
def _ensure_clipboard(self):
''' Ensure that the clipboard has been properly initialised.
'''
if hasattr(self, '_clip_mime_type'):
return
if platform == 'win':
self._clip_mime_type = 'text/plain;charset=utf-8'
# windows clipboard uses a utf-16 little endian encoding
self._encoding = 'utf-16-le'
elif platform == 'linux':
self._clip_mime_type = 'text/plain;charset=utf-8'
self._encoding = 'utf-8'
else:
self._clip_mime_type = 'text/plain'
self._encoding = 'utf-8'
def copy(self, data=''):
''' Copy the value provided in argument `data` into current clipboard.
If data is not of type string it will be converted to string.
.. versionadded:: 1.9.0
'''
if data:
self._copy(data)
def paste(self):
        ''' Get text from the system clipboard and return it as a usable string.
.. versionadded:: 1.9.0
'''
return self._paste()
def _copy(self, data):
self._ensure_clipboard()
if not isinstance(data, bytes):
data = data.encode(self._encoding)
self.put(data, self._clip_mime_type)
def _paste(self):
self._ensure_clipboard()
_clip_types = Clipboard.get_types()
mime_type = self._clip_mime_type
if mime_type not in _clip_types:
mime_type = 'text/plain'
data = self.get(mime_type)
if data is not None:
# decode only if we don't have unicode
# we would still need to decode from utf-16 (windows)
# data is of type bytes in PY3
if isinstance(data, bytes):
data = data.decode(self._encoding, 'ignore')
# remove null strings mostly a windows issue
data = data.replace(u'\x00', u'')
return data
return u''
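# Minimal round-trip example (illustrative; requires a working clipboard
# provider for the current platform):
#
#     from kivy.core.clipboard import Clipboard
#     Clipboard.copy('Data')
#     assert Clipboard.paste() == 'Data'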
# load clipboard implementation
_clipboards = []
if platform == 'android':
_clipboards.append(
('android', 'clipboard_android', 'ClipboardAndroid'))
elif platform == 'macosx':
_clipboards.append(
('nspaste', 'clipboard_nspaste', 'ClipboardNSPaste'))
elif platform == 'win':
_clipboards.append(
('winctypes', 'clipboard_winctypes', 'ClipboardWindows'))
elif platform == 'linux':
_clipboards.append(
('dbusklipper', 'clipboard_dbusklipper', 'ClipboardDbusKlipper'))
_clipboards.append(
('gtk3', 'clipboard_gtk3', 'ClipboardGtk3'))
_clipboards.append(
('xclip', 'clipboard_xclip', 'ClipboardXclip'))
_clipboards.append(
('xsel', 'clipboard_xsel', 'ClipboardXsel'))
if USE_SDL2:
_clipboards.append(
('sdl2', 'clipboard_sdl2', 'ClipboardSDL2'))
else:
_clipboards.append(
('pygame', 'clipboard_pygame', 'ClipboardPygame'))
_clipboards.append(
('dummy', 'clipboard_dummy', 'ClipboardDummy'))
Clipboard = core_select_lib('clipboard', _clipboards, True)
CutBuffer = None
if platform == 'linux':
_cutbuffers = [
('xclip', 'clipboard_xclip', 'ClipboardXclip'),
('xsel', 'clipboard_xsel', 'ClipboardXsel'),
]
if Clipboard.__class__.__name__ in (c[2] for c in _cutbuffers):
CutBuffer = Clipboard
else:
CutBuffer = core_select_lib('cutbuffer', _cutbuffers, True,
basemodule='clipboard')
if CutBuffer:
Logger.info('CutBuffer: cut buffer support enabled')
| mit | 2,617,446,477,459,466,000 | 28.063694 | 78 | 0.591059 | false |
abelfunctions/abelfunctions | examples/riemanntheta_demo.py | 2 | 8564 | """
Grady Williams
January 28, 2013
This module provides functions for displaying graphs of the Riemann-Theta
function. There are 12 different graphs that can be generated, 10 of them
correspond to the graphics shown on the Digital Library of Mathematical
Functions page for Riemann Theta (dlmf.nist.gov/21.4) and the names of the
functions that generate those plots correspond to the names of the plots on
that page (e.g. plt_a1 generates the plot denoted a1 on the DLMF page).
The other two graphs are of the first and second derivatives for a given Omega.
Besides the plots for the derivatives, all of the plots take a few optional arguments:
SIZE: The number of grid points per direction over which the function is
      computed. The default is 75.
warp: The mayavi warp number; documentation for it can be found at
      docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html.
      The default is 'auto'.
d_axes: A boolean value which determines whether or not the axes are displayed.
WARNING: If d_axes is set to True, then warp should be set to '1'. Otherwise
         incorrect axes will be displayed and function values will appear incorrect.
There are 3 different Omegas that are considered
Omega 1 = [[1.690983006 + .951056516*1.0j 1.5 + .363271264*1.0j]
[1.5 + .363271264*1.0j 1.309016994 + .951056516*1.0j]]
Omega 2 = [[1.0j -.5]
[-.5 1.0j]]
Omega 3 = [[-.5 + 1.0j .5 -.5*1.0j -.5-.5*1.0j]
[.5 -.5*1.0j 1.0j 0 ]
[-.5 - .5*1.0j 0 1.0j ]]
In all of the following graphs, the exponential growth of Riemann Theta has been factored out.
"""
from abelfunctions import RiemannTheta
import numpy as np
from mayavi.mlab import *
import matplotlib.pyplot as plt
gpu = True
try:
import pycuda.driver
except ImportError:
gpu = False
"""
Plots the real part of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.a1 on DLMF
"""
def plt_a1(SIZE=75, warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
"""
Plots the imaginary part of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.b1 on DLMF
"""
def plt_b1(SIZE=75,warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE,gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
"""
Plots the modulus of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.c1 on DLMF
"""
def plt_c1(SIZE=75, warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE, gpu)
V = np.absolute(V)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_a2(SIZE=75,warp = "auto",d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_b2(SIZE=75,warp= "auto", d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_c2(SIZE=75, warp = "auto", d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = np.absolute(V)
s = surf(X,Y,V,warp_scale = warp)
if d_axes:
axes()
return s
def plt_a3(SIZE=75, warp = "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_b3(SIZE=75, warp= "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE,gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_c3(SIZE=75, warp= "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE,gpu)
V = np.absolute(V)
s = surf(X,Y,V,warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_2(SIZE=75, warp = "auto", d_axes = False):
X,Y,V = get_d_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_3(SIZE=75, warp = "auto", d_axes=False):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5,1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:2:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = np.absolute(V)
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_4(SIZE=75,warp = "auto", d_axes=False, gpu=False):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5,1.0j]])
X,Y = np.mgrid[0:4:SIZE*1.0j, 0:4:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z.real*1.0j,z.imag*1.0j] for z in Z], Omega, batch=True)
V = V.real
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_5(SIZE=75,warp = "auto", d_axes=False, gpu=False):
theta = RiemannTheta
Omega = np.matrix([[-.5 + 1.0j, .5 -.5*1.0j, -.5-.5*1.0j],
[.5 -.5*1.0j, 1.0j, 0],
[-.5 - .5*1.0j, 0, 1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:3:1.0j*SIZE]
Z = X+Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0,0] for z in Z], Omega, batch=True)
V = V.real
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V,warp_scale=warp)
if d_axes:
axes()
return s
def plt_first_deriv():
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
k = [[1,0]]
Z = np.linspace(0,50,500)
U,V = theta.exp_and_osc_at_point([[0, z*1.0j] for z in Z], Omega, deriv=k, batch=True)
plt.plot(Z, V.real)
plt.show()
def plt_second_deriv():
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
k = [[1,0],[1,0]]
Z = np.linspace(0,50,500)
U,V = theta.exp_and_osc_at_point([[0, z*1.0j] for z in Z], Omega, deriv=k, batch=True)
plt.plot(Z, V.real)
plt.show()
def explosion(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[-1.5:1.5:SIZE*1.0j, -1.5:1.5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = np.exp(U)*V
V = V.reshape(SIZE, SIZE)
s = surf(X,Y,np.absolute(V), warp_scale = 'auto')
savefig("test.eps")
def get_r1_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = V.reshape(SIZE, SIZE)
return X,Y,V
def get_r2_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X = np.linspace(0,1,SIZE)
Y = np.linspace(0,1,SIZE)
Z = []
for x in X:
for y in Y:
Z.append([x,y])
U,V = theta.exp_and_osc_at_point(Z, Omega, batch=True)
V = V.reshape(SIZE,SIZE)
return X,Y,V
def get_r3_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[0:5:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[1.0j*z.real,1.0j*z.imag] for z in Z], Omega, batch=True)
V = V.reshape(SIZE, SIZE)
return X,Y,V
def get_d_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5, 1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y * 1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = V.reshape(SIZE,SIZE)
return X,Y,V
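if __name__ == '__main__':
    # Demo entry point (not part of the original module): render the first
    # DLMF plot (21.4.1.a1) at a modest grid size and display the scene.
    plt_a1(SIZE=50)
    show()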
| mit | -8,656,464,474,544,515,000 | 31.074906 | 95 | 0.579636 | false |
laurent-george/bokeh | examples/glyphs/data_tables.py | 41 | 3178 | from bokeh.io import vplot
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, HoverTool, BoxSelectTool
from bokeh.models.widgets import DataTable, TableColumn, StringFormatter, NumberFormatter, StringEditor, IntEditor, NumberEditor, SelectEditor
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.sampledata.autompg2 import autompg2 as mpg
source = ColumnDataSource(mpg)
manufacturers = sorted(mpg["manufacturer"].unique())
models = sorted(mpg["model"].unique())
transmissions = sorted(mpg["trans"].unique())
drives = sorted(mpg["drv"].unique())
classes = sorted(mpg["class"].unique())
columns = [
TableColumn(field="manufacturer", title="Manufacturer", editor=SelectEditor(options=manufacturers), formatter=StringFormatter(font_style="bold")),
TableColumn(field="model", title="Model", editor=StringEditor(completions=models)),
TableColumn(field="displ", title="Displacement", editor=NumberEditor(step=0.1), formatter=NumberFormatter(format="0.0")),
TableColumn(field="year", title="Year", editor=IntEditor()),
TableColumn(field="cyl", title="Cylinders", editor=IntEditor()),
TableColumn(field="trans", title="Transmission", editor=SelectEditor(options=transmissions)),
TableColumn(field="drv", title="Drive", editor=SelectEditor(options=drives)),
TableColumn(field="class", title="Class", editor=SelectEditor(options=classes)),
TableColumn(field="cty", title="City MPG", editor=IntEditor()),
TableColumn(field="hwy", title="Highway MPG", editor=IntEditor()),
]
data_table = DataTable(source=source, columns=columns, editable=True)
plot = Plot(title=None, x_range=DataRange1d(), y_range=DataRange1d(), plot_width=1000, plot_height=300)
# Set up x & y axis
plot.add_layout(LinearAxis(), 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
# Add Glyphs
cty_glyph = Circle(x="index", y="cty", fill_color="#396285", size=8, fill_alpha=0.5, line_alpha=0.5)
hwy_glyph = Circle(x="index", y="hwy", fill_color="#CE603D", size=8, fill_alpha=0.5, line_alpha=0.5)
cty = plot.add_glyph(source, cty_glyph)
hwy = plot.add_glyph(source, hwy_glyph)
# Add the tools
tooltips = [
("Manufacturer", "@manufacturer"),
("Model", "@model"),
("Displacement", "@displ"),
("Year", "@year"),
("Cylinders", "@cyl"),
("Transmission", "@trans"),
("Drive", "@drv"),
("Class", "@class"),
]
cty_hover_tool = HoverTool(renderers=[cty], tooltips=tooltips + [("City MPG", "@cty")])
hwy_hover_tool = HoverTool(renderers=[hwy], tooltips=tooltips + [("Highway MPG", "@hwy")])
select_tool = BoxSelectTool(renderers=[cty, hwy], dimensions=['width'])
plot.add_tools(cty_hover_tool, hwy_hover_tool, select_tool)
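# dimensions=['width'] restricts the box select to a horizontal span; because
# the data source is shared, rows selected on the plot are highlighted in the
# table as well.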
layout = vplot(plot, data_table)
if __name__ == "__main__":
filename = "data_tables.html"
with open(filename, "w") as f:
f.write(file_html(layout, INLINE, "Data Tables"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause | 7,155,318,669,208,179,000 | 45.735294 | 150 | 0.679673 | false |
Gabrielcarvfer/NS3 | src/bridge/bindings/modulegen__gcc_LP64.py | 4 | 283981 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.bridge', cpp_namespace='::ns3')
return root_module
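# Sketch of how these registration hooks are typically driven (the standard
# pybindgen modulegen pattern; not verbatim from this file):
#
#     root_module = module_init()
#     register_types(root_module)
#     register_methods(root_module)
#     root_module.generate(FileCodeSink(sys.stdout))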
def register_types(module):
root_module = module.get_root()
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core')
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator', 'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator*', 'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator&', 'ns3::AttributeConstructionList::CIterator&')
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper [class]
module.add_class('BridgeHelper')
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent', import_from_module='ns.core')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >', 'ns3::LogComponent::ComponentList')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >*', 'ns3::LogComponent::ComponentList*')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >&', 'ns3::LogComponent::ComponentList&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )', 'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )*', 'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )&', 'ns3::Mac48Address::TracedCallback&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
module.add_class('Mac8Address', import_from_module='ns.network')
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', 'ns3::NetDeviceContainer::Iterator')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', 'ns3::NetDeviceContainer::Iterator*')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', 'ns3::NetDeviceContainer::Iterator&')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::ItemType [enumeration]
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## log.h (module 'core'): ns3::ParameterLogger [class]
module.add_class('ParameterLogger', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::ObjectBase'], template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'])
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST', 'AUTO'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )', 'ns3::Time::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )*', 'ns3::Time::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )&', 'ns3::Time::TracedCallback&')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias('uint32_t', 'ns3::TypeId::hash_t')
typehandlers.add_type_alias('uint32_t*', 'ns3::TypeId::hash_t*')
typehandlers.add_type_alias('uint32_t&', 'ns3::TypeId::hash_t&')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias('void ( * ) ( )', 'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias('void ( * ) ( )*', 'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( )&', 'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::NetDevice::PromiscReceiveCallback&')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )', 'ns3::Packet::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )*', 'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )&', 'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', 'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', 'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', 'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', 'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', 'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', 'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', 'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', 'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', 'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )', 'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )*', 'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )&', 'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', 'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', 'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', 'ns3::Packet::SinrTracedCallback&')
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel [class]
module.add_class('BridgeChannel', parent=root_module['ns3::Channel'])
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice [class]
module.add_class('BridgeNetDevice', parent=root_module['ns3::NetDevice'])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type='map')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::TimePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::TimePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::TimePrinter&')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::NodePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::NodePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::NodePrinter&')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )', 'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )*', 'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )&', 'ns3::TracedValueCallback::Time&')
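# Note the pattern used throughout the type registrations above: each alias is
# added three times (value, pointer '*', and reference '&' forms) so the
# generated wrappers can marshal every variant.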
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3BridgeHelper_methods(root_module, root_module['ns3::BridgeHelper'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3BridgeChannel_methods(root_module, root_module['ns3::BridgeChannel'])
register_Ns3BridgeNetDevice_methods(root_module, root_module['ns3::BridgeNetDevice'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
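# Each register_Ns3*_methods helper below binds one C++ class: operators (where
# defined), then constructors, then member functions, preserving the
# const/static qualifiers recorded in the generated comments.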
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
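# Once generated and compiled, the bound class is usable from Python roughly as
# follows (a sketch; assumes the ns.network bindings are built):
#
#     import ns.network
#     addr = ns.network.Address()
#     addr.IsInvalid()   # True for a default-constructed Address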
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3BridgeHelper_methods(root_module, cls):
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper(ns3::BridgeHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::BridgeHelper const &', 'arg0')])
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper() [constructor]
cls.add_constructor([])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(std::string nodeName, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('std::string', 'nodeName'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): void ns3::BridgeHelper::SetDeviceAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetDeviceAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
cls.add_method('GetRemainingSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
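## Hedged sketch of an iterator round trip (assumes `buf` from the Buffer
## sketch above, with at least two bytes of space):
##
##     it = buf.Begin()
##     it.WriteHtonU16(0xBEEF)          # network (big-endian) byte order
##     it = buf.Begin()
##     assert it.ReadNtohU16() == 0xBEEF
##     # WriteHtolsbU16 / ReadLsbtohU16 are the little-endian counterparts.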
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
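## Hedged sketch: a ByteTagIterator is normally obtained from a bound
## ns3::Packet (not registered in this portion of the file). Assuming a
## packet `pkt` with byte tags attached:
##
##     it = pkt.GetByteTagIterator()
##     while it.HasNext():
##         item = it.Next()
##         span = (item.GetStart(), item.GetEnd())  # byte range the tag covers
##         tid = item.GetTypeId()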
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
cls.add_method('Adjust',
'void',
[param('int32_t', 'adjustment')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeAccessor *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeChecker *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeValue *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::CallbackImplBase *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter(ns3::DefaultDeleter<ns3::EventImpl> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::EventImpl>::Delete(ns3::EventImpl * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::EventImpl *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Hash::Implementation *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter(ns3::DefaultDeleter<ns3::NixVector> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::NixVector > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::NixVector>::Delete(ns3::NixVector * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::NixVector *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::TraceSourceAccessor *', 'object')],
is_static=True)
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
## event-id.h (module 'core'): void ns3::EventId::Remove() [member function]
cls.add_method('Remove',
'void',
[])
return
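## Hedged sketch: EventIds are normally returned by ns3::Simulator::Schedule;
## a default-constructed id carries no event, so it reports itself expired:
##
##     import ns.core
##     eid = ns.core.EventId()          # no event attached
##     assert eid.IsExpired() and not eid.IsRunning()
##     # A live id returned by Simulator.Schedule(...) could be Cancel()'ed.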
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
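## Hedged sketch (assuming the core bindings import as `ns.core`):
##
##     h = ns.core.Hasher()
##     h32 = h.GetHash32("hello world")
##     h.clear()                        # reset internal state between inputs
##     h64 = h.GetHash64("hello world")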
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
deprecated=True, is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
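## Hedged usage sketch for the Ipv4Address API registered above:
##
##     addr = ns.network.Ipv4Address("10.1.1.7")
##     mask = ns.network.Ipv4Mask("255.255.255.0")
##     net = addr.CombineMask(mask)                   # -> 10.1.1.0
##     bc = addr.GetSubnetDirectedBroadcast(mask)     # -> 10.1.1.255
##     assert not addr.IsMulticast()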
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
deprecated=True, is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
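## Hedged sketch: masks accept dotted-quad or "/n" notation:
##
##     mask = ns.network.Ipv4Mask("/24")
##     assert mask.GetPrefixLength() == 24
##     a = ns.network.Ipv4Address("10.1.1.7")
##     b = ns.network.Ipv4Address("10.1.1.42")
##     assert mask.IsMatch(a, b)        # same /24 subnet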
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) const [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::HasPrefix(ns3::Ipv6Prefix const & prefix) const [member function]
cls.add_method('HasPrefix',
'bool',
[param('ns3::Ipv6Prefix const &', 'prefix')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac8Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac8Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
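## Hedged sketch: autoconfigured (EUI-64 style) addresses from a MAC, and
## IPv4-mapped addresses:
##
##     mac = ns.network.Mac48Address("00:00:00:00:00:01")
##     ll = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
##     assert ll.IsLinkLocal()
##     mapped = ns.network.Ipv6Address.MakeIpv4MappedAddress(
##         ns.network.Ipv4Address("10.1.1.7"))        # ::ffff:10.1.1.7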
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix, uint8_t prefixLength) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix'), param('uint8_t', 'prefixLength')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix, uint8_t prefixLength) [constructor]
cls.add_constructor([param('char const *', 'prefix'), param('uint8_t', 'prefixLength')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetMinimumPrefixLength() const [member function]
cls.add_method('GetMinimumPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::SetPrefixLength(uint8_t prefixLength) [member function]
cls.add_method('SetPrefixLength',
'void',
[param('uint8_t', 'prefixLength')])
return
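## Hedged sketch, pairing a prefix with the Ipv6Address API above:
##
##     prefix = ns.network.Ipv6Prefix(64)
##     assert prefix.GetPrefixLength() == 64
##     addr = ns.network.Ipv6Address("2001:db8::1")
##     net = addr.CombinePrefix(prefix)               # -> 2001:db8::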
def register_Ns3LogComponent_methods(root_module, cls):
## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [constructor]
cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LogLevel::LOG_NONE) [constructor]
cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LogLevel::LOG_NONE')])
## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function]
cls.add_method('Disable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function]
cls.add_method('Enable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function]
cls.add_method('File',
'std::string',
[],
is_const=True)
## log.h (module 'core'): static ns3::LogComponent::ComponentList * ns3::LogComponent::GetComponentList() [member function]
cls.add_method('GetComponentList',
'ns3::LogComponent::ComponentList *',
[],
is_static=True)
## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function]
cls.add_method('GetLevelLabel',
'std::string',
[param('ns3::LogLevel const', 'level')],
is_static=True)
## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function]
cls.add_method('IsEnabled',
'bool',
[param('ns3::LogLevel const', 'level')],
is_const=True)
## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
cls.add_method('IsNoneEnabled',
'bool',
[],
is_const=True)
## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
cls.add_method('Name',
'char const *',
[],
is_const=True)
## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function]
cls.add_method('SetMask',
'void',
[param('ns3::LogLevel const', 'level')])
return
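## Hedged sketch: log components are usually created by the C++
## NS_LOG_COMPONENT_DEFINE macro; direct construction is shown only to
## illustrate the bound API, assuming the LogLevel constants are exported
## as ns.core.LOG_*:
##
##     comp = ns.core.LogComponent("MyComponent", "my-file.cc")
##     comp.Enable(ns.core.LOG_LEVEL_INFO)
##     assert comp.IsEnabled(ns.core.LOG_INFO)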
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
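## Hedged sketch:
##
##     m = ns.network.Mac48Address.Allocate()         # sequentially assigned
##     grp = ns.network.Mac48Address.GetMulticast(
##         ns.network.Ipv4Address("224.0.0.1"))       # 01:00:5e:... mapping
##     assert grp.IsGroup()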
def register_Ns3Mac8Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(ns3::Mac8Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac8Address const &', 'arg0')])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address() [constructor]
cls.add_constructor([])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(uint8_t addr) [constructor]
cls.add_constructor([param('uint8_t', 'addr')])
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac8Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyFrom(uint8_t const * pBuffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'pBuffer')])
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyTo(uint8_t * pBuffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'pBuffer')],
is_const=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static bool ns3::Mac8Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::Iterator ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'ns3::NetDeviceContainer::Iterator',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::Iterator ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'ns3::NetDeviceContainer::Iterator',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
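## Hedged sketch: containers are usually filled by a helper's Install()
## call (hypothetical here, for shape only); iteration goes through
## Get()/GetN():
##
##     devs = ns.network.NetDeviceContainer()
##     # devs.Add(helper.Install(nodes))  # hypothetical helper call
##     for i in range(devs.GetN()):
##         dev = devs.Get(i)              # Ptr<NetDevice>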
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
is_virtual=True, visibility='protected')
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactory::IsTypeIdSet() const [member function]
cls.add_method('IsTypeIdSet',
'bool',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set() [member function]
cls.add_method('Set',
'void',
[])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
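
# Illustrative sketch (hand-written, not generated): ObjectFactory pairs a
# TypeId with attribute overrides and instantiates objects from it. A minimal
# use of the methods registered above, assuming `ns.core` bindings and any
# constructible TypeId name:
def _example_object_factory():
    import ns.core
    f = ns.core.ObjectFactory()
    assert not f.IsTypeIdSet()
    f.SetTypeId("ns3::ConstantRandomVariable")  # std::string overload above
    assert f.IsTypeIdSet()
    return f.Create()                           # -> ns3::Ptr<ns3::Object>
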
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
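
# Illustrative note (hand-written, not generated): packet metadata recording
# is off by default for speed; the static switches registered above turn on
# recording (Enable) or recording plus consistency checks (EnableChecking),
# and must be called before the first packet is created:
def _example_enable_packet_metadata():
    import ns.network
    ns.network.PacketMetadata.Enable()
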
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::type [variable]
cls.add_instance_attribute('type', 'ns3::PacketMetadata::Item::ItemType', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
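
# Illustrative sketch (hand-written, not generated): the HasNext/Next pair
# registered above is the usual way to walk a packet's recorded headers and
# trailers. `ns3::Packet` and its BeginItem() accessor are assumed to be
# registered elsewhere in this file:
def _example_iterate_metadata(pkt):
    # pkt: an ns3::Packet created after PacketMetadata.Enable()
    items = []
    it = pkt.BeginItem()
    while it.HasNext():
        item = it.Next()                # PacketMetadata::Item (see above)
        items.append((item.tid.GetName(), item.currentSize))
    return items
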
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
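
# Illustrative sketch (hand-written, not generated): the packet tags attached
# to a packet can be enumerated with the iterator and item registered above;
# Packet.GetPacketTagIterator() is assumed to be registered elsewhere in this
# file:
def _example_list_packet_tags(pkt):
    tids = []
    it = pkt.GetPacketTagIterator()
    while it.HasNext():
        tids.append(it.Next().GetTypeId().GetName())
    return tids
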
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): uint32_t ns3::PacketTagList::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-tag-list.h (module 'network'): uint32_t ns3::PacketTagList::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): uint32_t ns3::PacketTagList::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3ParameterLogger_methods(root_module, cls):
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(ns3::ParameterLogger const & arg0) [constructor]
cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')])
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(std::ostream & os) [constructor]
cls.add_constructor([param('std::ostream &', 'os')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t v) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t v) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
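
# Illustrative sketch (hand-written, not generated): Tag's pure-virtual hooks
# (registered further above) receive the TagBuffer registered here; every
# WriteX in Serialize must be mirrored by the matching ReadX in Deserialize,
# in the same order. Whether a Python-side Tag subclass is fully usable also
# depends on TypeId plumbing normally written in C++, so treat this strictly
# as a sketch of the API shape:
def _example_tag_subclass():
    import ns.network
    class FlagTag(ns.network.Tag):               # hypothetical user tag
        def __init__(self):
            super(FlagTag, self).__init__()
            self.flag = 0
        def GetSerializedSize(self):
            return 4                             # one uint32
        def Serialize(self, buf):                # buf: ns3::TagBuffer
            buf.WriteU32(self.flag)
        def Deserialize(self, buf):
            self.flag = buf.ReadU32()
        def Print(self, os):
            pass
    return FlagTag
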
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit=::ns3::Time::Unit::AUTO) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit', default_value='::ns3::Time::Unit::AUTO')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): ns3::Time ns3::Time::RoundTo(ns3::Time::Unit unit) const [member function]
cls.add_method('RoundTo',
'ns3::Time',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
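
# Illustrative sketch (hand-written, not generated): the constructors and
# operators registered above give Time natural arithmetic and comparison
# semantics in Python. A minimal example, assuming `ns.core` bindings:
def _example_time_arithmetic():
    import ns.core
    t = ns.core.Time("1ms") + ns.core.Time("500us")  # string ctor + operator+
    assert t.IsStrictlyPositive()
    assert ns.core.Time("1ms") < t
    return t.GetMicroSeconds()                       # -> 1500
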
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
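
# Illustrative note (hand-written, not generated): TimeWithUnit exists only so
# Time.As() can pick a display unit; with add_output_stream_operator() it
# should be printable via str(). The Time.MS enum value is assumed to be
# exposed by the Unit enum registration elsewhere in this file:
def _example_time_with_unit():
    import ns.core
    return str(ns.core.Time("2500us").As(ns.core.Time.MS))  # e.g. "+2.5ms"
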
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'ns3::TypeId::hash_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint16_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint16_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
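
# Illustrative sketch (hand-written, not generated): the lookup and
# introspection methods registered above make the TypeId database browsable
# from Python:
def _example_typeid_introspection():
    import ns.core
    tid = ns.core.TypeId.LookupByName("ns3::Object")
    names = [tid.GetAttributeFullName(i) for i in range(tid.GetAttributeN())]
    return tid.GetName(), tid.HasParent(), names
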
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
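
# Illustrative sketch (hand-written, not generated): AttributeInformation is
# an out-parameter struct; TypeId.LookupAttributeByName (registered further
# above with transfer_ownership=False) fills a caller-owned instance. The
# TypeId and attribute names below are purely hypothetical:
def _example_lookup_attribute():
    import ns.core
    tid = ns.core.TypeId.LookupByName("ns3::ConstantRandomVariable")
    info = ns.core.TypeId.AttributeInformation()
    if tid.LookupAttributeByName("Constant", info):
        return info.name, info.help
    return None
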
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::int64x64_t'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_unary_numeric_operator('-')
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(double const value) [constructor]
cls.add_constructor([param('double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long double const value) [constructor]
cls.add_constructor([param('long double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int const v) [constructor]
cls.add_constructor([param('int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long int const v) [constructor]
cls.add_constructor([param('long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int const v) [constructor]
cls.add_constructor([param('long long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int const v) [constructor]
cls.add_constructor([param('unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int const v) [constructor]
cls.add_constructor([param('long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int const v) [constructor]
cls.add_constructor([param('long long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t const hi, uint64_t const lo) [constructor]
cls.add_constructor([param('int64_t const', 'hi'), param('uint64_t const', 'lo')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetInt() const [member function]
cls.add_method('GetInt',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t const v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t const', 'v')],
is_static=True)
## int64x64-128.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::Round() const [member function]
cls.add_method('Round',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
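
# Illustrative sketch (hand-written, not generated): int64x64_t is ns-3's
# Q64.64 fixed-point type; the constructors and operators registered above
# make it behave like a number in Python:
def _example_int64x64():
    import ns.core
    x = ns.core.int64x64_t(1.5)            # double ctor registered above
    y = x * ns.core.int64x64_t(2)          # int ctor + operator*
    return y.GetDouble(), y.GetHigh(), y.GetLow()  # ~3.0, integer/fraction words
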
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject() const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[],
custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object'])
## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[param('ns3::TypeId', 'tid')],
custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object'])
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='protected')
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
is_virtual=True, visibility='protected')
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
is_virtual=True, visibility='protected')
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
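
# Illustrative sketch (hand-written, not generated): AggregateObject and the
# AggregateIterator registered above implement ns-3's object aggregation; a
# typical Python walk over everything aggregated to an object:
def _example_aggregation(node, other):
    # node, other: Ptr<ns3::Object> instances created elsewhere
    node.AggregateObject(other)
    found = node.GetObject(other.GetInstanceTypeId())  # TypeId-based lookup
    peers = []
    it = node.GetAggregateIterator()
    while it.HasNext():
        peers.append(it.Next())        # Ptr<const ns3::Object>
    return found, peers
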
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
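# Connect/ConnectWithoutContext and their Disconnect counterparts are the
# pure-virtual hooks behind ns-3's tracing system; scripts normally reach
# them through the TraceConnect helpers on ObjectBase rather than through a
# TraceSourceAccessor directly. A hedged sketch (the "MacTx" source name and
# the auto-wrapping of a bare Python callable are assumptions):
#
#   def on_tx(packet):
#       print("tx", packet.GetSize())
#   device.TraceConnectWithoutContext("MacTx", on_tx)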
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
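# AttributeAccessor and AttributeChecker above, together with AttributeValue
# below, form the three-way contract of ns-3's attribute system: the
# accessor moves a value in and out of an object, the checker validates it
# and names its type, and the value is the typed container that round-trips
# through strings. A sketch of that round-trip (obtaining 'checker' from the
# attribute's TypeId metadata is assumed):
#
#   ok = value.DeserializeFromString("10.1.1.1", checker)
#   text = value.SerializeToString(checker)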
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::ObjectBase*'], visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
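# CallbackValue wraps a CallbackBase so callbacks can travel through the
# generic attribute machinery like any other value type. A hedged sketch
# (the attribute name is illustrative, and 'cb' must already be a
# CallbackBase rather than a plain Python function):
#
#   obj.SetAttribute("UnicastForwardCallback", ns.core.CallbackValue(cb))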
def register_Ns3Channel_methods(root_module, cls):
## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [constructor]
cls.add_constructor([param('ns3::Channel const &', 'arg0')])
## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
cls.add_constructor([])
## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(std::size_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('std::size_t', 'i')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## channel.h (module 'network'): std::size_t ns3::Channel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'std::size_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True, visibility='private')
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True, visibility='private')
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True, visibility='private')
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, is_virtual=True, visibility='protected')
return
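# EventImpl::Notify is registered as a protected pure-virtual method, which
# makes pybindgen emit a helper (proxy) class so that Python subclasses can
# override it, while Cancel/Invoke/IsCancelled remain ordinary public calls.
# A minimal sketch of such a subclass (hypothetical, not part of this file):
#
#   class PrintEvent(ns.core.EventImpl):
#       def Notify(self):
#           print("event fired")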
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
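# The Ipv4Address/Ipv4Mask/Ipv6Address/Ipv6Prefix/Mac48Address blocks above
# are five instances of one template: each type gets a *Checker with only
# default and copy constructors, and a *Value with Get/Set plus the string
# (de)serialization overrides. That pairing is what lets attribute strings
# such as "00:00:00:00:00:01" type-check; a sketch (attribute name assumed):
#
#   dev.SetAttribute("Address", ns.network.Mac48AddressValue(
#       ns.network.Mac48Address("00:00:00:00:00:01")))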
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::NetDevice::PromiscReceiveCallback cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::NetDevice::ReceiveCallback cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
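# Every NetDevice method above is pure virtual: this registration only
# describes the abstract device interface, and concrete devices such as
# BridgeNetDevice (below) re-register the same signatures as ordinary
# virtuals. Note how the ReceiveCallback/PromiscReceiveCallback typedefs are
# spelled out as full ns3::Callback<...> template instantiations, which is
# how pybindgen matches them against the C++ types.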
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
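# NixVector packs per-hop neighbor indices into a bit string for nix-vector
# routing: AddNeighborIndex appends BitCount(n) bits for a hop and
# ExtractNeighborIndex consumes them at forwarding time. A hedged
# round-trip sketch:
#
#   nix = ns.network.NixVector()
#   nix.AddNeighborIndex(2, nix.BitCount(4))          # go to neighbor 2 of 4
#   idx = nix.ExtractNeighborIndex(nix.BitCount(4))   # idx == 2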
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag, uint32_t start, uint32_t end) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag'), param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header, uint32_t size) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
cls.add_method('ToString',
'std::string',
[],
is_const=True)
return
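# The Packet registration above is the largest in this file. Headers and
# trailers are added and removed by reference (RemoveHeader fills in the
# passed object and returns the bytes consumed), while byte tags follow byte
# ranges across fragmentation and packet tags attach to the packet as a
# whole. A hedged sketch of the common cycle (EthernetHeader is an
# illustrative header type):
#
#   pkt = ns.network.Packet(100)
#   hdr = ns.network.EthernetHeader()
#   pkt.AddHeader(hdr)
#   n = pkt.RemoveHeader(hdr)   # n bytes consumed, hdr filled in place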
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
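# BridgeChannel and BridgeNetDevice below are the classes this 'bridge'
# module actually adds; the registrations above re-register dependencies
# pulled in from 'core' and 'network' so their types can appear in these
# signatures. BridgeChannel aggregates the channels of the bridged ports and
# overrides GetNDevices/GetDevice from ns3::Channel, so a bridge presents
# itself as a single channel to the rest of the stack.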
def register_Ns3BridgeChannel_methods(root_module, cls):
## bridge-channel.h (module 'bridge'): static ns3::TypeId ns3::BridgeChannel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel::BridgeChannel() [constructor]
cls.add_constructor([])
## bridge-channel.h (module 'bridge'): void ns3::BridgeChannel::AddChannel(ns3::Ptr<ns3::Channel> bridgedChannel) [member function]
cls.add_method('AddChannel',
'void',
[param('ns3::Ptr< ns3::Channel >', 'bridgedChannel')])
## bridge-channel.h (module 'bridge'): std::size_t ns3::BridgeChannel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'std::size_t',
[],
is_const=True, is_virtual=True)
## bridge-channel.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeChannel::GetDevice(std::size_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('std::size_t', 'i')],
is_const=True, is_virtual=True)
return
def register_Ns3BridgeNetDevice_methods(root_module, cls):
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice::BridgeNetDevice() [constructor]
cls.add_constructor([])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddBridgePort(ns3::Ptr<ns3::NetDevice> bridgePort) [member function]
cls.add_method('AddBridgePort',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'bridgePort')])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetBridgePort(uint32_t n) const [member function]
cls.add_method('GetBridgePort',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'n')],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Channel> ns3::BridgeNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint16_t ns3::BridgeNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetNBridgePorts() const [member function]
cls.add_method('GetNBridgePorts',
'uint32_t',
[],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Node> ns3::BridgeNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): static ns3::TypeId ns3::BridgeNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetPromiscReceiveCallback(ns3::NetDevice::PromiscReceiveCallback cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetReceiveCallback(ns3::NetDevice::ReceiveCallback cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardBroadcast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardBroadcast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardUnicast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardUnicast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetLearnedState(ns3::Mac48Address source) [member function]
cls.add_method('GetLearnedState',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Mac48Address', 'source')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::Learn(ns3::Mac48Address source, ns3::Ptr<ns3::NetDevice> port) [member function]
cls.add_method('Learn',
'void',
[param('ns3::Mac48Address', 'source'), param('ns3::Ptr< ns3::NetDevice >', 'port')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ReceiveFromDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('ReceiveFromDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')],
visibility='protected')
return
def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
cls.add_method('operator()',
'ns3::ObjectBase *',
[],
custom_name='__call__', is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 | -2,653,983,618,763,929,000 | 63.234562 | 458 | 0.612375 | false |
viniciusgama/blog_gae | django/contrib/gis/gdal/prototypes/srs.py | 321 | 3378 | from ctypes import c_char_p, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.libgdal import lgdal, std_call
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, int_output, \
srs_output, string_output, void_output
## Shortcut generation for routines with known parameters.
def srs_double(f):
"""
Creates a function prototype for the OSR routines that take
    the OSRSpatialReference object and return a double value.
"""
return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
def units_func(f):
"""
Creates a ctypes function prototype for OSR units functions, e.g.,
OSRGetAngularUnits, OSRGetLinearUnits.
"""
return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)
# Creation & destruction.
clone_srs = srs_output(std_call('OSRClone'), [c_void_p])
new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p])
release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False)
destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False)
srs_validate = void_output(lgdal.OSRValidate, [c_void_p])
# Getting the semi_major, semi_minor, and flattening functions.
semi_major = srs_double(lgdal.OSRGetSemiMajor)
semi_minor = srs_double(lgdal.OSRGetSemiMinor)
invflattening = srs_double(lgdal.OSRGetInvFlattening)
# WKT, PROJ, EPSG, XML importation routines.
from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)])
from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p])
from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int])
from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p])
from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p])
# Morphing to/from ESRI WKT.
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p])
morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p])
# Identifying the EPSG
identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p])
# Getting the angular_units, linear_units functions
linear_units = units_func(lgdal.OSRGetLinearUnits)
angular_units = units_func(lgdal.OSRGetAngularUnits)
# For exporting to WKT, PROJ.4, "Pretty" WKT, and XML.
to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)])
to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)])
to_pretty_wkt = string_output(std_call('OSRExportToPrettyWkt'), [c_void_p, POINTER(c_char_p), c_int], offset=-2)
# Memory leak fixed in GDAL 1.5; still exists in 1.4.
to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2)
# String attribute retrieval routines.
get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int])
get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p])
get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p])
# SRS Properties
isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p])
islocal = int_output(lgdal.OSRIsLocal, [c_void_p])
isprojected = int_output(lgdal.OSRIsProjected, [c_void_p])
# Coordinate transformation
new_ct = srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p])
destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
| bsd-3-clause | 5,370,249,436,834,026,000 | 45.916667 | 112 | 0.736827 | false |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/social/backends/nk.py | 70 | 2723 | from urllib import urlencode
import six
from requests_oauthlib import OAuth1
from social.backends.oauth import BaseOAuth2
class NKOAuth2(BaseOAuth2):
"""NK OAuth authentication backend"""
name = 'nk'
AUTHORIZATION_URL = 'https://nk.pl/oauth2/login'
ACCESS_TOKEN_URL = 'https://nk.pl/oauth2/token'
SCOPE_SEPARATOR = ','
ACCESS_TOKEN_METHOD = 'POST'
SIGNATURE_TYPE_AUTH_HEADER = 'AUTH_HEADER'
EXTRA_DATA = [
('id', 'id'),
]
def get_user_details(self, response):
"""Return user details from NK account"""
entry = response['entry']
return {
'username': entry.get('displayName'),
'email': entry['emails'][0]['value'],
'first_name': entry.get('displayName').split(' ')[0],
'id': entry.get('id')
}
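    # Illustrative shape of the opensocial response consumed by
    # get_user_details() above (values are made up; only the keys mirror
    # the parsing code):
    #   {'entry': {'id': '123', 'displayName': 'Jan Kowalski',
    #              'emails': [{'value': '[email protected]'}]}}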
def auth_complete_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
return {
'grant_type': 'authorization_code', # request auth code
'code': self.data.get('code', ''), # server response code
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': self.get_redirect_uri(state),
'scope': self.get_scope_argument()
}
def get_user_id(self, details, response):
"""Return a unique ID for the current user, by default from server
response."""
return details.get(self.ID_KEY)
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
url = 'http://opensocial.nk-net.pl/v09/social/rest/people/@me?' + urlencode({
'nk_token': access_token,
'fields': 'name,surname,avatar,localization,age,gender,emails,birthdate'
})
return self.get_json(
url,
auth=self.oauth_auth(access_token)
)
def oauth_auth(self, token=None, oauth_verifier=None,
signature_type=SIGNATURE_TYPE_AUTH_HEADER):
key, secret = self.get_key_and_secret()
oauth_verifier = oauth_verifier or self.data.get('oauth_verifier')
token = token or {}
# decoding='utf-8' produces errors with python-requests on Python3
# since the final URL will be of type bytes
decoding = None if six.PY3 else 'utf-8'
state = self.get_or_create_state()
return OAuth1(key, secret,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=self.get_redirect_uri(state),
verifier=oauth_verifier,
signature_type=signature_type,
decoding=decoding)
| agpl-3.0 | 4,359,600,533,709,399,600 | 35.797297 | 85 | 0.576937 | false |
Senseg/Py4A | python-modules/twisted/twisted/python/urlpath.py | 81 | 3431 | # -*- test-case-name: twisted.test.test_paths -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
import urlparse
import urllib
class URLPath:
def __init__(self, scheme='', netloc='localhost', path='',
query='', fragment=''):
self.scheme = scheme or 'http'
self.netloc = netloc
self.path = path or '/'
self.query = query
self.fragment = fragment
_qpathlist = None
_uqpathlist = None
def pathList(self, unquote=0, copy=1):
if self._qpathlist is None:
self._qpathlist = self.path.split('/')
self._uqpathlist = map(urllib.unquote, self._qpathlist)
if unquote:
result = self._uqpathlist
else:
result = self._qpathlist
if copy:
return result[:]
else:
return result
def fromString(klass, st):
t = urlparse.urlsplit(st)
u = klass(*t)
return u
fromString = classmethod(fromString)
def fromRequest(klass, request):
return klass.fromString(request.prePathURL())
fromRequest = classmethod(fromRequest)
def _pathMod(self, newpathsegs, keepQuery):
if keepQuery:
query = self.query
else:
query = ''
return URLPath(self.scheme,
self.netloc,
'/'.join(newpathsegs),
query)
def sibling(self, path, keepQuery=0):
l = self.pathList()
l[-1] = path
return self._pathMod(l, keepQuery)
def child(self, path, keepQuery=0):
l = self.pathList()
if l[-1] == '':
l[-1] = path
else:
l.append(path)
return self._pathMod(l, keepQuery)
def parent(self, keepQuery=0):
l = self.pathList()
if l[-1] == '':
del l[-2]
else:
# We are a file, such as http://example.com/foo/bar
# our parent directory is http://example.com/
l.pop()
l[-1] = ''
return self._pathMod(l, keepQuery)
def here(self, keepQuery=0):
l = self.pathList()
if l[-1] != '':
l[-1] = ''
return self._pathMod(l, keepQuery)
def click(self, st):
"""Return a path which is the URL where a browser would presumably take
you if you clicked on a link with an HREF as given.
"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(st)
if not scheme:
scheme = self.scheme
if not netloc:
netloc = self.netloc
if not path:
path = self.path
if not query:
query = self.query
elif path[0] != '/':
l = self.pathList()
l[-1] = path
path = '/'.join(l)
return URLPath(scheme,
netloc,
path,
query,
fragment)
def __str__(self):
x = urlparse.urlunsplit((
self.scheme, self.netloc, self.path,
self.query, self.fragment))
return x
def __repr__(self):
return ('URLPath(scheme=%r, netloc=%r, path=%r, query=%r, fragment=%r)'
% (self.scheme, self.netloc, self.path, self.query, self.fragment))
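# A usage sketch (illustrative URLs only; each call returns a new URLPath):
#
#   url = URLPath.fromString('http://example.com/foo/bar?a=1')
#   str(url.sibling('baz'))    # 'http://example.com/foo/baz'
#   str(url.child('quux'))     # 'http://example.com/foo/bar/quux'
#   str(url.click('baz?x=2'))  # 'http://example.com/foo/baz?x=2'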
| apache-2.0 | 7,563,191,472,397,032,000 | 27.122951 | 83 | 0.498688 | false |
48thct2jtnf/P | contrib/linearize/linearize-hashes.py | 18 | 3037 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
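# Example CONFIG-FILE contents (illustrative values; the keys match the
# settings parsed below, and the numeric values echo the script defaults):
#
#   host=127.0.0.1
#   port=51473
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000
#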
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 51473
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| mit | 216,618,072,909,147,650 | 25.876106 | 90 | 0.663813 | false |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
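# A quick note on what is being swept (standard ridge algebra, not from the
# original script): ridge regression solves min_w ||Xw - y||^2 + alpha*||w||^2,
# with closed-form solution w = (X^T X + alpha*I)^{-1} X^T y, so each curve
# below is one coefficient of w plotted as a function of alpha.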
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause | 6,326,107,182,332,831,000 | 27.050847 | 79 | 0.59577 | false |
wwfifi/uliweb | uliweb/orm/__init__.py | 1 | 148065 | # This module is used for wrapping SqlAlchemy to a simple ORM
# Author: limodou <[email protected]>
__all__ = ['Field', 'get_connection', 'Model', 'do_',
'set_debug_query', 'set_auto_create', 'set_auto_set_model',
'get_model', 'set_model', 'engine_manager',
'set_auto_transaction_in_web', 'set_auto_transaction_in_notweb',
'set_tablename_converter', 'set_check_max_length', 'set_post_do',
'rawsql', 'Lazy', 'set_echo', 'Session', 'get_session', 'set_session',
'CHAR', 'BLOB', 'TEXT', 'DECIMAL', 'Index', 'datetime', 'decimal',
'Begin', 'Commit', 'Rollback', 'Reset', 'ResetAll', 'CommitAll', 'RollbackAll',
'PICKLE', 'BIGINT', 'set_pk_type', 'PKTYPE', 'FILE', 'INT', 'SMALLINT', 'DATE',
'TIME', 'DATETIME', 'FLOAT', 'BOOLEAN', 'UUID', 'BINARY', 'VARBINARY',
'JSON', 'UUID_B',
'BlobProperty', 'BooleanProperty', 'DateProperty', 'DateTimeProperty',
'TimeProperty', 'DecimalProperty', 'FloatProperty', 'SQLStorage',
'IntegerProperty', 'Property', 'StringProperty', 'CharProperty',
'TextProperty', 'UnicodeProperty', 'Reference', 'ReferenceProperty',
'PickleProperty', 'BigIntegerProperty', 'FileProperty', 'JsonProperty',
'UUIDBinaryProperty', 'UUIDProperty',
'SelfReference', 'SelfReferenceProperty', 'OneToOne', 'ManyToMany',
'ReservedWordError', 'BadValueError', 'DuplicatePropertyError',
'ModelInstanceError', 'KindError', 'ConfigurationError', 'SaveError',
'BadPropertyTypeError', 'set_lazy_model_init',
'begin_sql_monitor', 'close_sql_monitor', 'set_model_config', 'text',
'get_object', 'get_cached_object',
'set_server_default', 'set_nullable', 'set_manytomany_index_reverse',
'NotFound',
'get_field_type', 'create_model', 'get_metadata', 'migrate_tables',
'print_model',
]
__auto_create__ = False
__auto_set_model__ = True
__auto_transaction_in_web__ = False
__auto_transaction_in_notweb__ = False
__debug_query__ = None
__default_engine__ = 'default'
__default_encoding__ = 'utf-8'
__zero_float__ = 0.0000005
__models__ = {}
__model_paths__ = {}
__pk_type__ = 'int'
__default_tablename_converter__ = None
__check_max_length__ = False #used to check max_length parameter
__default_post_do__ = None #used to process post_do topic
__nullable__ = False #nullable is not enabled by default
__server_default__ = False #server_default is not enabled by default
__manytomany_index_reverse__ = False
__lazy_model_init__ = False
import sys
import decimal
import threading
import datetime
import copy
import re
import cPickle as pickle
from uliweb.utils import date as _date
from uliweb.utils.common import (flat_list, classonlymethod, simple_value,
safe_str, import_attr)
from sqlalchemy import *
from sqlalchemy.sql import select, ColumnElement, text, true
from sqlalchemy.pool import NullPool
import sqlalchemy.engine.base as EngineBase
from uliweb.core import dispatch
import threading
import warnings
import inspect
from uliweb.utils.sorteddict import SortedDict
from . import patch
Local = threading.local()
Local.dispatch_send = True
Local.conn = {}
Local.trans = {}
Local.echo = False
Local.echo_func = sys.stdout.write
class Error(Exception):pass
class NotFound(Error):
def __init__(self, message, model, id):
self.message = message
self.model = model
self.id = id
def __str__(self):
return "%s(%s) instance can't be found" % (self.model.__name__, str(self.id))
class ModelNotFound(Error):pass
class ReservedWordError(Error):pass
class ModelInstanceError(Error):pass
class DuplicatePropertyError(Error):
"""Raised when a property is duplicated in a model definition."""
class BadValueError(Error):pass
class BadPropertyTypeError(Error):pass
class KindError(Error):pass
class ConfigurationError(Error):pass
class SaveError(Error):pass
_SELF_REFERENCE = object()
class Lazy(object): pass
class SQLStorage(dict):
"""
a dictionary that let you do d['a'] as well as d.a
"""
def __getattr__(self, key): return self[key]
def __setattr__(self, key, value):
if self.has_key(key):
raise SyntaxError('Object exists and cannot be redefined')
self[key] = value
def __repr__(self): return '<SQLStorage ' + dict.__repr__(self) + '>'
def set_auto_create(flag):
global __auto_create__
__auto_create__ = flag
def set_auto_transaction_in_notweb(flag):
global __auto_transaction_in_notweb__
__auto_transaction_in_notweb__ = flag
def set_auto_transaction_in_web(flag):
global __auto_transaction_in_web__
__auto_transaction_in_web__ = flag
def set_auto_set_model(flag):
global __auto_set_model__
__auto_set_model__ = flag
def set_debug_query(flag):
global __debug_query__
__debug_query__ = flag
def set_check_max_length(flag):
global __check_max_length__
__check_max_length__ = flag
def set_post_do(func):
global __default_post_do__
__default_post_do__ = func
def set_nullable(flag):
global __nullable__
__nullable__ = flag
def set_server_default(flag):
global __server_default__
__server_default__ = flag
def set_manytomany_index_reverse(flag):
global __manytomany_index_reverse__
__manytomany_index_reverse__ = flag
def set_encoding(encoding):
global __default_encoding__
__default_encoding__ = encoding
def set_dispatch_send(flag):
global Local
Local.dispatch_send = flag
def set_tablename_converter(converter=None):
global __default_tablename_converter__
__default_tablename_converter__ = converter
def set_lazy_model_init(flag):
global __lazy_model_init__
__lazy_model_init__ = flag
def get_tablename(tablename):
global __default_tablename_converter__
c = __default_tablename_converter__
if not c:
c = lambda x:x.lower()
return c(tablename)
def get_dispatch_send(default=True):
global Local
if not hasattr(Local, 'dispatch_send'):
Local.dispatch_send = default
return Local.dispatch_send
def set_echo(flag, time=None, explain=False, caller=True, session=None):
global Local
Local.echo = flag
Local.echo_args = {'time':time, 'explain':explain, 'caller':caller,
'session':None}
def set_pk_type(name):
global __pk_type__
__pk_type__ = name
def PKTYPE():
if __pk_type__ == 'int':
return int
else:
return BIGINT
def PKCLASS():
if __pk_type__ == 'int':
return Integer
else:
return BigInteger
class NamedEngine(object):
def __init__(self, name, options):
self.name = name
d = SQLStorage({
'engine_name':name,
'connection_args':{},
'debug_log':None,
'connection_type':'long',
'duplication':False,
})
strategy = options.pop('strategy', None)
d.update(options)
if d.get('debug_log', None) is None:
d['debug_log'] = __debug_query__
if d.get('connection_type') == 'short':
d['connection_args']['poolclass'] = NullPool
if strategy:
d['connection_args']['strategy'] = strategy
self.options = d
self.engine_instance = None
self.metadata = MetaData()
self._models = {}
self.local = threading.local() #used to save thread vars
self._create()
def _get_models(self):
if self.options.duplication:
return engine_manager[self.options.duplication].models
else:
return self._models
models = property(fget=_get_models)
def _create(self, new=False):
c = self.options
db = self.engine_instance
if not self.engine_instance or new:
args = c.get('connection_args', {})
self.engine_instance = create_engine(c.get('connection_string'), **args)
self.engine_instance.echo = c['debug_log']
self.engine_instance.metadata = self.metadata
self.metadata.bind = self.engine_instance
return self.engine_instance
def session(self, create=True):
"""
        Used to create the default session
"""
if hasattr(self.local, 'session'):
return self.local.session
else:
if create:
s = Session(self.name)
self.local.session = s
return s
def set_session(self, session):
self.local.session = session
@property
def engine(self):
return self.engine_instance
def print_pool_status(self):
if self.engine.pool:
print self.engine.pool.status()
class EngineManager(object):
def __init__(self):
self.engines = {}
def add(self, name, connection_args):
self.engines[name] = engine = NamedEngine(name, connection_args)
return engine
def get(self, name=None):
name = name or __default_engine__
engine = self.engines.get(name)
if not engine:
            raise Error('Engine %s does not exist yet' % name)
return engine
def __getitem__(self, name=None):
return self.get(name)
def __setitem__(self, name, connection_args):
return self.add(name, connection_args)
def __contains__(self, name):
return name in self.engines
def items(self):
return self.engines.items()
engine_manager = EngineManager()
class Session(object):
"""
    used to manage the relationship between an engine_name and a connection;
    it can also manage transactions
"""
def __init__(self, engine_name=None, auto_transaction=None,
auto_close=True, post_commit=None, post_commit_once=None):
"""
        If auto_transaction is True, it'll automatically start a transaction.
        In a web environment it'll be committed or rolled back after the
        request finishes; in a non-web environment you should invoke commit
        or rollback yourself.
"""
self.engine_name = engine_name or __default_engine__
self.auto_transaction = auto_transaction
self.auto_close = auto_close
self.engine = engine_manager[engine_name]
self._conn = None
self._trans = None
self.local_cache = {}
self.post_commit = post_commit or []
self.post_commit_once = post_commit_once or []
def __str__(self):
return '<Session engine_name:%s, auto_transaction=%r, auto_close=%r>' % (
self.engine_name, self.auto_transaction, self.auto_close)
@property
def need_transaction(self):
from uliweb import is_in_web
global __auto_transaction_in_notweb__, __auto_transaction_in_web__
if self.auto_transaction is not None:
return self.auto_transaction
else:
#distinguish in web or not web environment
if is_in_web():
return __auto_transaction_in_web__
else:
return __auto_transaction_in_notweb__
@property
def connection(self):
if self._conn:
return self._conn
else:
self._conn = self.engine.engine.connect()
return self._conn
def execute(self, query, *args):
t = self.need_transaction
try:
if t:
self.begin()
return self.connection.execute(query, *args)
except:
if t:
self.rollback()
raise
def set_echo(self, flag, time=None, explain=False, caller=True):
global set_echo
set_echo(flag, time, explain, caller, self)
def do_(self, query, args=None):
global do_
return do_(query, self, args)
def begin(self):
if not self._trans:
self.connection
self._trans = self._conn.begin()
return self._trans
def commit(self):
if self._trans and self._conn.in_transaction():
self._trans.commit()
self._trans = None
if self.auto_close:
self._close()
#add post commit hook
if self.post_commit:
if not isinstance(self.post_commit, (list, tuple)):
self.post_commit = [self.post_commit]
for c in self.post_commit:
c()
#add post commit once hook
if self.post_commit_once:
if not isinstance(self.post_commit_once, (list, tuple)):
post_commit_once = [self.post_commit_once]
else:
post_commit_once = self.post_commit_once
self.post_commit_once = []
for c in post_commit_once:
c()
def in_transaction(self):
if not self._conn:
return False
return self._conn.in_transaction()
def rollback(self):
if self._trans and self._conn.in_transaction():
self._trans.rollback()
self._trans = None
if self.auto_close:
self._close()
def _close(self):
if self._conn:
self._conn.close()
self._conn = None
self.local_cache = {}
if self.engine.options.connection_type == 'short':
self.engine.engine.dispose()
def close(self):
self.rollback()
self._close()
def get_local_cache(self, key, creator=None):
value = self.local_cache.get(key)
if value:
return value
if callable(creator):
value = creator()
else:
value = creator
if value:
self.local_cache[key] = value
return value
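# A minimal usage sketch for Session (illustrative; it assumes an engine
# named 'default' has already been configured via get_connection()):
def _example_session_usage():
    session = get_session('default')
    Begin(session)
    try:
        session.do_(text("SELECT 1"))
        Commit(session)
    except Exception:
        Rollback(session)
        raise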
def get_connection(connection='', engine_name=None, connection_type='long', **args):
"""
    Create a NamedEngine or return an existing engine instance.
    If '://' is included in the connection parameter, a new engine object
    is created; otherwise the existing engine instance is returned.
"""
engine_name = engine_name or __default_engine__
if '://' in connection:
d = {
'connection_string':connection,
'connection_args':args,
'connection_type':connection_type,
}
return engine_manager.add(engine_name, d).engine
else:
connection = connection or __default_engine__
if connection in engine_manager:
return engine_manager[connection].engine
else:
raise Error("Can't find engine %s" % connection)
def get_metadata(engine_name=None):
"""
    Get the metadata of an engine, mainly used for alembic.
    It'll import all tables.
"""
dispatch.get(None, 'load_models')
engine = engine_manager[engine_name]
for tablename, m in engine.models.items():
get_model(tablename, engine_name, signal=False)
if hasattr(m, '__dynamic__') and getattr(m, '__dynamic__'):
m.table.__mapping_only__ = True
return engine.metadata
def get_session(ec=None, create=True):
"""
    ec - engine name (string) or Session object
"""
ec = ec or __default_engine__
if isinstance(ec, (str, unicode)):
session = engine_manager[ec].session(create=True)
elif isinstance(ec, Session):
session = ec
else:
raise Error("Connection %r should be existed engine name or Session object" % ec)
return session
def set_session(session=None, engine_name='default'):
if not session:
session = Session()
engine_manager[engine_name].set_session(session)
return session
def Reset(ec=None):
session = get_session(ec, False)
if session:
session.close()
def ResetAll():
for k, v in engine_manager.items():
session = v.session(create=False)
if session:
session.close()
@dispatch.bind('post_do', kind=dispatch.LOW)
def default_post_do(sender, query, conn, usetime):
if __default_post_do__:
__default_post_do__(sender, query, conn, usetime)
re_placeholder = re.compile(r'%\(\w+\)s')
def rawsql(query, ec=None):
if isinstance(query, Result):
query = query.get_query()
ec = ec or __default_engine__
engine = engine_manager[ec]
dialect = engine.engine.dialect
if isinstance(query, (str, unicode)):
return query
#return str(query.compile(compile_kwargs={"literal_binds": True})).replace('\n', '') + ';'
comp = query.compile(dialect=dialect)
b = re_placeholder.search(comp.string)
if b:
return comp.string % comp.params
else:
if dialect.name == 'postgresql':
return comp.string
else:
params = []
for k in comp.positiontup:
v = comp.params[k]
params.append(repr(simple_value(v)))
line = comp.string.replace('?', '%s') % tuple(params)
return line.replace('\n', '')+';'
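# A small sketch of rawsql() (illustrative; it assumes a 'default' engine is
# configured, since that engine's dialect is used to compile the query):
def _example_rawsql():
    t = Table('person', MetaData(),
              Column('id', Integer, primary_key=True),
              Column('name', String(30)))
    #prints something like: SELECT person.name FROM person WHERE person.id = 1;
    print rawsql(select([t.c.name]).where(t.c.id == 1))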
def get_engine_name(ec=None):
"""
Get the name of a engine or session
"""
ec = ec or __default_engine__
if isinstance(ec, (str, unicode)):
return ec
elif isinstance(ec, Session):
return ec.engine_name
else:
raise Error("Parameter ec should be an engine_name or Session object, but %r found" % ec)
def print_model(model, engine_name=None, skipblank=False):
from sqlalchemy.schema import CreateTable, CreateIndex
engine = engine_manager[engine_name].engine
M = get_model(model)
t = M.table
s = []
s.append("%s;" % str(CreateTable(t).compile(dialect=engine.dialect)).rstrip())
for x in t.indexes:
s.append("%s;" % CreateIndex(x))
sql = '\n'.join(s)
if skipblank:
return re.sub('[\t\n]+', '', sql)
else:
return sql
def do_(query, ec=None, args=None):
"""
Execute a query
"""
from time import time
from uliweb.utils.common import get_caller
conn = get_session(ec)
b = time()
result = conn.execute(query, *(args or ()))
t = time() - b
dispatch.call(ec, 'post_do', query, conn, t)
flag = False
sql = ''
if hasattr(Local, 'echo') and Local.echo:
if hasattr(Local, 'echo_args'):
_ec = Local.echo_args.get('session')
else:
_ec = None
engine_name = get_engine_name(ec)
_e = get_engine_name(_ec)
if not _ec or _ec and _ec == _e:
if hasattr(Local, 'echo_args') and Local.echo_args['time']:
if t >= Local.echo_args['time']:
sql = rawsql(query)
flag = True
else:
sql = rawsql(query)
flag = True
if flag:
print '\n===>>>>> [%s]' % engine_name,
if hasattr(Local, 'echo_args') and Local.echo_args['caller']:
v = get_caller(skip=__file__)
print '(%s:%d:%s)' % v
else:
print
print sql
if hasattr(Local, 'echo_args') and Local.echo_args['explain'] and sql:
r = conn.execute('explain '+sql).fetchone()
print '\n----\nExplain: %s' % ''.join(["%s=%r, " % (k, v) for k, v in r.items()])
print '===<<<<< time used %fs\n' % t
return result
def save_file(result, filename, encoding='utf8', headers=None, convertors=None, visitor=None):
"""
    Save a query result to a csv file.
    visitor can be used to convert values; all values should be converted to strings.
    A visitor function should be defined as:
        def visitor(keys, values, encoding):
            #return new values []
    convertors is used to convert a single column value, for example:
        convertors = {'field1':convert_func1, 'field2':convert_func2}
        def convert_func1(value, data):
            #value is the value of field1
            #data is the whole record
    If both visitor and convertors are provided, only visitor takes effect.
    headers is used to map a column name to a provided header value.
"""
import csv
from uliweb.utils.common import simple_value
convertors = convertors or {}
headers = headers or {}
def convert(k, v, data):
f = convertors.get(k)
if f:
v = f(v, data)
return v
def convert_header(k):
return headers.get(k, k)
def _r(x):
if isinstance(x, (str, unicode)):
return re.sub('\r\n|\r|\n', ' ', x)
else:
return x
with open(filename, 'wb') as f:
w = csv.writer(f)
w.writerow([simple_value(convert_header(x), encoding=encoding) for x in result.keys()])
for row in result:
if visitor and callable(visitor):
_row = visitor(result.keys, row.values(), encoding)
else:
_row = [convert(k, v, row) for k, v in zip(result.keys(), row.values())]
r = [simple_value(_r(x), encoding=encoding) for x in _row]
w.writerow(r)
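# A usage sketch for save_file() (illustrative table and field names; it
# assumes a configured 'default' engine):
def _example_save_file():
    def yesno(value, data):
        #value is the column value, data is the whole row
        return 'Y' if value else 'N'
    result = do_(text("SELECT id, active FROM user"))
    save_file(result, 'users.csv', convertors={'active':yesno})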
def Begin(ec=None):
session = get_session(ec)
return session.begin()
def Commit(ec=None, close=None):
if close:
warnings.simplefilter('default')
warnings.warn("close parameter will not need at all.", DeprecationWarning)
session = get_session(ec, False)
if session:
return session.commit()
def CommitAll(close=None):
"""
Commit all transactions according Local.conn
"""
if close:
warnings.simplefilter('default')
warnings.warn("close parameter will not need at all.", DeprecationWarning)
for k, v in engine_manager.items():
session = v.session(create=False)
if session:
session.commit()
def Rollback(ec=None, close=None):
if close:
warnings.simplefilter('default')
warnings.warn("close parameter will not need at all.", DeprecationWarning)
session = get_session(ec, False)
if session:
return session.rollback()
def RollbackAll(close=None):
"""
Rollback all transactions, according Local.conn
"""
if close:
warnings.simplefilter('default')
warnings.warn("close parameter will not need at all.", DeprecationWarning)
for k, v in engine_manager.items():
session = v.session(create=False)
if session:
session.rollback()
def check_reserved_word(f):
if f in ['put', 'save', 'table', 'tablename', 'c', 'columns', 'manytomany'] or f in dir(Model):
raise ReservedWordError(
"Cannot define property using reserved word '%s'. " % f
)
def set_model(model, tablename=None, created=None, appname=None, model_path=None):
"""
    Register a model and tablename to a global variable.
    model could be in string format, i.e., 'uliweb.contrib.auth.models.User'
    :param appname: if not given, it is derived from the model path
    item structure:
        created
        model
        model_path
        appname
    For a dynamic model you should pass model_path as '' (an empty string)
"""
if isinstance(model, type) and issubclass(model, Model):
#use alias first
tablename = model._alias or model.tablename
tablename = tablename.lower()
#set global __models__
d = __models__.setdefault(tablename, {})
engines = d.get('config', {}).pop('engines', ['default'])
if isinstance(engines, (str, unicode)):
engines = [engines]
d['engines'] = engines
item = {}
if created is not None:
item['created'] = created
else:
item['created'] = None
if isinstance(model, (str, unicode)):
if model_path is None:
model_path = model
else:
model_path = model_path
if not appname:
appname = model.rsplit('.', 2)[0]
#for example 'uliweb.contrib.auth.models.User'
model = None
else:
appname = model.__module__.rsplit('.', 1)[0]
if model_path is None:
model_path = model.__module__ + '.' + model.__name__
else:
model_path = ''
#for example 'uliweb.contrib.auth.models'
model.__engines__ = engines
item['model'] = model
item['model_path'] = model_path
item['appname'] = appname
d['model_path'] = model_path
d['appname'] = appname
for name in engines:
if not isinstance(name, (str, unicode)):
raise BadValueError('Engine name should be string type, but %r found' % name)
engine_manager[name].models[tablename] = item.copy()
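# A typical registration (the dotted path is the docstring's own example);
# this is usually done at initialization time so that the model module is
# only imported on first get_model() access:
#   set_model('uliweb.contrib.auth.models.User', 'user')
#   User = get_model('user')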
def set_model_config(model_name, config, replace=False):
"""
    This function should only be used in the initialization phase.
    :param model_name: model name, it should be a string
    :param config: config should be a dict, e.g.
        {'__mapping_only__', '__tablename__', '__ext_model__'}
    :param replace: if True, replace the original config; if False, update it
"""
assert isinstance(model_name, str)
assert isinstance(config, dict)
d = __models__.setdefault(model_name, {})
if replace:
d['config'] = config
else:
c = d.setdefault('config', {})
c.update(config)
def create_model(modelname, fields, indexes=None, basemodel=None, **props):
"""
    Create a model dynamically.
    :param fields: field definitions, formatted like:
        [
            {'name':name, 'type':type, ...},
            ...
        ]
        type should be a string, e.g. 'str', 'int', etc.
        The remaining keys are passed as kwargs to the Property class
        matching the field type.
    :param props: Model attributes, such as '__mapping_only__', '__replace__'
    :param indexes: multi-field indexes; a single-field index can be set
        directly with `index=True` on the field. The value format should be:
        [
            {'name':name, 'fields':[...], ...},
        ]
        e.g. [
            {'name':'audit_idx', 'fields':['table_id', 'obj_id']}
        ]
        Extra kwargs can be omitted.
    :param basemodel: will be the new Model's base class, so the new Model
        can inherit parent methods; it can be a string or a real class object
"""
assert not props or isinstance(props, dict)
assert not indexes or isinstance(indexes, list)
props = SortedDict(props or {})
props['__dynamic__'] = True
props['__config__'] = False
for p in fields:
kwargs = p.copy()
name = kwargs.pop('name')
_type = kwargs.pop('type')
#if the key is start with '_', then remove it
for k in kwargs.keys():
if k.startswith('_'):
kwargs.pop(k, None)
field_type = get_field_type(_type)
prop = field_type(**kwargs)
props[name] = prop
if basemodel:
model = import_attr(basemodel)
# model.clear_relation()
else:
model = Model
# try:
# old = get_model(modelname, signal=False)
# old.clear_relation()
# except ModelNotFound as e:
# pass
cls = type(str(modelname.title()), (model,), props)
tablename = props.get('__tablename__', modelname)
set_model(cls, tablename, appname=__name__, model_path='')
get_model(modelname, signal=False, reload=True)
indexes = indexes or []
for x in indexes:
kwargs = x.copy()
name = kwargs.pop('name')
fields = kwargs.pop('fields')
#if the key is start with '_', then remove it
for k in kwargs.keys():
if k.startswith('_'):
kwargs.pop(k, None)
if not isinstance(fields, (list, tuple)):
raise ValueError("Index value format is not right, the value is %r" % indexes)
props = []
for y in fields:
props.append(cls.c[y])
Index(name, *props, **kwargs)
return cls
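# A usage sketch for create_model() (all names are illustrative, and 'str'
# / 'int' are assumed to be valid field type names for get_field_type()):
def _example_create_model():
    return create_model('person',
        fields=[
            {'name':'name', 'type':'str', 'max_length':30},
            {'name':'age', 'type':'int'},
        ],
        indexes=[{'name':'person_name_idx', 'fields':['name']}])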
def valid_model(model, engine_name=None):
if isinstance(model, type) and issubclass(model, Model):
return True
if engine_name:
engine = engine_manager[engine_name]
return model in engine.models
else:
return True
def check_model_class(model_cls):
# """
# :param model: Model instance
# Model.__engines__ could be a list, so if there are multiple then use
# the first one
# """
#check dynamic flag
if getattr(model_cls, "__dynamic__", False):
return True
#check the model_path
model_path = model_cls.__module__ + '.' + model_cls.__name__
_path = __models__.get(model_cls.tablename, {}).get('model_path', '')
if _path and model_path != _path:
return False
return True
def find_metadata(model):
"""
:param model: Model instance
"""
engine_name = model.get_engine_name()
engine = engine_manager[engine_name]
return engine.metadata
def get_model(model, engine_name=None, signal=True, reload=False):
"""
    Return a real model object: if the model is already a Model class, it is
    returned directly; otherwise it is imported.
    If engine_name is None and multiple engines are defined, 'default' is
    used; if only one engine is defined, that one is used.
    :param signal: used to switch the dispatch signal on or off
"""
if isinstance(model, type) and issubclass(model, Model):
return model
if not isinstance(model, (str, unicode)):
raise Error("Model %r should be string or unicode type" % model)
    #make model name lower case
model = model.lower()
model_item = __models__.get(model)
if not model_item:
model_item = dispatch.get(None, 'find_model', model_name=model)
if model_item:
if not engine_name:
            #search according to model_item; if more than one engine is defined, fall back to 'default'
engines = model_item['engines']
if len(engines) > 1:
engine_name = __default_engine__
else:
engine_name = engines[0]
engine = engine_manager[engine_name]
item = engine._models.get(model)
#process duplication
if not item and engine.options.duplication:
_item = engine.models.get(model)
if _item:
item = _item.copy()
item['model'] = None
engine._models[model] = item
if item:
        loaded = False #True means the model is already loaded, so consider whether it needs to be cached again
m = item['model']
m_config = __models__[model].get('config', {})
if isinstance(m, type) and issubclass(m, Model):
loaded = True
if reload:
loaded = False
        #call the get_model pre-hook
if signal:
model_inst = dispatch.get(None, 'get_model', model_name=model, model_inst=m,
model_info=item, model_config=m_config) or m
if m is not model_inst:
loaded = False
else:
model_inst = m
else:
            #call the get_model pre-hook
if signal:
model_inst = dispatch.get(None, 'get_model', model_name=model, model_inst=None,
model_info=item, model_config=m_config)
else:
model_inst = None
if not model_inst:
if item['model_path']:
mod_path, name = item['model_path'].rsplit('.', 1)
mod = __import__(mod_path, fromlist=['*'])
model_inst = getattr(mod, name)
#empty model_path means dynamic model
if not model_inst:
raise ModelNotFound("Can't found the model %s in engine %s" % (model, engine_name))
if not loaded:
if model_inst._bound_classname == model and not reload:
model_inst = model_inst._use(engine_name)
item['model'] = model_inst
else:
config = __models__[model].get('config', {})
if config:
for k, v in config.items():
setattr(model_inst, k, v)
item['model'] = model_inst
model_inst._alias = model
model_inst._engine_name = engine_name
if __lazy_model_init__:
for k, v in model_inst.properties.items():
v.__property_config__(model_inst, k)
#add bind process
if reload:
reset = True
else:
reset = False
model_inst.bind(engine.metadata, reset=reset)
#post get_model
if signal:
dispatch.call(None, 'post_get_model', model_name=model, model_inst=model_inst,
model_info=item, model_config=m_config)
return model_inst
raise ModelNotFound("Can't found the model %s in engine %s" % (model, engine_name))
def get_object_id(engine_name, tablename, id):
return 'OC:%s:%s:%s' % (engine_name, tablename, str(id))
def get_object(table, id=None, condition=None, cache=False, fields=None, use_local=False,
engine_name=None, session=None):
"""
    Look the object up in the session-local cache first, falling back to
    get(cache=True) if it is not found there.
"""
from uliweb import functions, settings
model = get_model(table, engine_name)
#if id is an object of Model, so get the real id value
if isinstance(id, Model):
return id
if cache:
if use_local:
s = get_session(session)
key = get_object_id(s.engine_name, model.tablename, id)
value = s.get_local_cache(key)
if value:
return value
obj = model.get(id, condition=condition, fields=fields, cache=True)
if use_local:
value = s.get_local_cache(key, obj)
else:
obj = model.get(id, condition=condition, fields=fields)
return obj
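# A usage sketch (illustrative; 'user' must be a registered model):
#   obj = get_object('user', 1)                              #plain fetch
#   obj = get_object('user', 1, cache=True, use_local=True)  #cached fetch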
def get_cached_object(table, id, condition=None, cache=True, fields=None, use_local=True, session=None):
return get_object(table, id, condition, cache, fields, use_local, session)
class SQLMointor(object):
def __init__(self, key_length=65, record_details=False):
self.count = SortedDict()
self.total = 0
self.key_length = key_length
self.details = []
self.record_details = record_details
def post_do(sender, query, conn, usetime, self=self):
sql = str(query)
c = self.count.setdefault(sql, {'count':0, 'time':0})
c['count'] += 1
c['time'] += usetime
self.total += 1
if self.record_details:
self.details.append(rawsql(query))
self.post_do = post_do
def print_(self, message=''):
print
print '====== sql execution count %d <%s> =======' % (self.total, message)
for k, v in sorted(self.count.items(), key=lambda x:x[1]):
k = k.replace('\r', '')
k = k.replace('\n', '')
if self.key_length and self.key_length>1 and len(k) > self.key_length:
k = k[:self.key_length-3]+'...'
if self.key_length > 0:
format = "%%-%ds %%3d %%.3f" % self.key_length
else:
format = "%s %3d %.3f"
print format % (k, v['count'], v['time'])
if self.record_details:
print '====== sql statements %d ====' % self.total
for line in self.details:
print '.', line
print
def close(self):
self.count = {}
self.total = 0
self.details = []
def begin_sql_monitor(key_length=70, record_details=False):
sql_monitor = SQLMointor(key_length, record_details)
dispatch.bind('post_do')(sql_monitor.post_do)
return sql_monitor
def close_sql_monitor(monitor):
dispatch.unbind('post_do', monitor.post_do)
monitor.close()
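# A usage sketch for the SQL monitor (illustrative; it assumes a configured
# 'default' engine for the do_() call):
def _example_sql_monitor():
    monitor = begin_sql_monitor(record_details=True)
    do_(text("SELECT 1"))
    monitor.print_('demo')
    close_sql_monitor(monitor)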
def get_migrate_script(context, tables, metadata, engine_name=None):
from alembic.autogenerate.api import compare_metadata, _produce_net_changes, \
_autogen_context, _indent, _produce_upgrade_commands, _compare_tables
from sqlalchemy.engine.reflection import Inspector
diffs = []
engine = engine_manager[engine_name]
imports = set()
autogen_context, connection = _autogen_context(context, imports)
#init autogen_context
autogen_context['opts']['sqlalchemy_module_prefix'] = 'sa.'
autogen_context['opts']['alembic_module_prefix'] = 'op.'
inspector = Inspector.from_engine(connection)
_tables = set(inspector.get_table_names()) & set(tables)
conn_table_names = set(zip([None] * len(_tables), _tables))
for t in tables:
m = engine.models.get(t)
if m and not m['model']:
get_model(t, engine_name, signal=False)
metadata_table_names = set(zip([None] * len(tables), tables))
_compare_tables(conn_table_names, metadata_table_names,
(),
inspector, metadata, diffs, autogen_context, False)
script = """
def upgrade():
""" + _indent(_produce_upgrade_commands(diffs, autogen_context)) + """
upgrade()
"""
script = """
import sqlalchemy as sa
%s
""" % '\n'.join(list(imports)) + script
return script
def run_migrate_script(context, script):
import logging
from alembic.operations import Operations
log = logging.getLogger(__name__)
op = Operations(context)
code = compile(script, '<string>', 'exec', dont_inherit=True)
env = {'op':op}
log.debug(script)
exec code in env
def migrate_tables(tables, engine_name=None):
"""
Migrate dynamically created tables to the database
:param tables: list of table names, such as ['user']
"""
from alembic.migration import MigrationContext
engine = engine_manager[engine_name]
mc = MigrationContext.configure(engine.session().connection)
script = get_migrate_script(mc, tables, engine.metadata)
run_migrate_script(mc, script)
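#Usage sketch (the 'user' table name is an assumption): generate and run an
#alembic migration for dynamically defined tables on the default engine.
#
#    migrate_tables(['user'])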
class ModelMetaclass(type):
def __init__(cls, name, bases, dct):
super(ModelMetaclass, cls).__init__(name, bases, dct)
if name == 'Model':
return
cls._set_tablename()
cls.properties = {}
cls._fields_list = []
cls._collection_names = {}
defined = set()
is_replace = dct.get('__replace__')
for base in bases:
if hasattr(base, 'properties') and not is_replace:
cls.properties.update(base.properties)
is_config = dct.get('__config__', True)
cls._manytomany = {}
cls._onetoone = {}
for attr_name in dct.keys():
attr = dct[attr_name]
if isinstance(attr, Property):
cls.add_property(attr_name, attr, set_property=False, config=not __lazy_model_init__)
if isinstance(attr, ManyToMany):
cls._manytomany[attr_name] = attr
#if a primary_key is already defined, 'id' will not become the primary key
#(multiple primary keys are supported)
#has_primary_key = bool([v for v in cls.properties.itervalues() if 'primary_key' in v.kwargs])
#set an __without_id__ attribute on a model to stop uliorm from creating
#an 'id' field for it; if a primary key already exists, 'id' is not
#created either (changed in version 0.2.6)
without_id = getattr(cls, '__without_id__', False)
if 'id' not in cls.properties and not without_id:
cls.properties['id'] = f = Field(PKTYPE(), autoincrement=True,
primary_key=True, default=None, nullable=False, server_default=None)
if not __lazy_model_init__:
f.__property_config__(cls, 'id')
setattr(cls, 'id', f)
fields_list = [(k, v) for k, v in cls.properties.items()]
fields_list.sort(lambda x, y: cmp(x[1].creation_counter, y[1].creation_counter))
cls._fields_list = fields_list
#check if cls is matched with __models__ module_path
if not check_model_class(cls):
return
if cls._bind and not __lazy_model_init__:
cls.bind(auto_create=__auto_create__)
class LazyValue(object):
def __init__(self, name, property):
self.name = name
self.property = property
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
return self.property.get_lazy(model_instance, self.name, self.property.default)
def __set__(self, model_instance, value):
if model_instance is None:
return
setattr(model_instance, self.name, value)
class Property(object):
data_type = str
field_class = String
type_name = 'str'
creation_counter = 0
property_type = 'column' #Property type: 'column', 'compound', 'relation'
server_default = None
def __init__(self, verbose_name=None, fieldname=None, default=None,
required=False, validators=None, choices=None, max_length=None,
hint='', auto=None, auto_add=None, type_class=None, type_attrs=None,
placeholder='', extra=None,
sequence=False, **kwargs):
self.verbose_name = verbose_name
self.property_name = None
self.name = None
self.fieldname = fieldname
self.default = default
self.required = required
self.auto = auto
self.auto_add = auto_add
self.validators = validators or []
self.hint = hint
if not isinstance(self.validators, (tuple, list)):
self.validators = [self.validators]
self.choices = choices
self.max_length = max_length
self.kwargs = kwargs
self.sequence = sequence
self.creation_counter = Property.creation_counter
self.value = None
self.placeholder = placeholder
self.extra = extra or {}
self.type_attrs = type_attrs or {}
self.type_class = type_class or self.field_class
Property.creation_counter += 1
def get_parameters(self):
"""
Get common attributes; the result is used by the Model.relationship clone process
"""
d = {}
for k in ['verbose_name', 'required', 'hint', 'placeholder', 'choices',
'default', 'validators', 'max_length']:
d[k] = getattr(self, k)
return d
def _get_column_info(self, kwargs):
kwargs['primary_key'] = self.kwargs.get('primary_key', False)
kwargs['autoincrement'] = self.kwargs.get('autoincrement', False)
kwargs['index'] = self.kwargs.get('index', False)
kwargs['unique'] = self.kwargs.get('unique', False)
#nullable default change to False
kwargs['nullable'] = self.kwargs.get('nullable', __nullable__)
if __server_default__:
kwargs['server_default' ] = self.kwargs.get('server_default', self.server_default)
else:
v = self.kwargs.get('server_default', None)
if v is not None and isinstance(v, (int, long)):
v = text(str(v))
kwargs['server_default' ] = v
def create(self, cls):
global __nullable__
kwargs = self.kwargs.copy()
kwargs['key'] = self.name
self._get_column_info(kwargs)
f_type = self._create_type()
args = ()
if self.sequence:
args = (self.sequence, )
# return Column(self.property_name, f_type, *args, **kwargs)
return Column(self.fieldname, f_type, *args, **kwargs)
def _create_type(self):
if self.max_length:
f_type = self.type_class(self.max_length, **self.type_attrs)
else:
f_type = self.type_class(**self.type_attrs)
return f_type
def __property_config__(self, model_class, property_name):
self.model_class = model_class
self.property_name = property_name
self.name = property_name
if not self.fieldname:
self.fieldname = property_name
setattr(model_class, self._lazy_value(), LazyValue(self._attr_name(), self))
def get_attr(self, model_instance, name, default):
v = None
if hasattr(model_instance, name):
v = getattr(model_instance, name)
if v is None:
if callable(default):
v = default()
else:
v = default
return v
def get_lazy(self, model_instance, name, default=None):
v = self.get_attr(model_instance, name, default)
if v is Lazy:
_id = getattr(model_instance, 'id')
if not _id:
raise BadValueError('Instance is not a valid object of Model %s, ID property is not found' % model_instance.__class__.__name__)
model_instance.refresh()
v = self.get_attr(model_instance, name, default)
return v
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
try:
return self.get_lazy(model_instance, self._attr_name(), self.default)
except AttributeError:
return None
def __set__(self, model_instance, value):
if model_instance is None:
return
value = self.validate(value)
#add value to model_instance._changed_value, so that you can test if
#a object really need to save
setattr(model_instance, self._attr_name(), value)
def default_value(self):
if callable(self.default):
d = self.default()
else:
d = self.default
return d
def get_choices(self):
if callable(self.choices):
choices = self.choices()
else:
choices = self.choices
return choices or []
def get_display_value(self, value):
if value is None:
return ''
if self.choices:
v = dict(self.get_choices()).get(value, '')
if isinstance(v, str):
v = unicode(v, __default_encoding__)
return v
else:
if isinstance(value, Model):
return unicode(value)
else:
return self.to_unicode(value)
def _validate(self, value, from_dump=False):
if self.empty(value):
if self.required:
raise BadValueError('Property "%s" of Model [%s] is required, but %r found' % (self.name, self.model_class.__name__, value))
#skip Lazy value
if value is Lazy:
return value
try:
if from_dump:
value = self.convert_dump(value)
else:
value = self.convert(value)
except TypeError as err:
raise BadValueError('Property %s must be convertible to %s, but the value is (%s)' % (self.name, self.data_type, err))
if hasattr(self, 'custom_validate'):
value = self.custom_validate(value)
for v in self.validators:
v(value)
return value
def validate(self, value):
return self._validate(value)
def validate_dump(self, value):
return self._validate(value, from_dump=True)
def empty(self, value):
return (value is None) or (isinstance(value, (str, unicode)) and not value.strip())
def get_value_for_datastore(self, model_instance):
return getattr(model_instance, self._attr_name(), None)
def make_value_from_datastore(self, value):
return value
def convert(self, value):
if self.data_type and not isinstance(value, self.data_type):
return self.data_type(value)
else:
return value
def convert_dump(self, value):
return self.convert(value)
def __repr__(self):
return ("<%s 'type':%r, 'verbose_name':%r, 'name':%r, 'fieldname':%r, "
"'default':%r, 'required':%r, 'validator':%r, "
"'chocies':%r, 'max_length':%r, 'kwargs':%r>"
% (
self.__class__.__name__,
self.data_type,
self.verbose_name,
self.name,
self.fieldname,
self.default,
self.required,
self.validators,
self.choices,
self.max_length,
self.kwargs)
)
def _attr_name(self):
return '_STORED_' + self.name + '_'
def _lazy_value(self):
return '_' + self.name + '_'
def to_str(self, v):
if isinstance(v, unicode):
return v.encode(__default_encoding__)
elif isinstance(v, str):
return v
else:
if v is None:
return ''
return str(v)
def to_unicode(self, v):
if isinstance(v, str):
return unicode(v, __default_encoding__)
elif isinstance(v, unicode):
return v
else:
if v is None:
return u''
return unicode(v)
def to_column_info(self):
d = {}
d['verbose_name'] = self.verbose_name or ''
d['name'] = self.name
d['fieldname'] = self.fieldname
d['type'] = self.type_name
d['type_name'] = self.get_column_type_name()
d['relation'] = ''
if isinstance(self, Reference):
d['relation'] = '%s(%s:%s)' % (self.type_name, self.reference_class.__name__, self.reference_fieldname)
self._get_column_info(d)
return d
def get_column_type_name(self):
return self.type_name
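#Sketch of a custom property (hypothetical, not part of this module): subclass
#Property, set data_type/field_class/type_name, and optionally define
#custom_validate, which _validate calls automatically after conversion.
#
#    class EmailProperty(Property):
#        data_type = unicode
#        field_class = VARCHAR
#        type_name = 'EMAIL'
#        def __init__(self, verbose_name=None, default=u'', max_length=255, **kwds):
#            super(EmailProperty, self).__init__(verbose_name, default=default,
#                max_length=max_length, **kwds)
#        def custom_validate(self, value):
#            if value and '@' not in value:
#                raise BadValueError('Property %s must be an email address' % self.name)
#            return value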
class CharProperty(Property):
data_type = unicode
field_class = CHAR
server_default=''
type_name = 'CHAR'
def __init__(self, verbose_name=None, default=u'', max_length=None, **kwds):
if __check_max_length__ and not max_length:
raise BadPropertyTypeError("max_length parameter not passed for property %s" % self.__class__.__name__)
max_length = max_length or 255
super(CharProperty, self).__init__(verbose_name, default=default, max_length=max_length, **kwds)
def convert(self, value):
if value is None:
return u''
if isinstance(value, str):
return unicode(value, __default_encoding__)
else:
return self.data_type(value)
def _create_type(self):
if self.max_length:
f_type = self.type_class(self.max_length, convert_unicode=True, **self.type_attrs)
else:
f_type = self.type_class(**self.type_attrs)
return f_type
def to_str(self, v):
return safe_str(v)
def get_column_type_name(self):
return '%s(%d)' % (self.type_name, self.max_length)
class StringProperty(CharProperty):
type_name = 'VARCHAR'
field_class = VARCHAR
class BinaryProperty(CharProperty):
type_name = 'BINARY'
field_class = BINARY
data_type = str
def _create_type(self):
if self.max_length:
f_type = self.type_class(self.max_length, **self.type_attrs)
else:
f_type = self.type_class(**self.type_attrs)
return f_type
class VarBinaryProperty(BinaryProperty):
type_name = 'VARBINARY'
field_class = VARBINARY
class UUIDBinaryProperty(VarBinaryProperty):
type_name = 'UUID_B'
field_class = VARBINARY
def __init__(self, **kwds):
kwds['max_length'] = 16
super(UUIDBinaryProperty, self).__init__(**kwds)
self.auto_add = True
def default_value(self):
import uuid
u = uuid.uuid4()
return u.get_bytes()
def convert(self, value):
if value is None:
return ''
return value
class UUIDProperty(StringProperty):
type_name = 'UUID'
field_class = VARCHAR
def __init__(self, **kwds):
kwds['max_length'] = 32
super(UUIDProperty, self).__init__(**kwds)
self.auto_add = True
def default_value(self):
import uuid
u = uuid.uuid4()
return u.get_hex()[:self.max_length]
def convert(self, value):
if value is None:
return ''
return value
class FileProperty(StringProperty):
def __init__(self, verbose_name=None, max_length=None, upload_to=None, upload_to_sub=None, **kwds):
max_length = max_length or 255
super(FileProperty, self).__init__(verbose_name, max_length=max_length, **kwds)
self.upload_to = upload_to
self.upload_to_sub = upload_to_sub
class UnicodeProperty(StringProperty):
pass
class TextProperty(Property):
field_class = Text
data_type = unicode
type_name = 'TEXT'
def __init__(self, verbose_name=None, default=u'', **kwds):
super(TextProperty, self).__init__(verbose_name, default=default, max_length=None, **kwds)
def convert(self, value):
if not value:
return u''
if isinstance(value, str):
return unicode(value, __default_encoding__)
else:
return self.data_type(value)
class BlobProperty(Property):
field_class = BLOB
data_type = str
type_name = 'BLOB'
def __init__(self, verbose_name=None, default='', **kwds):
super(BlobProperty, self).__init__(verbose_name, default=default, max_length=None, **kwds)
def get_display_value(self, value):
return repr(value)
def convert(self, value):
if not value:
return ''
return value
class PickleProperty(BlobProperty):
field_class = PickleType
data_type = None
type_name = 'PICKLE'
def to_str(self, v):
return pickle.dumps(v, pickle.HIGHEST_PROTOCOL)
def convert_dump(self, v):
return pickle.loads(v)
def convert(self, value):
return value
class JsonProperty(TextProperty):
field_class = TEXT
data_type = None
type_name = 'JSON'
def get_value_for_datastore(self, model_instance):
from uliweb import json_dumps
return json_dumps(getattr(model_instance, self._attr_name(), None))
def make_value_from_datastore(self, value):
return self.convert_dump(value)
def convert_dump(self, v):
import json
return json.loads(v)
def convert(self, value):
return value
class DateTimeProperty(Property):
data_type = datetime.datetime
field_class = DateTime
server_default = '0000-00-00 00:00:00'
type_name = 'DATETIME'
def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False,
format=None, **kwds):
super(DateTimeProperty, self).__init__(verbose_name, **kwds)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
self.format = format
def custom_validate(self, value):
if value and not isinstance(value, self.data_type):
raise BadValueError('Property %s must be a %s' %
(self.name, self.data_type.__name__))
return value
@staticmethod
def now():
return _date.now()
def make_value_from_datastore(self, value):
if value is not None:
value = self._convert_func(value)
return value
@staticmethod
def _convert_func(*args, **kwargs):
return _date.to_datetime(*args, **kwargs)
def convert(self, value):
if not value:
return None
d = self._convert_func(value, format=self.format)
if d:
return d
raise BadValueError('The datetime value is not a valid format')
def to_str(self, v):
if isinstance(v, self.data_type):
return _date.to_string(v, timezone=False)
else:
if not v:
return ''
return str(v)
def to_unicode(self, v):
if isinstance(v, self.data_type):
return unicode(_date.to_string(v, timezone=False))
else:
if not v:
return u''
return unicode(v)
class DateProperty(DateTimeProperty):
data_type = datetime.date
field_class = Date
server_default = '0000-00-00'
type_name = 'DATE'
@staticmethod
def _convert_func(*args, **kwargs):
return _date.to_date(*args, **kwargs)
@staticmethod
def now():
return _date.to_date(_date.now())
class TimeProperty(DateTimeProperty):
data_type = datetime.time
field_class = Time
server_default = '00:00:00'
type_name = 'TIME'
@staticmethod
def _convert_func(*args, **kwargs):
return _date.to_time(*args, **kwargs)
@staticmethod
def now():
return _date.to_time(_date.now())
class IntegerProperty(Property):
"""An integer property."""
data_type = int
field_class = Integer
server_default=text('0')
type_name = 'INTEGER'
def __init__(self, verbose_name=None, default=0, **kwds):
super(IntegerProperty, self).__init__(verbose_name, default=default, **kwds)
def convert(self, value):
if value == '':
return 0
if value is None:
return value
return self.data_type(value)
def custom_validate(self, value):
if value and not isinstance(value, (int, long, bool)):
raise BadValueError('Property %s must be an int, long or bool, not a %s'
% (self.name, type(value).__name__))
return value
class BigIntegerProperty(IntegerProperty):
field_class = BigInteger
type_name = 'BIGINT'
class SmallIntegerProperty(IntegerProperty):
field_class = SmallInteger
type_name = 'SMALLINT'
class FloatProperty(Property):
"""A float property."""
data_type = float
field_class = Float
server_default=text('0')
type_name = 'FLOAT'
def __init__(self, verbose_name=None, default=0.0, precision=None, **kwds):
super(FloatProperty, self).__init__(verbose_name, default=default, **kwds)
self.precision = precision
def _create_type(self):
f_type = self.type_class(precision=self.precision, **self.type_attrs)
return f_type
def convert(self, value):
if value == '' or value is None:
return 0.0
return self.data_type(value)
def custom_validate(self, value):
if value and not isinstance(value, float):
raise BadValueError('Property %s must be a float, not a %s'
% (self.name, type(value).__name__))
if abs(value) < __zero_float__:
value = 0.0
return value
def get_column_type_name(self):
return '%s' % self.type_name
class DecimalProperty(Property):
"""A float property."""
data_type = decimal.Decimal
field_class = Numeric
server_default=text('0.00')
type_name = 'DECIMAL'
def __init__(self, verbose_name=None, default='0.0', precision=10, scale=2, **kwds):
super(DecimalProperty, self).__init__(verbose_name, default=default, **kwds)
self.precision = precision
self.scale = scale
def convert(self, value):
if value == '' or value is None:
return decimal.Decimal('0.0')
return self.data_type(value)
def _create_type(self):
f_type = self.type_class(precision=self.precision, scale=self.scale, **self.type_attrs)
return f_type
def get_display_value(self, value):
if value is None:
return ''
if self.choices:
v = dict(self.get_choices()).get(str(value), '')
if isinstance(v, str):
v = unicode(v, __default_encoding__)
return v
else:
return str(value)
def get_column_type_name(self):
return '%s(%d,%d)' % (self.type_name, self.precision, self.scale)
class BooleanProperty(Property):
"""A boolean property."""
data_type = bool
field_class = Boolean
server_default=text('0')
type_name = 'BOOL'
def __init__(self, verbose_name=None, default=False, **kwds):
super(BooleanProperty, self).__init__(verbose_name, default=default, **kwds)
def custom_validate(self, value):
if value is not None and not isinstance(value, bool):
raise BadValueError('Property %s must be a boolean, not a %s'
% (self.name, type(value).__name__))
return value
def convert(self, value):
if not value:
return False
if value in ['1', 'True', 'true', True]:
return True
else:
return False
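#Sketch of a model built from these column properties (the User model and its
#fields are assumptions, not part of this module):
#
#    class User(Model):
#        username = StringProperty(max_length=40, required=True)
#        email = CharProperty(max_length=128)
#        age = IntegerProperty(default=0)
#        balance = DecimalProperty(precision=10, scale=2)
#        created = DateTimeProperty(auto_now_add=True)
#        deleted = BooleanProperty(default=False)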
class ReferenceProperty(Property):
"""A property that represents a many-to-one reference to another model.
"""
data_type = int
field_class = PKCLASS()
type_name = 'Reference'
def __init__(self, reference_class=None, verbose_name=None, collection_name=None,
reference_fieldname=None, required=False, engine_name=None, **attrs):
"""Construct ReferenceProperty.
Args:
reference_class: Which model class this property references.
verbose_name: User friendly name of property.
collection_name: If provided, alternate name of collection on
reference_class to store back references. Use this to allow
a Model to have multiple fields which refer to the same class.
reference_fieldname: which field of reference_class should be
referenced (defaults to 'id')
"""
super(ReferenceProperty, self).__init__(verbose_name, **attrs)
self._collection_name = collection_name
self.reference_fieldname = reference_fieldname or 'id'
self.required = required
self.engine_name = engine_name
self.reference_class = reference_class
if __lazy_model_init__:
if inspect.isclass(self.reference_class) and issubclass(self.reference_class, Model):
warnings.simplefilter('default')
warnings.warn("Reference Model should be a string type, but [%s] model class found." % self.reference_class.__name__, DeprecationWarning)
def create(self, cls):
global __nullable__
args = self.kwargs.copy()
args['key'] = self.name
# if not callable(self.default):
# args['default'] = self.default
args['primary_key'] = self.kwargs.get('primary_key', False)
args['autoincrement'] = self.kwargs.get('autoincrement', False)
args['index'] = self.kwargs.get('index', False)
args['unique'] = self.kwargs.get('unique', False)
args['nullable'] = self.kwargs.get('nullable', __nullable__)
f_type = self._create_type()
if __server_default__:
#for int or long data_type, it'll automatically set text('0')
if self.data_type is int or self.data_type is long :
args['server_default'] = text('0')
else:
v = self.reference_field.kwargs.get('server_default')
args['server_default'] = v
return Column(self.fieldname, f_type, **args)
def _create_type(self):
if not hasattr(self.reference_class, self.reference_fieldname):
raise KindError("reference_fieldname '%s' does not exist" % self.reference_fieldname)
self.reference_field = getattr(self.reference_class, self.reference_fieldname)
#process data_type
self.data_type = self.reference_field.data_type
field_class = self.reference_field.field_class
if self.reference_field.max_length:
f_type = field_class(self.reference_field.max_length)
else:
f_type = field_class
return f_type
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
"""
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if not (
(isinstance(self.reference_class, type) and issubclass(self.reference_class, Model)) or
self.reference_class is _SELF_REFERENCE or
valid_model(self.reference_class, self.engine_name)):
raise KindError('reference_class %r must be Model or _SELF_REFERENCE or available table name' % self.reference_class)
if self.reference_class is _SELF_REFERENCE or self.reference_class is None:
self.reference_class = model_class
else:
self.reference_class = get_model(self.reference_class, self.engine_name,
signal=False)
self.collection_name = self.reference_class.get_collection_name(model_class.tablename, self._collection_name, model_class.tablename)
setattr(self.reference_class, self.collection_name,
_ReverseReferenceProperty(model_class, property_name, self._id_attr_name()))
def __get__(self, model_instance, model_class):
"""Get reference object.
This method will fetch unresolved entities from the datastore if
they are not already loaded.
Returns:
ReferenceProperty to Model object if property is set, else None.
"""
if model_instance is None:
return self
if hasattr(model_instance, self._attr_name()):
# reference_id = getattr(model_instance, self._attr_name())
reference_id = self.get_lazy(model_instance, self._attr_name(), None)
else:
reference_id = None
if reference_id:
#this will cache the reference object
resolved = getattr(model_instance, self._resolved_attr_name())
if resolved is not None:
return resolved
else:
#change id_field to reference_fieldname
# id_field = self._id_attr_name()
# d = self.reference_class.c[id_field]
d = self.reference_class.c[self.reference_fieldname]
instance = self.reference_class.get(d==reference_id)
if instance is None:
raise NotFound('ReferenceProperty %s failed to be resolved' % self.reference_fieldname, self.reference_class, reference_id)
setattr(model_instance, self._resolved_attr_name(), instance)
return instance
else:
return None
def get_value_for_datastore(self, model_instance):
if not model_instance:
return None
else:
return getattr(model_instance, self._attr_name(), None)
def __set__(self, model_instance, value):
"""Set reference."""
value = self.validate(value)
if value is not None:
if not isinstance(value, Model):
setattr(model_instance, self._attr_name(), value)
setattr(model_instance, self._resolved_attr_name(), None)
else:
setattr(model_instance, self._attr_name(), getattr(value, self.reference_fieldname))
setattr(model_instance, self._resolved_attr_name(), value)
else:
setattr(model_instance, self._attr_name(), None)
setattr(model_instance, self._resolved_attr_name(), None)
def validate(self, value):
"""Validate reference.
Returns:
A valid value.
Raises:
BadValueError for the following reasons:
- Value is not saved.
- Object not of correct model type for reference.
"""
if value == '':
if self.kwargs.get('nullable', __nullable__):
value = None
else:
value = 0
if not isinstance(value, Model):
return super(ReferenceProperty, self).validate(value)
if not value.is_saved():
raise BadValueError(
'%s instance must be saved before it can be stored as a '
'reference' % self.reference_class.__name__)
if not isinstance(value, self.reference_class):
raise KindError('Property %s must be an instance of %s' %
(self.name, self.reference_class.__name__))
return value
validate_dump = validate
def _id_attr_name(self):
"""Get attribute of referenced id.
"""
return self.reference_fieldname
def _resolved_attr_name(self):
"""Get attribute of resolved attribute.
The resolved attribute is where the actual loaded reference instance is
stored on the referring model instance.
Returns:
Attribute name of where to store resolved reference model instance.
"""
return '_RESOLVED_' + self._attr_name()
def convert(self, value):
if value == '':
return 0
if value is None:
return value
return self.data_type(value)
def get_column_type_name(self):
return self.reference_field.get_column_type_name()
Reference = ReferenceProperty
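#Usage sketch (User/Group models are assumptions): a many-to-one reference plus
#the automatically created reverse collection on the referenced model.
#
#    class User(Model):
#        username = StringProperty(max_length=40)
#        group = Reference('group', collection_name='members')
#
#    u = User.get(1)
#    u.group            #resolves and caches the referenced Group instance
#    u.group.members    #reverse query: every User that references this Group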
class OneToOne(ReferenceProperty):
type_name = 'OneToOne'
def create(self, cls):
global __nullable__
args = self.kwargs.copy()
args['key'] = self.name
# if not callable(self.default):
# args['default'] = self.default
args['primary_key'] = self.kwargs.get('primary_key', False)
args['autoincrement'] = self.kwargs.get('autoincrement', False)
args['index'] = self.kwargs.get('index', True)
args['unique'] = self.kwargs.get('unique', True)
args['nullable'] = self.kwargs.get('nullable', __nullable__)
f_type = self._create_type()
if __server_default__:
if self.data_type is int or self.data_type is long :
args['server_default'] = text('0')
else:
args['server_default'] = self.reference_field.kwargs.get('server_default')
return Column(self.fieldname, f_type, **args)
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
"""
#Invoke super() with ReferenceProperty directly in order to skip
#ReferenceProperty's own processing and call its parent's
#implementation instead
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if not (
(isinstance(self.reference_class, type) and issubclass(self.reference_class, Model)) or
self.reference_class is _SELF_REFERENCE or
valid_model(self.reference_class, self.engine_name)):
raise KindError('reference_class %r must be Model or _SELF_REFERENCE or available table name' % self.reference_class)
if self.reference_class is _SELF_REFERENCE:
self.reference_class = self.data_type = model_class
else:
self.reference_class = get_model(self.reference_class, self.engine_name,
signal=False)
self.collection_name = self._collection_name
if self.collection_name is None:
self.collection_name = '%s' % (model_class.tablename)
if hasattr(self.reference_class, self.collection_name):
raise DuplicatePropertyError('Class %s already has property %s'
% (self.reference_class.__name__, self.collection_name))
setattr(self.reference_class, self.collection_name,
_OneToOneReverseReferenceProperty(model_class, property_name,
self._id_attr_name(), self.collection_name))
#append to reference_class._onetoone
self.reference_class._onetoone[self.collection_name] = model_class
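#Usage sketch (Profile/User models are assumptions): OneToOne behaves like a
#unique, indexed Reference; the reverse attribute is named after the owning
#model's tablename and is resolved (or auto-created) lazily.
#
#    class Profile(Model):
#        user = OneToOne('user')
#        address = StringProperty(max_length=255)
#
#    p = Profile.get(1)
#    p.user             #the referenced User instance
#    p.user.profile     #reverse side, back to the Profile row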
def get_objs_columns(objs, field='id'):
ids = []
new_objs = []
if isinstance(objs, (str, unicode)):
objs = [int(x) for x in objs.split(',')]
for x in objs:
if not x:
continue
if isinstance(x, (tuple, list)):
new_objs.extend(x)
else:
new_objs.append(x)
for o in new_objs:
if not isinstance(o, Model):
_id = o
else:
_id = o.get_datastore_value(field)
if _id not in ids:
ids.append(_id)
return ids
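#Usage sketch: normalize mixed input (comma strings, ids, model instances,
#nested sequences) into a unique id list.
#
#    get_objs_columns('1,2,3')           #-> [1, 2, 3]
#    get_objs_columns([obj, 5, (6, 7)])  #-> [obj.id, 5, 6, 7], deduplicated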
class Result(object):
def __init__(self, model=None, condition=None, *args, **kwargs):
self.model = model
self.condition = condition
self.columns = list(self.model.table.c)
self.funcs = []
self.args = args
self.kwargs = kwargs
self.result = None
self.default_query_flag = True
self._group_by = None
self._having = None
self.distinct_field = None
self._values_flag = False
self._join = []
self._limit = None
self._offset = None
self.connection = model.get_session()
def do_(self, query):
global do_
return do_(query, self.connection)
def get_column(self, model, fieldname):
if isinstance(fieldname, (str, unicode)):
if issubclass(model, Model):
v = fieldname.split('.')
if len(v) > 1:
field = get_model(v[0], engine_name=self.model.get_engine_name(),
signal=False).table.c[v[1]]
else:
field = model.table.c[fieldname]
else:
field = model.c[fieldname]
else:
field = fieldname
return field
def get_columns(self, model=None, columns=None):
columns = columns or self.columns
model = model or self.model
fields = []
field = None
if self.distinct_field is not None:
field = self.get_column(model, self.distinct_field)
fields.append(func.distinct(field).label(field.name))
for col in columns:
if col is not field:
fields.append(col)
return fields
def get_fields(self):
"""
get property instance according self.columns
"""
columns = self.columns
model = self.model
fields = []
for col in columns:
if isinstance(col, (str, unicode)):
v = col.split('.')
if len(v) > 1:
field = get_model(v[0], engine_name=self.model.get_engine_name(),
signal=False).properties[v[1]]
else:
field = model.properties[col]
elif isinstance(col, Column):
field = get_model(col.table.name, engine_name=self.model.get_engine_name(),
signal=False).properties[col.name]
else:
field = col
fields.append(field)
return fields
def connect(self, connection):
if connection:
self.connection = connection
return self
use = connect
def all(self):
return self
def join(self, model, cond, isouter=False):
_join = None
model = get_model(model, engine_name=self.model.get_engine_name(),
signal=False)
if issubclass(model, Model):
# if cond is None:
# for prop in Model.proterties:
# if isinstance(prop, ReferenceProperty) and prop.reference_class is self.model:
# _right = prop.reference_class
# _join = self.model.table.join(_right.table,
# _right.c[prop.reference_fieldname])
# break
# else:
_join = self.model.table.join(model.table, cond, isouter=isouter)
self._join.append(_join)
else:
raise BadValueError("Only Model support in this function.")
return self
def get(self, condition=None):
if isinstance(condition, (int, long)):
return self.filter(self.model.c.id==condition).one()
else:
return self.filter(condition).one()
def count(self):
"""
If group_by or join is involved, count over the whole query as a
subselect; otherwise count using only the condition
"""
if self._group_by or self._join:
return self.do_(self.get_query().alias().count()).scalar()
else:
return self.do_(self.get_query().with_only_columns([func.count()]).limit(None).order_by(None).offset(None)).scalar()
def any(self):
row = self.do_(
self.get_query().limit(1)
)
return len(list(row)) > 0
def filter(self, *condition):
"""
If there are multiple conditions, they are combined with AND
"""
if not condition:
return self
cond = true()
for c in condition:
if c is not None:
cond = c & cond
if self.condition is not None:
self.condition = cond & self.condition
else:
self.condition = cond
return self
def order_by(self, *args, **kwargs):
self.funcs.append(('order_by', args, kwargs))
return self
def group_by(self, *args):
self._group_by = args
return self
def having(self, *args):
self._having = args
return self
def fields(self, *args, **kwargs):
if args:
args = flat_list(args)
if args:
if 'id' not in args:
args.append('id')
self.funcs.append(('with_only_columns', ([self.get_column(self.model, x) for x in args],), kwargs))
return self
def values(self, *args, **kwargs):
self.funcs.append(('with_only_columns', ([self.get_column(self.model, x) for x in args],), kwargs))
self._values_flag = True
return self
def values_one(self, *args, **kwargs):
self.funcs.append(('with_only_columns', ([self.get_column(self.model, x) for x in args],), kwargs))
self.run(1)
result = self.result.fetchone()
return result
def distinct(self, field=None):
"""
If field is None, it creates:
select distinct *
and if field is not None, for example 'name', it creates:
select distinct(name)
"""
if field is None:
self.funcs.append(('distinct', (), {}))
else:
self.distinct_field = field
return self
def limit(self, *args, **kwargs):
self.funcs.append(('limit', args, kwargs))
if args:
self._limit = bool(args[0])
else:
self._limit = False
return self
def offset(self, *args, **kwargs):
self._offset = True
self.funcs.append(('offset', args, kwargs))
return self
def update(self, **kwargs):
"""
Execute an UPDATE against the matched rows, e.g. "update table set field=field+1" style statements
"""
if self.condition is not None:
self.result = self.do_(self.model.table.update().where(self.condition).values(**kwargs))
else:
self.result = self.do_(self.model.table.update().values(**kwargs))
return self.result
def without(self, flag='default_query'):
if flag == 'default_query':
self.default_query_flag = False
return self
def run(self, limit=0):
query = self.get_query()
#add limit support
if limit > 0:
query = getattr(query, 'limit')(limit)
self.result = self.do_(query)
return self.result
def save_file(self, filename, encoding='utf8', headers=None, convertors=None, display=True):
"""
Save the result to a csv file.
If display is True, values are converted to their display form
according to each property's choices
"""
global save_file
convertors = convertors or {}
if display:
fields = self.get_fields()
for i, column in enumerate(fields):
if column.name not in convertors:
#bind column via a default argument so each convertor keeps its own column
def f(value, data, column=column):
return column.get_display_value(value)
convertors[column.name] = f
return save_file(self.run(), filename, encoding=encoding, headers=headers, convertors=convertors)
def get_query(self, columns=None):
#user can define default_query, and default_query
#should be class method
columns = columns or self.get_columns()
if self.default_query_flag:
_f = getattr(self.model, 'default_query', None)
if _f:
_f(self)
from_ = self._join
from_.append(self.model.table)
if self.condition is not None:
query = select(columns, self.condition, from_obj=from_, **self.kwargs)
else:
query = select(columns, from_obj=from_, **self.kwargs)
for func, args, kwargs in self.funcs:
query = getattr(query, func)(*args, **kwargs)
if self._group_by:
query = query.group_by(*self._group_by)
if self._having:
query = query.having(*self._having)
return query
def load(self, values):
if self._values_flag:
return values
else:
return self.model.load(values.items())
def for_update(self, flag=True):
"""
please see http://docs.sqlalchemy.org/en/latest/core/expression_api.html search for update
"""
self.kwargs['for_update'] = flag
return self
def one(self):
self.run(1)
if not self.result:
return
result = self.result.fetchone()
if result:
return self.load(result)
first = one
def clear(self):
return do_(self.model.table.delete(self.condition), self.connection)
remove = clear
def __del__(self):
if self.result:
self.result.close()
self.result = None
def __iter__(self):
self.result = self.run()
while 1:
result = self.result.fetchone()
if not result:
raise StopIteration
yield self.load(result)
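#Usage sketch (the User model and its fields are assumptions): Result supports
#chained query building; iterating runs the query and loads model instances.
#
#    q = User.filter(User.c.age >= 18).order_by(User.c.username).limit(10)
#    total = q.count()
#    for u in q:
#        print u.username
#    guest = User.filter(User.c.username == 'guest').one()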
class ReverseResult(Result):
def __init__(self, model, condition, a_field, b_table, instance, b_field, *args, **kwargs):
self.model = model
self.b_table = b_table
self.b_field = b_field
self.instance = instance
self.condition = condition
self.a_field = a_field
self.columns = list(self.model.table.c)
self.funcs = []
self.args = args
self.kwargs = kwargs
self.result = None
self.default_query_flag = True
self._group_by = None
self._having = None
self._limit = None
self._offset = None
self._join = []
self.distinct_field = None
self._values_flag = False
self.connection = model.get_session()
def has(self, *objs):
ids = get_objs_columns(objs)
if not ids:
return False
return self.model.filter(self.condition, self.model.table.c['id'].in_(ids)).any()
def ids(self):
query = select([self.model.c['id']], self.condition)
ids = [x[0] for x in self.do_(query)]
return ids
def clear(self, *objs):
"""
Delete the referencing records themselves (rows of the referencing model's table)
"""
if objs:
ids = get_objs_columns(objs)
self.do_(self.model.table.delete(self.condition & self.model.table.c['id'].in_(ids)))
else:
self.do_(self.model.table.delete(self.condition))
remove = clear
class ManyResult(Result):
def __init__(self, modela, instance, property_name, modelb,
table, fielda, fieldb, realfielda, realfieldb, valuea, through_model=None):
"""
modela defines property_name = ManyToMany(modelb).
instance is a modela instance
"""
self.modela = modela
self.instance = instance
self.property_name = property_name
self.modelb = modelb
self.table = table #third table
self.fielda = fielda
self.fieldb = fieldb
self.realfielda = realfielda
self.realfieldb = realfieldb
self.valuea = valuea
self.columns = list(self.modelb.table.c)
self.condition = ''
self.funcs = []
self.result = None
self.with_relation_name = None
self.through_model = through_model
self.default_query_flag = True
self._group_by = None
self._having = None
self._join = []
self._limit = None
self._offset = None
self.distinct_field = None
self._values_flag = False
self.connection = self.modela.get_session()
self.kwargs = {}
def all(self, cache=False):
"""
If cache is True, return objects through the object cache
"""
if cache:
return [get_object(self.modelb, obj_id, cache=True, use_local=True) for obj_id in self.ids(True)]
else:
return self
def get(self, condition=None):
if not isinstance(condition, ColumnElement):
return self.filter(self.modelb.c[self.realfieldb]==condition).one()
else:
return self.filter(condition).one()
def add(self, *objs):
new_objs = []
for x in objs:
if not x:
continue
if isinstance(x, (tuple, list)):
new_objs.extend(x)
else:
new_objs.append(x)
modified = False
for o in new_objs:
if not self.has(o):
if isinstance(o, Model):
v = getattr(o, self.realfieldb)
else:
v = o
d = {self.fielda:self.valuea, self.fieldb:v}
if self.through_model:
obj = self.through_model(**d)
obj.save()
else:
self.do_(self.table.insert().values(**d))
modified = modified or True
#cache [] to _STORED_attr_name
setattr(self.instance, self.store_key, Lazy)
return modified
@property
def store_key(self):
if self.property_name in self.instance.properties:
return self.instance.properties[self.property_name]._attr_name()
else:
return '_CACHED_'+self.property_name
def ids(self, cache=False):
key = self.store_key
ids = getattr(self.instance, key, None)
if not cache or ids is None or ids is Lazy:
if self.valuea is None:
return []
query = select([self.table.c[self.fieldb]], self.table.c[self.fielda]==self.valuea)
ids = [x[0] for x in self.do_(query)]
if cache:
setattr(self.instance, key, ids)
return ids
def update(self, *objs):
"""
Update the third (relationship) table; ModelA and ModelB records are untouched
"""
ids = self.ids()
new_ids = get_objs_columns(objs, self.realfieldb)
modified = False
for v in new_ids:
if v in ids: #the id has been existed, so don't insert new record
ids.remove(v)
else:
d = {self.fielda:self.valuea, self.fieldb:v}
if self.through_model:
obj = self.through_model(**d)
obj.save()
else:
self.do_(self.table.insert().values(**d))
modified = True
if ids: #if there are still ids, so delete them
self.clear(*ids)
modified = True
#cache [] to _STORED_attr_name
setattr(self.instance, self.store_key, new_ids)
return modified
def clear(self, *objs):
"""
Clear the third (relationship) table; ModelA and ModelB records are untouched
"""
if objs:
ids = get_objs_columns(objs, self.realfieldb)
self.do_(self.table.delete((self.table.c[self.fielda]==self.valuea) & (self.table.c[self.fieldb].in_(ids))))
else:
self.do_(self.table.delete(self.table.c[self.fielda]==self.valuea))
#cache [] to _STORED_attr_name
setattr(self.instance, self.store_key, Lazy)
remove = clear
def count(self):
if self._group_by or self._join:
return self.do_(self.get_query().alias().count()).scalar()
else:
return self.do_(
self.get_query().with_only_columns([func.count()]).limit(None).order_by(None).offset(None)
).scalar()
def any(self):
row = self.do_(
select([self.table.c[self.fieldb]],
(self.table.c[self.fielda]==self.valuea) &
self.condition).limit(1)
)
return len(list(row)) > 0
def has(self, *objs):
ids = get_objs_columns(objs, self.realfieldb)
if not ids:
return False
row = self.do_(select([text('*')],
(self.table.c[self.fielda]==self.valuea) &
(self.table.c[self.fieldb].in_(ids))).limit(1))
return len(list(row)) > 0
def fields(self, *args, **kwargs):
if args:
args = flat_list(args)
if args:
if 'id' not in args and 'id' in self.modelb.c:
args.append(self.modelb.c.id)
self.funcs.append(('with_only_columns', ([self.get_column(self.modelb, x) for x in args],), kwargs))
return self
def values(self, *args, **kwargs):
self.funcs.append(('with_only_columns', ([self.get_column(self.modelb, x) for x in args],), kwargs))
self._values_flag = True
return self
def values_one(self, *args, **kwargs):
self.funcs.append(('with_only_columns', ([self.get_column(self.modelb, x) for x in args],), kwargs))
self.run(1)
result = self.result.fetchone()
return result
def with_relation(self, relation_name=None):
"""
If a relation name is given, then when fetching the manytomany
result, the through (relation) record is fetched as well, saved on
each result object, and exposed under that name.
If relation_name is not given, the default name is 'relation'
"""
if not relation_name:
relation_name = 'relation'
if hasattr(self.modelb, relation_name):
raise Error("The attribute name %s has already existed in Model %s!" % (relation_name, self.modelb.__name__))
if not self.through_model:
raise Error("Only with through style in ManyToMany supports with_relation function of Model %s!" % self.modelb.__name__)
self.with_relation_name = relation_name
return self
def run(self, limit=0):
query = self.get_query()
if limit > 0:
query = getattr(query, 'limit')(limit)
self.result = self.do_(query)
return self.result
def get_query(self):
#user can define default_query, and default_query
#should be class method
if self.default_query_flag:
_f = getattr(self.modelb, 'default_query', None)
if _f:
_f(self)
if self.with_relation_name:
columns = list(self.table.c) + self.columns
else:
columns = self.columns
query = select(
self.get_columns(self.modelb, columns),
(self.table.c[self.fielda] == self.valuea) &
(self.table.c[self.fieldb] == self.modelb.c[self.realfieldb]) &
self.condition,
**self.kwargs)
for func, args, kwargs in self.funcs:
query = getattr(query, func)(*args, **kwargs)
if self._group_by:
query = query.group_by(*self._group_by)
if self._having:
query = query.having(*self._having)
return query
def one(self):
self.run(1)
if not self.result:
return
result = self.result.fetchone()
if result:
if self._values_flag:
return result
offset = 0
if self.with_relation_name:
offset = len(self.table.columns)
o = self.modelb.load(zip(result.keys()[offset:], result.values()[offset:]))
if self.with_relation_name:
r = self.through_model.load(zip(result.keys()[:offset], result.values()[:offset]))
setattr(o, self.with_relation_name, r)
return o
def __del__(self):
if self.result:
self.result.close()
self.result = None
def __iter__(self):
self.run()
if not self.result:
raise StopIteration
offset = 0
if self.with_relation_name:
offset = len(self.table.columns)
while 1:
result = self.result.fetchone()
if not result:
raise StopIteration
if self._values_flag:
yield result
continue
o = self.modelb.load(zip(result.keys()[offset:], result.values()[offset:]))
if self.with_relation_name:
r = self.through_model.load(zip(result.keys()[:offset], result.values()[:offset]))
setattr(o, self.with_relation_name, r)
yield o
class ManyToMany(ReferenceProperty):
type_name = 'ManyToMany'
def __init__(self, reference_class=None, verbose_name=None, collection_name=None,
reference_fieldname=None, reversed_fieldname=None, required=False, through=None,
through_reference_fieldname=None, through_reversed_fieldname=None,
**attrs):
"""
Definition of the ManyToMany property
:param reference_fieldname: field of model B that is referenced
:param reversed_fieldname: field of model A that is referenced
:param through_reference_fieldname: field of the through model pointing to B
:param through_reversed_fieldname: field of the through model pointing to A
:param index_reverse: also create an index on the reverse field
"""
super(ManyToMany, self).__init__(reference_class=reference_class,
verbose_name=verbose_name, collection_name=collection_name,
reference_fieldname=reference_fieldname, required=required, **attrs)
self.reversed_fieldname = reversed_fieldname or 'id'
self.through = through
self.through_reference_fieldname = through_reference_fieldname
self.through_reversed_fieldname = through_reversed_fieldname
self.index_reverse = attrs['index_reverse'] if 'index_reverse' in attrs else __manytomany_index_reverse__
def create(self, cls):
if not self.through:
self.fielda = "%s_id" % self.model_class.tablename
#test whether model_a equals model_b
#modified by limodou
#if self.model_class.tablename == self.reference_class.tablename:
if cls.tablename == self.reference_class.tablename:
_t = self.reference_class.tablename + '_b'
else:
_t = self.reference_class.tablename
self.fieldb = "%s_id" % _t
self.table = self.create_table()
#add appname to self.table
# appname = self.model_class.__module__
appname = cls.__module__
self.table.__appname__ = appname[:appname.rfind('.')]
#modified by limodou
#self.model_class.manytomany.append(self.table)
cls.manytomany.append(self.table)
index_name = '%s_mindx' % self.tablename
if index_name not in [x.name for x in self.table.indexes]:
Index(index_name, self.table.c[self.fielda], self.table.c[self.fieldb], unique=True)
#add field_b index
if self.index_reverse:
Index('%s_rmindx' % self.tablename, self.table.c[self.fieldb])
#process __mapping_only__ property, if the modela or modelb is mapping only
#then manytomany table will be mapping only
# if getattr(self.model_class, '__mapping_only__', False) or getattr(self.reference_class, '__mapping_only__', False):
if getattr(cls, '__mapping_only__', False) or getattr(self.reference_class, '__mapping_only__', False):
self.table.__mapping_only__ = True
else:
self.table.__mapping_only__ = False
def get_real_property(self, model, field):
return getattr(model, field).field_class
def get_type(self, model, field):
field = getattr(model, field)
field_class = field.field_class
if field.max_length:
f_type = field_class(field.max_length)
else:
f_type = field_class
return f_type
def create_table(self):
_table = Table(self.tablename, self.model_class.metadata,
Column(self.fielda, self.get_type(self.model_class, self.reversed_fieldname)),
Column(self.fieldb, self.get_type(self.reference_class, self.reference_fieldname)),
# ForeignKeyConstraint([a], [a_id]),
# ForeignKeyConstraint([b], [b_id]),
extend_existing=True
)
return _table
def init_through(self):
def find_property(properties, model, skip=None):
for k, v in properties.items():
if isinstance(v, ReferenceProperty) and v.reference_class is model and (not skip or skip and v.reference_class is not skip):
return k, v
if self.through and (not isinstance(self.through, type) or not issubclass(self.through, Model)):
#the through value here is a model name; make sure it maps to a valid model
if not valid_model(self.through, self.engine_name):
raise KindError('through must be Model or available table name')
self.through = get_model(self.through, engine_name=self.engine_name,
signal=False)
#auto find model
_auto_model = None
#process through_reference_fieldname
if self.through_reversed_fieldname:
k = self.through_reversed_fieldname
v = self.through.properties.get(k)
if not v:
raise BadPropertyTypeError("Can't find property %s in through model %s" % (
k, self.through.__name__))
else:
x = find_property(self.through.properties, self.model_class)
if not x:
raise BadPropertyTypeError("Can't find reference property of model %s in through model %s" % (
self.model_class.__name__, self.through.__name__))
k, v = x
_auto_model = self.model_class
self.fielda = k
self.reversed_fieldname = v.reference_fieldname
#process through_reversed_fieldname
if self.through_reference_fieldname:
k = self.through_reference_fieldname
v = self.through.properties.get(k)
if not v:
raise BadPropertyTypeError("Can't find property %s in through model %s" % (
k, self.through.__name__))
else:
x = find_property(self.through.properties, self.reference_class, self.model_class)
if not x:
raise BadPropertyTypeError("Can't find reference property of model %s in through model %s" % (
self.model_class.__name__, self.through.__name__))
k, v = x
#check if the auto find models are the same
if _auto_model and _auto_model is self.reference_class:
raise BadPropertyTypeError("If the two reference fields come from the same"
" model, you should specify them via through_reference_fieldname or"
" through_reversed_fieldname in through model %s" % self.through.__name__)
self.fieldb = k
self.reference_fieldname = v.reference_fieldname
self.table = self.through.table
appname = self.model_class.__module__
self.table.__appname__ = appname[:appname.rfind('.')]
self.model_class.manytomany.append(self.table)
Index('%s_mindx' % self.tablename, self.table.c[self.fielda], self.table.c[self.fieldb], unique=True)
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
"""
#Invoke super() with ReferenceProperty directly in order to skip
#ReferenceProperty's own processing and call its parent's
#implementation instead
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if not (
(isinstance(self.reference_class, type) and issubclass(self.reference_class, Model)) or
self.reference_class is _SELF_REFERENCE or
valid_model(self.reference_class, self.engine_name)):
raise KindError('reference_class %r must be Model or _SELF_REFERENCE or available table name' % self.reference_class)
if self.reference_class is _SELF_REFERENCE or self.reference_class is None:
self.reference_class = self.data_type = model_class
else:
self.reference_class = get_model(self.reference_class, self.engine_name,
signal=False)
self.tablename = '%s_%s_%s' % (model_class.tablename, self.reference_class.tablename, property_name)
self.collection_name = self.reference_class.get_collection_name(model_class.tablename, self._collection_name, model_class.tablename)
setattr(self.reference_class, self.collection_name,
_ManyToManyReverseReferenceProperty(self, self.collection_name))
def get_lazy(self, model_instance, name, default=None):
v = self.get_attr(model_instance, name, default)
if v is Lazy:
# _id = getattr(model_instance, 'id')
# if not _id:
# raise BadValueError('Instance is not a validate object of Model %s, ID property is not found' % model_instance.__class__.__name__)
result = getattr(model_instance, self.name)
v = result.ids(True)
setattr(model_instance, name, v)
#2014/3/1 save value to Model_instance._old_values
#this will cause manytomany need not to check when saving
#or it'll compare the difference between old_value and database(use select)
model_instance._old_values[self.name] = v
return v
def __get__(self, model_instance, model_class):
"""Get reference object.
This method will fetch unresolved entities from the datastore if
they are not already loaded.
Returns:
ReferenceProperty to Model object if property is set, else None.
"""
self.init_through()
if model_instance:
reference_id = getattr(model_instance, self.reversed_fieldname, None)
x = ManyResult(self.model_class, model_instance, self.property_name, self.reference_class, self.table,
self.fielda, self.fieldb, self.reversed_fieldname,
self.reference_fieldname, reference_id, through_model=self.through)
return x
else:
return self
def __set__(self, model_instance, value):
if model_instance is None:
return
if value and value is not Lazy:
value = get_objs_columns(value, self.reference_fieldname)
setattr(model_instance, self._attr_name(), value)
def get_value_for_datastore(self, model_instance, cached=False):
"""Get key of reference rather than reference itself."""
value = getattr(model_instance, self._attr_name(), None)
if not cached:
value = getattr(model_instance, self.property_name).ids()
setattr(model_instance, self._attr_name(), value)
return value
def get_display_value(self, value):
s = []
for x in value:
s.append(unicode(x))
return ' '.join(s)
def in_(self, *objs):
"""
Create a condition
"""
if not objs:
return self.table.c[self.fielda]!=self.table.c[self.fielda]
else:
ids = get_objs_columns(objs, self.reference_fieldname)
sub_query = select([self.table.c[self.fielda]], (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & (self.table.c[self.fieldb].in_(ids)))
condition = self.model_class.c[self.reversed_fieldname].in_(sub_query)
return condition
def join_in(self, *objs):
"""
Create a join condition, connect A and C
"""
if not objs:
return self.table.c[self.fielda]!=self.table.c[self.fielda]
else:
ids = get_objs_columns(objs, self.reference_fieldname)
return (self.table.c[self.fielda] == self.model_class.c[self.reversed_fieldname]) & (self.table.c[self.fieldb].in_(ids))
def join_right_in(self, *objs):
"""
Create a join condition, connect B and C
"""
if not objs:
return self.table.c[self.fielda]!=self.table.c[self.fielda]
else:
ids = get_objs_columns(objs, self.reference_fieldname)
return (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & (self.table.c[self.fielda].in_(ids))
def filter(self, *condition):
cond = true()
for c in condition:
if c is not None:
cond = c & cond
sub_query = select([self.table.c[self.fielda]], (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & cond)
condition = self.model_class.c[self.reversed_fieldname].in_(sub_query)
return condition
def join_filter(self, *condition):
cond = true()
for c in condition:
if c is not None:
cond = c & cond
return (self.table.c[self.fielda] == self.model_class.c[self.reversed_fieldname]) & (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & cond
def convert_dump(self, value):
if not value:
return []
return [int(x) for x in value.split(',')]
def to_column_info(self):
d = {}
d['verbose_name'] = self.verbose_name or ''
d['name'] = self.name
d['fieldname'] = self.fieldname
d['type'] = self.type_name
d['type_name'] = self.type_name
d['relation'] = 'ManyToMany(%s:%s-%s:%s)' % (self.model_class.__name__, self.reversed_fieldname,
self.reference_class.__name__, self.reference_fieldname)
self._get_column_info(d)
return d
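#Usage sketch (User/Group/Team models are assumptions): a plain ManyToMany
#creates its own link table, while the through form reuses an explicit model
#and enables with_relation() on query results.
#
#    class User(Model):
#        groups = ManyToMany('group')
#
#    u = User.get(1)
#    u.groups.add(g1, g2)     #insert link rows, skipping ones that exist
#    u.groups.ids()           #ids of the related Group rows
#    u.groups.update(g1, g3)  #make the relation exactly {g1, g3}
#    u.groups.clear()         #remove every link row for this user
#
#    class Team(Model):       #through form, 'teamuser' model assumed
#        members = ManyToMany('user', through='teamuser')
#    for u in team.members.with_relation():
#        u.relation           #the through-model row joining team and u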
def SelfReferenceProperty(verbose_name=None, collection_name=None, **attrs):
"""Create a self reference.
"""
if 'reference_class' in attrs:
raise ConfigurationError(
'Do not provide reference_class to self-reference.')
return ReferenceProperty(_SELF_REFERENCE, verbose_name, collection_name, **attrs)
SelfReference = SelfReferenceProperty
class _ReverseReferenceProperty(Property):
"""The inverse of the Reference property above.
We construct reverse references automatically for the model to which
the Reference property is pointing to create the one-to-many property for
that model. For example, if you put a Reference property in model A that
refers to model B, we automatically create a _ReverseReference property in
B called a_set that can fetch all of the model A instances that refer to
that instance of model B.
"""
def __init__(self, model, reference_id, reversed_id):
"""Constructor for reverse reference.
Constructor does not take standard values of other property types.
"""
self._model = model #A
self._reference_id = reference_id #A Reference(B) this is A's reference field
self._reversed_id = reversed_id #B's reference_field
self.verbose_name = ''
def __get__(self, model_instance, model_class):
"""Fetches collection of model instances of this collection property."""
if model_instance is not None: #model_instance is B's
_id = getattr(model_instance, self._reversed_id, None)
if _id is not None:
a_id = self._reference_id
a_field = self._model.c[self._reference_id]
return ReverseResult(self._model, a_field==_id, self._reference_id, model_class.table, model_instance, self._reversed_id)
else:
# return Result()
return None
else:
return self
def __set__(self, model_instance, value):
"""Not possible to set a new collection."""
raise BadValueError('Virtual property is read-only')
class _OneToOneReverseReferenceProperty(_ReverseReferenceProperty):
def __init__(self, model, reference_id, reversed_id, collection_name):
"""Constructor for reverse reference.
Constructor does not take standard values of other property types.
"""
self._model = model
self._reference_id = reference_id #B Reference(A) this is B's id
self._reversed_id = reversed_id #A's id
self._collection_name = collection_name
def __get__(self, model_instance, model_class):
"""Fetches collection of model instances of this collection property."""
if model_instance:
_id = getattr(model_instance, self._reversed_id, None)
# print self._resolved_attr_name()
if _id is not None:
#this will cache the reference object
resolved = getattr(model_instance, self._resolved_attr_name(), None)
if resolved is not None:
return resolved
else:
b_id = self._reference_id
d = self._model.c[self._reference_id]
instance = self._model.get(d==_id)
if not instance:
instance = self._model(**{b_id:_id})
instance.save()
setattr(model_instance, self._resolved_attr_name(), instance)
return instance
else:
return None
else:
return self
def _resolved_attr_name(self):
"""Get attribute of resolved attribute.
The resolved attribute is where the actual loaded reference instance is
stored on the referring model instance.
Returns:
Attribute name of where to store resolved reference model instance.
"""
return '_RESOLVED_' + self._collection_name
class _ManyToManyReverseReferenceProperty(_ReverseReferenceProperty):
def __init__(self, reference_property, collection_name):
"""Constructor for reverse reference.
Constructor does not take standard values of other property types.
"""
self.reference_property = reference_property
self._collection_name = collection_name
def __get__(self, model_instance, model_class):
"""Fetches collection of model instances of this collection property."""
self.reference_property.init_through()
self._reversed_id = self.reference_property.reference_fieldname
if model_instance:
reference_id = getattr(model_instance, self._reversed_id, None)
x = ManyResult(self.reference_property.reference_class, model_instance,
self._collection_name,
self.reference_property.model_class, self.reference_property.table,
self.reference_property.fieldb, self.reference_property.fielda,
self.reference_property.reference_fieldname,
self.reference_property.reversed_fieldname, reference_id,
through_model=self.reference_property.through)
return x
else:
return self
FILE = FileProperty
PICKLE = PickleProperty
UUID = UUIDProperty
UUID_B = UUIDBinaryProperty
JSON = JsonProperty
_fields_mapping = {
BIGINT:BigIntegerProperty,
str:StringProperty,
VARCHAR:StringProperty,
CHAR:CharProperty,
unicode: UnicodeProperty,
BINARY: BinaryProperty,
VARBINARY: VarBinaryProperty,
TEXT: TextProperty,
BLOB: BlobProperty,
int:IntegerProperty,
SMALLINT: SmallIntegerProperty,
INT:IntegerProperty,
float:FloatProperty,
FLOAT:FloatProperty,
bool:BooleanProperty,
BOOLEAN:BooleanProperty,
datetime.datetime:DateTimeProperty,
DATETIME:DateTimeProperty,
JSON:JsonProperty,
datetime.date:DateProperty,
DATE:DateProperty,
datetime.time:TimeProperty,
TIME:TimeProperty,
decimal.Decimal:DecimalProperty,
DECIMAL:DecimalProperty,
UUID_B:UUIDBinaryProperty,
UUID:UUIDProperty
}
def Field(type, *args, **kwargs):
t = _fields_mapping.get(type, type)
return t(*args, **kwargs)
def get_field_type(_type):
assert isinstance(_type, (str, unicode))
_t = eval(_type)
return _fields_mapping.get(_t, _t)
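# A short sketch of the Field() factory above: plain Python types and the
# SQL type aliases map to concrete Property classes (parameter names such
# as max_length and default are assumptions for illustration):
#
# title = Field(str, max_length=255)   # -> StringProperty(...)
# count = Field(int, default=0)        # -> IntegerProperty(...)
# body  = Field(TEXT)                  # -> TextProperty(...)
# get_field_type('int')                # -> IntegerProperty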
class ModelReprDescriptor(object):
def __get__(self, model_instance, model_class):
def f():
from IPython.display import display_html, display_svg
if model_instance is None:
display_html(self._cls_repr_html_(model_class))
display_svg(self._cls_repr_svg_(model_class))
else:
display_html(self._instance_repr_html_(model_instance))
return f
def _cls_repr_html_(self, cls):
from IPython.display import HTML
return HTML('<pre>'+print_model(cls)+'</pre>')
def _cls_repr_svg_(self, cls):
import os
from uliweb.orm.graph import generate_file
from uliweb import application
from uliweb.utils.common import get_tempfilename
from IPython.display import SVG
engine_name = cls.get_engine_name()
fontname = os.environ.get('dot_fontname', '')
outputfile = get_tempfilename('dot_svg_', suffix='.svg')
generate_file({cls.tablename:cls.table}, application.apps,
outputfile, 'svg', engine_name, fontname=fontname)
return SVG(filename=outputfile)
def _instance_repr_html_(self, instance):
from uliweb.core.html import Table
from IPython.display import HTML
s = []
for k, v in instance._fields_list:
if not isinstance(v, ManyToMany):
info = v.to_column_info()
d = [info['verbose_name'], info['name'], info['type_name']]
t = getattr(instance, k, None)
if isinstance(v, Reference) and t:
d.append('%s:%r:%s' % (v.reference_class.__name__, t.id, unicode(t)))
else:
d.append(t)
s.append(d)
return HTML(str(Table(s, ['Display Name', 'Column Name',
'Column Type', 'Value'])))
class Model(object):
__metaclass__ = ModelMetaclass
__dispatch_enabled__ = True
_engine_name = None
_connection = None
_alias = None #can be used via get_model(alias)
_collection_set_id = 1
_bind = True
_bound_classname = ''
_base_class = None
_lock = threading.Lock()
_c_lock = threading.Lock()
#add support for IPython notebook display
_ipython_display_ = ModelReprDescriptor()
def __init__(self, **kwargs):
self._old_values = {}
self._load(kwargs, from_='')
def set_saved(self):
self._old_values = self.to_dict()
for k, v in self.properties.items():
if isinstance(v, ManyToMany):
t = v.get_value_for_datastore(self, cached=True)
if not t is Lazy:
self._old_values[k] = t
def to_dict(self, fields=None, convert=True, manytomany=False):
d = {}
fields = fields or []
for k, v in self.properties.items():
if fields and not k in fields:
continue
if not isinstance(v, ManyToMany):
t = v.get_value_for_datastore(self)
if isinstance(t, Model):
t = t.id
if convert:
d[k] = self.field_str(t)
else:
d[k] = t
else:
if manytomany:
d[k] = getattr(self, v._lazy_value(), [])
return d
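# to_dict() sketch -- a hedged example, assuming a hypothetical User model:
#
# u = User.get(1)
# u.to_dict()                          # all values converted to strings
# u.to_dict(convert=False)             # raw datastore values
# u.to_dict(fields=['id', 'name'])     # restrict to the listed fields
# u.to_dict(manytomany=True)           # also include cached ManyToMany ids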
def field_str(self, v, strict=False):
if v is None:
if strict:
return ''
return v
if isinstance(v, datetime.datetime):
return v.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(v, datetime.date):
return v.strftime('%Y-%m-%d')
elif isinstance(v, datetime.time):
return v.strftime('%H:%M:%S')
elif isinstance(v, decimal.Decimal):
return str(v)
elif isinstance(v, unicode):
return v.encode(__default_encoding__)
else:
if strict:
return str(v)
return copy.deepcopy(v)
def _get_data(self):
"""
Get the changed properties; the result will be used to save the object
"""
if self.id is None or self.id == '':
d = {}
for k, v in self.properties.items():
# if not isinstance(v, ManyToMany):
if v.property_type == 'compound':
continue
if not isinstance(v, ManyToMany):
x = v.get_value_for_datastore(self)
if isinstance(x, Model):
x = x.id
elif x is None or (k=='id' and not x):
if isinstance(v, DateTimeProperty) and v.auto_now_add:
x = v.now()
elif (v.auto_add or (not v.auto and not v.auto_add)):
x = v.default_value()
else:
x = v.get_value_for_datastore(self, cached=True)
if x is not None and not x is Lazy:
d[k] = x
else:
d = {}
d['id'] = self.id
for k, v in self.properties.items():
if v.property_type == 'compound':
continue
t = self._old_values.get(k, None)
if not isinstance(v, ManyToMany):
x = v.get_value_for_datastore(self)
if isinstance(x, Model):
x = x.id
else:
x = v.get_value_for_datastore(self, cached=True)
if t != self.field_str(x) and not x is Lazy:
d[k] = x
return d
def is_saved(self):
return bool(self.id)
def update(self, **data):
for k, v in data.iteritems():
if k in self.properties:
if not isinstance(self.properties[k], ManyToMany):
x = self.properties[k].get_value_for_datastore(self)
if self.field_str(x) != self.field_str(v):
setattr(self, k, v)
else:
setattr(self, k, v)
return self
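# update() sketch -- only genuinely changed fields are reassigned
# (User is an assumed model name):
#
# u = User.get(1)
# u.update(name='x', age=20)   # unchanged values are left untouched
# u.save()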
def save(self, insert=False, changed=None, saved=None,
send_dispatch=True, version=False, version_fieldname=None,
version_exception=True):
"""
If insert=True, insert() will be used instead of update().
changed is a callback invoked just before the non-manytomany properties
are written; it is called as:
    changed(obj, created, old_data, new_data)
created is True when the record is being inserted; new_data may be
modified in place and the modified values will then be saved to the database.
version enables Optimistic Concurrency Control;
version_fieldname defaults to 'version'.
ManyToMany values found in the data are saved separately after the
row itself has been written.
"""
_saved = False
created = False
version_fieldname = version_fieldname or 'version'
d = self._get_data()
#fix: when d is empty the ORM would not insert the record (bug fixed 2013/04/07)
if d or not self.id or insert:
if not self.id or insert:
created = True
old = d.copy()
if get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'pre_save', instance=self, created=True, data=d, old_data=self._old_values, signal=self.tablename)
#process auto_now_add
_manytomany = {}
for k, v in self.properties.items():
if v.property_type == 'compound':
continue
if not isinstance(v, ManyToMany):
if isinstance(v, DateTimeProperty) and v.auto_now_add and k not in d:
d[k] = v.now()
elif (not k in d) and v.auto_add:
d[k] = v.default_value()
else:
if k in d:
_manytomany[k] = d.pop(k)
if d:
if callable(changed):
changed(self, created, self._old_values, d)
old.update(d)
obj = do_(self.table.insert().values(**d), self.get_session())
_saved = True
if obj.inserted_primary_key:
setattr(self, 'id', obj.inserted_primary_key[0])
if _manytomany:
for k, v in _manytomany.items():
if v:
_saved = getattr(self, k).update(v) or _saved
else:
_id = d.pop('id')
if d:
old = d.copy()
if get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'pre_save', instance=self, created=False, data=d, old_data=self._old_values, signal=self.tablename)
#process auto_now
_manytomany = {}
for k, v in self.properties.items():
if v.property_type == 'compound' or k == 'id':
continue
if not isinstance(v, ManyToMany):
if isinstance(v, DateTimeProperty) and v.auto_now and k not in d:
d[k] = v.now()
elif (not k in d) and v.auto:
d[k] = v.default_value()
else:
if k in d:
_manytomany[k] = d.pop(k)
if d:
_cond = self.table.c.id == self.id
if version:
version_field = self.table.c.get(version_fieldname)
if version_field is None:
raise KindError("version_fieldname %s does not exist in Model %s" % (version_fieldname, self.__class__.__name__))
_version_value = getattr(self, version_fieldname, 0)
# setattr(self, version_fieldname, _version_value+1)
d[version_fieldname] = _version_value+1
_cond = (version_field == _version_value) & _cond
if callable(changed):
changed(self, created, self._old_values, d)
old.update(d)
result = do_(self.table.update(_cond).values(**d), self.get_session())
_saved = True
if version:
if result.rowcount != 1:
_saved = False
if version_exception:
raise SaveError("The record %s:%d has been saved by others, current version is %d" % (self.tablename, self.id, _version_value))
else:
setattr(self, version_fieldname, d[version_fieldname])
if _manytomany:
for k, v in _manytomany.items():
if v is not None:
_saved = getattr(self, k).update(v) or _saved
if _saved:
for k, v in d.items():
x = self.properties[k].get_value_for_datastore(self)
if self.field_str(x) != self.field_str(v):
setattr(self, k, v)
if send_dispatch and get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'post_save', instance=self, created=created, data=old, old_data=self._old_values, signal=self.tablename)
self.set_saved()
if callable(saved):
saved(self, created, self._old_values, old)
return _saved
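# save() sketch showing the changed callback and optimistic locking --
# a hedged example; User and its 'version' field are assumptions:
#
# def on_change(obj, created, old_data, new_data):
#     # new_data may be modified in place before it is written
#     new_data['name'] = new_data.get('name', '').strip()
#
# u = User.get(1)
# u.name = 'new name'
# try:
#     u.save(changed=on_change, version=True)   # requires a 'version' field
# except SaveError:
#     pass   # another writer saved the record first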
def put(self, *args, **kwargs):
warnings.simplefilter('default')
warnings.warn("put method will be deprecated in next version.", DeprecationWarning)
return self.save(*args, **kwargs)
def delete(self, manytomany=True, delete_fieldname=None, send_dispatch=True,
onetoone=True):
"""
Delete the current object.
:param manytomany: whether to also clear all manytomany relationships
:param delete_fieldname: if True, the 'deleted' field is used for a
    logical delete; a string value names the flag property to set instead
:param onetoone: whether to also delete related one-to-one rows
"""
if get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'pre_delete', instance=self, signal=self.tablename)
if manytomany:
for k, v in self._manytomany.items():
getattr(self, k).clear()
if onetoone:
for k, v in self._onetoone.items():
row = getattr(self, k)
if row:
row.delete()
if delete_fieldname:
if delete_fieldname is True:
delete_fieldname = 'deleted'
if not hasattr(self, delete_fieldname):
raise KeyError("There is no property named %s" % delete_fieldname)
setattr(self, delete_fieldname, True)
self.save()
else:
do_(self.table.delete(self.table.c.id==self.id), self.get_session())
self.id = None
self._old_values = {}
if send_dispatch and get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'post_delete', instance=self, signal=self.tablename)
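# delete() sketch -- physical vs. logical delete (field names assumed):
#
# u = User.get(1)
# u.delete()                         # physical delete, also clears M2M rows
# u2 = User.get(2)
# u2.delete(delete_fieldname=True)   # sets u2.deleted = True and saves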
def __repr__(self):
s = []
for k, v in self._fields_list:
if not isinstance(v, ManyToMany):
t = getattr(self, k, None)
if isinstance(v, Reference) and t:
s.append('%r:<%s:%d>' % (k, v.__class__.__name__, t.id))
else:
s.append('%r:%r' % (k, t))
if self.__class__._base_class:
clsname = self.__class__._base_class.__name__
else:
clsname = self.__class__.__name__
return ('<%s {' % clsname) + ','.join(s) + '}>'
def __str__(self):
return str(self.id)
def __unicode__(self):
return unicode(self.id)
def get_display_value(self, field_name, value=None):
return self.properties[field_name].get_display_value(value or getattr(self, field_name))
def get_datastore_value(self, field_name):
return self.properties[field_name].get_value_for_datastore(self)
#classmethod========================================================
@classmethod
def add_property(cls, name, prop, config=True, set_property=True):
if isinstance(prop, Property):
check_reserved_word(name)
#process if there is already the same property
old_prop = cls.properties.get(name)
if old_prop:
prop.creation_counter = old_prop.creation_counter
cls.properties[name] = prop
if config:
prop.__property_config__(cls, name)
if set_property:
setattr(cls, name, prop)
if hasattr(cls, '_fields_list'):
index = -1
for i, (n, p) in enumerate(cls._fields_list):
if name == n:
index = i
break
if index >= 0:
cls._fields_list[index] = (name, prop)
else:
cls._fields_list.append((name, prop))
cls._fields_list.sort(lambda x, y: cmp(x[1].creation_counter, y[1].creation_counter))
else:
raise AttributeError("Prop should be instance of Property, but %r found" % prop)
@classmethod
def update_property(cls, name, prop, config=True, set_property=True):
if isinstance(prop, Property):
old_prop = cls.properties[name]
prop.creation_counter = old_prop.creation_counter
cls.properties[name] = prop
if config:
prop.__property_config__(cls, name)
if set_property:
setattr(cls, name, prop)
if hasattr(cls, '_fields_list'):
index = -1
for i, (n, p) in enumerate(cls._fields_list):
if name == n:
index = i
break
if index >= 0:
cls._fields_list[index] = (name, prop)
else:
raise AttributeError("Prop should be instance of Property, but %r found" % prop)
@classmethod
def get_collection_name(cls, from_class_name, collection_name=None, prefix=None):
"""
Get the reference collection_name. If collection_name is None,
generate one that does not conflict with existing attributes;
otherwise check whether the given name is already used by another
model and raise an exception if it is.
"""
if not collection_name:
collection_name = prefix + '_set'
if hasattr(cls, collection_name):
#if the xxx_set is already existed, then automatically
#create unique collection_set id
collection_name = prefix + '_set_' + str(cls._collection_set_id)
cls._collection_set_id += 1
else:
if collection_name in cls._collection_names:
if cls._collection_names.get(collection_name) != from_class_name:
raise DuplicatePropertyError("Model %s already has property %s" % (cls.__name__, collection_name))
return collection_name
@classmethod
def Reference(cls, name, model, reference_fieldname=None, collection_name=None, **kwargs):
field_from = getattr(cls, name)
if not field_from:
raise AttributeError("Field %s can't be found in Model %s" % (name, cls.tablename))
d = field_from.get_parameters()
d.update(kwargs)
prop = ReferenceProperty(reference_class=model,
reference_fieldname=reference_fieldname,
collection_name=collection_name,
**d)
cls.update_property(name, prop)
@classmethod
def OneToOne(cls, name, model, reference_fieldname=None, collection_name=None, **kwargs):
field_from = getattr(cls, name)
if not field_from:
raise AttributeError("Field %s can't be found in Model %s" % (name, cls.tablename))
d = field_from.get_parameters()
d.update(kwargs)
prop = OneToOne(reference_class=model,
reference_fieldname=reference_fieldname,
collection_name=collection_name,
**d)
cls.update_property(name, prop)
@classmethod
def ManyToMany(cls, name, model, collection_name=None,
reference_fieldname=None, reversed_fieldname=None, required=False,
through=None,
through_reference_fieldname=None, through_reversed_fieldname=None,
**kwargs):
prop = ManyToMany(reference_class=model,
collection_name=collection_name,
reference_fieldname=reference_fieldname,
reversed_fieldname=reversed_fieldname,
through=through,
through_reference_fieldname=through_reference_fieldname,
through_reversed_fieldname=through_reversed_fieldname,
**kwargs)
cls.add_property(name, prop)
#create property, it'll create Table object
prop.create(cls)
#create real table
if __auto_create__:
engine = cls.get_engine().engine
if not prop.through and not prop.table.exists(engine):
prop.table.create(engine, checkfirst=True)
@classmethod
def _set_tablename(cls, appname=None):
if not hasattr(cls, '__tablename__'):
name = get_tablename(cls.__name__)
else:
name = cls.__tablename__
if appname:
name = appname.lower() + '_' + name
cls.tablename = name
@classmethod
def get_session(cls):
if cls._connection:
return cls._connection
return get_session(cls.get_engine_name())
@classmethod
def get_engine_name(cls):
return cls._engine_name or __default_engine__
@classmethod
def get_engine(cls):
ec = cls.get_engine_name()
return engine_manager[ec]
@classmethod
def _use(cls, ec):
"""
underly implement of use
"""
# class ConnectModel(cls):
# pass
ConnectModel = type(cls.__name__, (cls,), {})
ConnectModel.tablename = cls.tablename
ConnectModel._base_class = cls
if isinstance(ec, (str, unicode)):
ConnectModel._engine_name = ec
elif isinstance(ec, Session):
ConnectModel._engine_name = ec.engine_name
ConnectModel._connection = ec
return ConnectModel
@classmethod
def use(cls, ec):
"""
use duplicates the Model class and binds it to ec,
where ec is an engine name or a Session object
"""
if isinstance(ec, (str, unicode)):
m = get_model(cls._alias, ec, signal=False)
else:
m = cls._use(ec)
return m
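# use() sketch -- bind a duplicated Model class to another engine or an
# explicit Session; the engine name 'readonly' and the Begin() session
# helper are assumptions for illustration:
#
# ReadUser = User.use('readonly')   # queries go to the 'readonly' engine
# session = Begin()                 # assumed session helper
# TxUser = User.use(session)        # all calls share that session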
@classmethod
def bind(cls, metadata=None, auto_create=False, reset=False):
cls._lock.acquire()
try:
cls.metadata = metadata or find_metadata(cls)
if cls.metadata:
cols = []
cls.manytomany = []
#add pre_create process
for k, f in cls._fields_list:
func = getattr(f, 'pre_create', None)
if func:
func(cls)
for k, f in cls._fields_list:
c = f.create(cls)
if c is not None:
cols.append(c)
if not getattr(cls, '__dynamic__', False):
#check the model_path
if cls._base_class:
model_path = cls._base_class.__module__ + '.' + cls._base_class.__name__
else:
model_path = cls.__module__ + '.' + cls.__name__
_path = __models__.get(cls.tablename, {}).get('model_path', '')
if _path and model_path != _path:
return
#check if the table is already existed
t = cls.metadata.tables.get(cls.tablename, None)
if t is not None and not __auto_set_model__ and not reset:
return
if t is not None:
cls.metadata.remove(t)
args = getattr(cls, '__table_args__', {})
args['mysql_charset'] = 'utf8'
cls.table = Table(cls.tablename, cls.metadata, *cols, **args)
#add appname to self.table
appname = cls.__module__
cls.table.__appname__ = appname[:appname.rfind('.')]
#add __mapping_only__ property to Table object
cls.table.__mapping_only__ = getattr(cls, '__mapping_only__', False)
cls.c = cls.table.c
cls.columns = cls.table.c
if hasattr(cls, 'OnInit'):
cls.OnInit()
if auto_create:
#only when metadata is bound will the table be created here;
#otherwise tables are created via create_all(db)
if cls.metadata.bind:
cls.create()
set_model(cls, created=True)
else:
set_model(cls)
else:
if __auto_set_model__:
set_model(cls)
cls._bound_classname = cls._alias
finally:
cls._lock.release()
@classmethod
def create(cls):
cls._c_lock.acquire()
try:
engine = get_connection(cls.get_engine_name())
if not cls.table.exists(engine):
cls.table.create(engine, checkfirst=True)
for x in cls.manytomany:
if not x.exists(engine):
x.create(engine, checkfirst=True)
finally:
cls._c_lock.release()
@classmethod
def get(cls, id=None, condition=None, fields=None, cache=False, engine_name=None, **kwargs):
"""
Get an object of this Model. If fields is given, only those fields
are loaded into the object; the other properties stay Lazy.
If cache is True, or the Model defines __cacheable__ = True, the
object cache is consulted first.
"""
if id is None and condition is None:
return None
can_cacheable = (cache or getattr(cls, '__cacheable__', None)) and \
isinstance(id, (int, long, str, unicode))
if can_cacheable:
#send 'get_object' topic to get cached object
obj = dispatch.get(cls, 'get_object', id)
if obj:
return obj
if condition is not None:
_cond = condition
else:
if isinstance(id, (int, long)):
_cond = cls.c.id==id
elif isinstance(id, (str, unicode)) and id.isdigit():
_cond = cls.c.id==int(id)
else:
_cond = id
#if there is no cached object, then just fetch from database
obj = cls.filter(_cond, **kwargs).fields(*(fields or [])).one()
if obj and (cache or getattr(cls, '__cacheable__', None)):
dispatch.call(cls, 'set_object', instance=obj)
return obj
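# get() sketch (User is assumed; requires a configured database):
#
# User.get(3)                          # by integer primary key
# User.get('3')                        # digit strings are coerced to int
# User.get(User.c.name == 'guest')     # by condition
# User.get(3, fields=['id', 'name'])   # other fields stay Lazy
# User.get(3, cache=True)              # try the object cache first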
def put_cached(self):
dispatch.call(self.__class__, 'set_object', instance=self)
@classmethod
def get_or_notfound(cls, condition=None, fields=None):
obj = cls.get(condition, fields=fields)
if not obj:
raise NotFound("Can't found the object", cls, condition)
return obj
@classmethod
def _data_prepare(cls, record):
d = {}
for k, v in record:
p = cls.properties.get(k)
if p and not isinstance(p, ManyToMany):
d[str(k)] = p.make_value_from_datastore(v)
else:
d[str(k)] = v
return d
@classmethod
def all(cls, **kwargs):
return Result(cls, **kwargs)
@classmethod
def filter(cls, *condition, **kwargs):
return Result(cls, **kwargs).filter(*condition)
@classonlymethod
def remove(cls, condition=None, **kwargs):
if isinstance(condition, (int, long)):
condition = cls.c.id==condition
elif isinstance(condition, (tuple, list)):
condition = cls.c.id.in_(condition)
do_(cls.table.delete(condition, **kwargs), cls.get_session())
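# remove() sketch -- the condition shortcuts handled above (User assumed):
#
# User.remove(5)                 # id == 5
# User.remove([1, 2, 3])         # id IN (1, 2, 3)
# User.remove(User.c.age > 90)   # arbitrary SQL condition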
@classmethod
def count(cls, condition=None, **kwargs):
count = do_(cls.table.count(condition, **kwargs), cls.get_session()).scalar()
return count
@classmethod
def any(cls, *condition, **kwargs):
return Result(cls, **kwargs).filter(*condition).any()
@classmethod
def load(cls, values, set_saved=True, from_='db'):
if isinstance(values, (list, tuple)):
d = cls._data_prepare(values)
elif isinstance(values, dict):
d = values
else:
raise BadValueError("Can't support the data type %r" % values)
# if 'id' not in d or not d['id']:
# raise BadValueError("ID property must be existed or could not be empty.")
o = cls()
o._load(d, use_delay=True, from_=from_)
if set_saved:
o.set_saved()
return o
def refresh(self, fields=None, **kwargs):
"""
Reload the instance with the current id from the database
"""
cond = self.c.id==self.id
query = self.filter(cond, **kwargs)
if not fields:
fields = list(self.table.c)
v = query.values_one(*fields)
if not v:
raise NotFound('Instance <%s:%d> cannot be found' % (self.tablename, self.id))
d = self._data_prepare(v.items())
self.update(**d)
self.set_saved()
def _load(self, data, use_delay=False, from_='db'):
if not data:
return
#compounds fields will be processed in the end
compounds = []
for prop in self.properties.values():
if from_ == 'db':
name = prop.fieldname
else:
name = prop.name
if name in data:
if prop.property_type == 'compound':
compounds.append(prop)
continue
value = data[name]
if from_ == 'dump':
value = prop.convert_dump(value)
else:
if prop.property_type == 'compound':
continue
# if use_delay or isinstance(prop, ManyToMany):
if use_delay:
value = Lazy
else:
if name != 'id':
value = prop.default_value()
else:
value = None
prop.__set__(self, value)
for prop in compounds:
if from_ == 'db':
name = prop.fieldname
else:
name = prop.name
if name in data:
value = data[name]
prop.__set__(self, value)
def dump(self, fields=None, exclude=None):
"""
Dump the current object to a dict whose values are strings.
ManyToMany fields are not dumped automatically; they are included
only when listed in the fields parameter.
"""
exclude = exclude or []
d = {}
if fields and 'id' not in fields:
fields = list(fields)
fields.append('id')
for k, v in self.properties.items():
if ((not fields) or (k in fields)) and (not exclude or (k not in exclude)):
if not isinstance(v, ManyToMany):
t = v.get_value_for_datastore(self)
if t is Lazy:
self.refresh()
t = v.get_value_for_datastore(self)
if isinstance(t, Model):
t = t.id
d[k] = v.to_str(t)
else:
if fields:
d[k] = ','.join([str(x) for x in getattr(self, v._lazy_value(), [])])
if d and 'id' not in d and 'id' in self.properties:
d['id'] = str(self.id)
return d
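# dump()/load() round-trip sketch -- a string-valued dict out, an instance
# back (User assumed; from_='dump' makes load() apply convert_dump):
#
# d = User.get(1).dump()           # e.g. {'id': '1', 'name': 'x'}
# u = User.load(d, from_='dump')   # values parsed back via convert_dump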
@classmethod
def migrate(cls, manytomany=True):
tables = [cls.tablename]
if manytomany:
for x in cls.manytomany:
tables.append(x.name)
migrate_tables(tables, cls.get_engine_name())
@classmethod
def clear_relation(cls):
"""
Clear relation properties pointing at the referenced Model, such as
OneToOne, Reference and ManyToMany
"""
for k, v in cls.properties.items():
if isinstance(v, ReferenceProperty):
if hasattr(v, 'collection_name') and hasattr(v.reference_class, v.collection_name):
delattr(v.reference_class, v.collection_name)
if isinstance(v, OneToOne):
#append to reference_class._onetoone
del v.reference_class._onetoone[v.collection_name]
@classmethod
def get_columns_info(cls):
for k, v in cls._fields_list:
yield v.to_column_info()
| bsd-2-clause | 3,979,961,714,335,375,000 | 34.868459 | 179 | 0.552089 | false |