Columns (name, type, string-length range):

    repo_name    stringlengths    5 .. 100
    ref          stringlengths    12 .. 67
    path         stringlengths    4 .. 244
    copies       stringlengths    1 .. 8
    content      stringlengths    0 .. 1.05M
GTmac/deepwalk
refs/heads/master
setup.py
8
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

requirements = [
    # TODO: put package requirements here
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='deepwalk',
    version='1.0.1',
    description='DeepWalk online learning of social representations.',
    long_description=readme + '\n\n' + history,
    author='Bryan Perozzi',
    author_email='[email protected]',
    url='https://github.com/phanein/deepwalk',
    packages=[
        'deepwalk',
    ],
    entry_points={'console_scripts': ['deepwalk = deepwalk.__main__:main']},
    package_dir={'deepwalk': 'deepwalk'},
    include_package_data=True,
    install_requires=requirements,
    license="GPLv3",
    zip_safe=False,
    keywords='deepwalk',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
woel0007/caravel
refs/heads/master
caravel/lib/python2.7/site-packages/pip/_vendor/progress/__init__.py
916
# Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

from __future__ import division

from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time


__version__ = '1.2'


class Infinite(object):
    file = stderr
    sma_window = 10

    def __init__(self, *args, **kwargs):
        self.index = 0
        self.start_ts = time()
        self._ts = self.start_ts
        self._dt = deque(maxlen=self.sma_window)
        for key, val in kwargs.items():
            setattr(self, key, val)

    def __getitem__(self, key):
        if key.startswith('_'):
            return None
        return getattr(self, key, None)

    @property
    def avg(self):
        return sum(self._dt) / len(self._dt) if self._dt else 0

    @property
    def elapsed(self):
        return int(time() - self.start_ts)

    @property
    def elapsed_td(self):
        return timedelta(seconds=self.elapsed)

    def update(self):
        pass

    def start(self):
        pass

    def finish(self):
        pass

    def next(self, n=1):
        if n > 0:
            now = time()
            dt = (now - self._ts) / n
            self._dt.append(dt)
            self._ts = now

        self.index = self.index + n
        self.update()

    def iter(self, it):
        for x in it:
            yield x
            self.next()
        self.finish()


class Progress(Infinite):
    def __init__(self, *args, **kwargs):
        super(Progress, self).__init__(*args, **kwargs)
        self.max = kwargs.get('max', 100)

    @property
    def eta(self):
        return int(ceil(self.avg * self.remaining))

    @property
    def eta_td(self):
        return timedelta(seconds=self.eta)

    @property
    def percent(self):
        return self.progress * 100

    @property
    def progress(self):
        return min(1, self.index / self.max)

    @property
    def remaining(self):
        return max(self.max - self.index, 0)

    def start(self):
        self.update()

    def goto(self, index):
        incr = index - self.index
        self.next(incr)

    def iter(self, it):
        try:
            self.max = len(it)
        except TypeError:
            pass

        for x in it:
            yield x
            self.next()
        self.finish()
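A minimal usage sketch for the base classes above, assuming this __init__.py is importable as the `progress` package (as pip vendors it here). The rendering subclasses such as `Bar` live in sibling modules, so the sketch sticks to the plain `Progress` counter defined in this file.

# Minimal sketch using only the Progress class defined above (no rendering).
from progress import Progress

items = range(20)
bar = Progress(max=len(items))   # extra kwargs become attributes in Infinite.__init__
for item in items:
    # ... do some work per item ...
    bar.next()                   # advances the index and the moving-average timer
print(bar.percent)               # 100.0 once the loop completes
bar.finish()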
vertoe/p2pool-drk
refs/heads/master
p2pool/test/util/test_graph.py
284
import unittest

from p2pool.util import graph


class Test(unittest.TestCase):
    def test_keep_largest(self):
        b = dict(a=1, b=3, c=5, d=7, e=9)
        assert graph.keep_largest(3, 'squashed')(b) == {'squashed': 9, 'd': 7, 'e': 9}
        assert graph.keep_largest(3)(b) == {'c': 5, 'd': 7, 'e': 9}
tuomassiren/proofofexistence
refs/heads/master
babel/messages/pofile.py
136
# -*- coding: utf-8 -*- """ babel.messages.pofile ~~~~~~~~~~~~~~~~~~~~~ Reading and writing of files in the ``gettext`` PO (portable object) format. :copyright: (c) 2013 by the Babel Team. :license: BSD, see LICENSE for more details. """ import os import re from babel.messages.catalog import Catalog, Message from babel.util import wraptext from babel._compat import text_type def unescape(string): r"""Reverse `escape` the given string. >>> print unescape('"Say:\\n \\"hello, world!\\"\\n"') Say: "hello, world!" <BLANKLINE> :param string: the string to unescape """ def replace_escapes(match): m = match.group(1) if m == 'n': return '\n' elif m == 't': return '\t' elif m == 'r': return '\r' # m is \ or " return m return re.compile(r'\\([\\trn"])').sub(replace_escapes, string[1:-1]) def denormalize(string): r"""Reverse the normalization done by the `normalize` function. >>> print denormalize(r'''"" ... "Say:\n" ... " \"hello, world!\"\n"''') Say: "hello, world!" <BLANKLINE> >>> print denormalize(r'''"" ... "Say:\n" ... " \"Lorem ipsum dolor sit " ... "amet, consectetur adipisicing" ... " elit, \"\n"''') Say: "Lorem ipsum dolor sit amet, consectetur adipisicing elit, " <BLANKLINE> :param string: the string to denormalize """ if '\n' in string: escaped_lines = string.splitlines() if string.startswith('""'): escaped_lines = escaped_lines[1:] lines = map(unescape, escaped_lines) return ''.join(lines) else: return unescape(string) def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None): """Read messages from a ``gettext`` PO (portable object) file from the given file-like object and return a `Catalog`. >>> from datetime import datetime >>> from StringIO import StringIO >>> buf = StringIO(''' ... #: main.py:1 ... #, fuzzy, python-format ... msgid "foo %(name)s" ... msgstr "quux %(name)s" ... ... # A user comment ... #. An auto comment ... #: main.py:3 ... msgid "bar" ... msgid_plural "baz" ... msgstr[0] "bar" ... msgstr[1] "baaz" ... ''') >>> catalog = read_po(buf) >>> catalog.revision_date = datetime(2007, 04, 01) >>> for message in catalog: ... if message.id: ... print (message.id, message.string) ... print ' ', (message.locations, message.flags) ... print ' ', (message.user_comments, message.auto_comments) (u'foo %(name)s', u'quux %(name)s') ([(u'main.py', 1)], set([u'fuzzy', u'python-format'])) ([], []) ((u'bar', u'baz'), (u'bar', u'baaz')) ([(u'main.py', 3)], set([])) ([u'A user comment'], [u'An auto comment']) .. versionadded:: 1.0 Added support for explicit charset argument. :param fileobj: the file-like object to read the PO file from :param locale: the locale identifier or `Locale` object, or `None` if the catalog is not bound to a locale (which basically means it's a template) :param domain: the message domain :param ignore_obsolete: whether to ignore obsolete messages in the input :param charset: the character set of the catalog. 
""" catalog = Catalog(locale=locale, domain=domain, charset=charset) counter = [0] offset = [0] messages = [] translations = [] locations = [] flags = [] user_comments = [] auto_comments = [] obsolete = [False] context = [] in_msgid = [False] in_msgstr = [False] in_msgctxt = [False] def _add_message(): translations.sort() if len(messages) > 1: msgid = tuple([denormalize(m) for m in messages]) else: msgid = denormalize(messages[0]) if isinstance(msgid, (list, tuple)): string = [] for idx in range(catalog.num_plurals): try: string.append(translations[idx]) except IndexError: string.append((idx, '')) string = tuple([denormalize(t[1]) for t in string]) else: string = denormalize(translations[0][1]) if context: msgctxt = denormalize('\n'.join(context)) else: msgctxt = None message = Message(msgid, string, list(locations), set(flags), auto_comments, user_comments, lineno=offset[0] + 1, context=msgctxt) if obsolete[0]: if not ignore_obsolete: catalog.obsolete[msgid] = message else: catalog[msgid] = message del messages[:]; del translations[:]; del context[:]; del locations[:]; del flags[:]; del auto_comments[:]; del user_comments[:]; obsolete[0] = False counter[0] += 1 def _process_message_line(lineno, line): if line.startswith('msgid_plural'): in_msgid[0] = True msg = line[12:].lstrip() messages.append(msg) elif line.startswith('msgid'): in_msgid[0] = True offset[0] = lineno txt = line[5:].lstrip() if messages: _add_message() messages.append(txt) elif line.startswith('msgstr'): in_msgid[0] = False in_msgstr[0] = True msg = line[6:].lstrip() if msg.startswith('['): idx, msg = msg[1:].split(']', 1) translations.append([int(idx), msg.lstrip()]) else: translations.append([0, msg]) elif line.startswith('msgctxt'): if messages: _add_message() in_msgid[0] = in_msgstr[0] = False context.append(line[7:].lstrip()) elif line.startswith('"'): if in_msgid[0]: messages[-1] += u'\n' + line.rstrip() elif in_msgstr[0]: translations[-1][1] += u'\n' + line.rstrip() elif in_msgctxt[0]: context.append(line.rstrip()) for lineno, line in enumerate(fileobj.readlines()): line = line.strip() if not isinstance(line, text_type): line = line.decode(catalog.charset) if line.startswith('#'): in_msgid[0] = in_msgstr[0] = False if messages and translations: _add_message() if line[1:].startswith(':'): for location in line[2:].lstrip().split(): pos = location.rfind(':') if pos >= 0: try: lineno = int(location[pos + 1:]) except ValueError: continue locations.append((location[:pos], lineno)) elif line[1:].startswith(','): for flag in line[2:].lstrip().split(','): flags.append(flag.strip()) elif line[1:].startswith('~'): obsolete[0] = True _process_message_line(lineno, line[2:].lstrip()) elif line[1:].startswith('.'): # These are called auto-comments comment = line[2:].strip() if comment: # Just check that we're not adding empty comments auto_comments.append(comment) else: # These are called user comments user_comments.append(line[1:].strip()) else: _process_message_line(lineno, line) if messages: _add_message() # No actual messages found, but there was some info in comments, from which # we'll construct an empty header message elif not counter[0] and (flags or user_comments or auto_comments): messages.append(u'') translations.append([0, u'']) _add_message() return catalog WORD_SEP = re.compile('(' r'\s+|' # any whitespace r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash ')') def escape(string): r"""Escape the given string so that it can be included in double-quoted strings 
in ``PO`` files. >>> escape('''Say: ... "hello, world!" ... ''') '"Say:\\n \\"hello, world!\\"\\n"' :param string: the string to escape """ return '"%s"' % string.replace('\\', '\\\\') \ .replace('\t', '\\t') \ .replace('\r', '\\r') \ .replace('\n', '\\n') \ .replace('\"', '\\"') def normalize(string, prefix='', width=76): r"""Convert a string into a format that is appropriate for .po files. >>> print normalize('''Say: ... "hello, world!" ... ''', width=None) "" "Say:\n" " \"hello, world!\"\n" >>> print normalize('''Say: ... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, " ... ''', width=32) "" "Say:\n" " \"Lorem ipsum dolor sit " "amet, consectetur adipisicing" " elit, \"\n" :param string: the string to normalize :param prefix: a string that should be prepended to every line :param width: the maximum line width; use `None`, 0, or a negative number to completely disable line wrapping """ if width and width > 0: prefixlen = len(prefix) lines = [] for line in string.splitlines(True): if len(escape(line)) + prefixlen > width: chunks = WORD_SEP.split(line) chunks.reverse() while chunks: buf = [] size = 2 while chunks: l = len(escape(chunks[-1])) - 2 + prefixlen if size + l < width: buf.append(chunks.pop()) size += l else: if not buf: # handle long chunks by putting them on a # separate line buf.append(chunks.pop()) break lines.append(u''.join(buf)) else: lines.append(line) else: lines = string.splitlines(True) if len(lines) <= 1: return escape(string) # Remove empty trailing line if lines and not lines[-1]: del lines[-1] lines[-1] += '\n' return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines]) def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False, sort_output=False, sort_by_file=False, ignore_obsolete=False, include_previous=False): r"""Write a ``gettext`` PO (portable object) template file for a given message catalog to the provided file-like object. >>> catalog = Catalog() >>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)], ... 
flags=('fuzzy',)) <Message...> >>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)]) <Message...> >>> from io import BytesIO >>> buf = BytesIO() >>> write_po(buf, catalog, omit_header=True) >>> print buf.getvalue() #: main.py:1 #, fuzzy, python-format msgid "foo %(name)s" msgstr "" <BLANKLINE> #: main.py:3 msgid "bar" msgid_plural "baz" msgstr[0] "" msgstr[1] "" <BLANKLINE> <BLANKLINE> :param fileobj: the file-like object to write to :param catalog: the `Catalog` instance :param width: the maximum line width for the generated output; use `None`, 0, or a negative number to completely disable line wrapping :param no_location: do not emit a location comment for every message :param omit_header: do not include the ``msgid ""`` entry at the top of the output :param sort_output: whether to sort the messages in the output by msgid :param sort_by_file: whether to sort the messages in the output by their locations :param ignore_obsolete: whether to ignore obsolete messages and not include them in the output; by default they are included as comments :param include_previous: include the old msgid as a comment when updating the catalog """ def _normalize(key, prefix=''): return normalize(key, prefix=prefix, width=width) def _write(text): if isinstance(text, text_type): text = text.encode(catalog.charset, 'backslashreplace') fileobj.write(text) def _write_comment(comment, prefix=''): # xgettext always wraps comments even if --no-wrap is passed; # provide the same behaviour if width and width > 0: _width = width else: _width = 76 for line in wraptext(comment, _width): _write('#%s %s\n' % (prefix, line.strip())) def _write_message(message, prefix=''): if isinstance(message.id, (list, tuple)): if message.context: _write('%smsgctxt %s\n' % (prefix, _normalize(message.context, prefix))) _write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix))) _write('%smsgid_plural %s\n' % ( prefix, _normalize(message.id[1], prefix) )) for idx in range(catalog.num_plurals): try: string = message.string[idx] except IndexError: string = '' _write('%smsgstr[%d] %s\n' % ( prefix, idx, _normalize(string, prefix) )) else: if message.context: _write('%smsgctxt %s\n' % (prefix, _normalize(message.context, prefix))) _write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix))) _write('%smsgstr %s\n' % ( prefix, _normalize(message.string or '', prefix) )) messages = list(catalog) if sort_output: messages.sort() elif sort_by_file: messages.sort(lambda x,y: cmp(x.locations, y.locations)) for message in messages: if not message.id: # This is the header "message" if omit_header: continue comment_header = catalog.header_comment if width and width > 0: lines = [] for line in comment_header.splitlines(): lines += wraptext(line, width=width, subsequent_indent='# ') comment_header = u'\n'.join(lines) _write(comment_header + u'\n') for comment in message.user_comments: _write_comment(comment) for comment in message.auto_comments: _write_comment(comment, prefix='.') if not no_location: locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno) for filename, lineno in message.locations]) _write_comment(locs, prefix=':') if message.flags: _write('#%s\n' % ', '.join([''] + sorted(message.flags))) if message.previous_id and include_previous: _write_comment('msgid %s' % _normalize(message.previous_id[0]), prefix='|') if len(message.previous_id) > 1: _write_comment('msgid_plural %s' % _normalize( message.previous_id[1] ), prefix='|') _write_message(message) _write('\n') if not ignore_obsolete: for message in 
catalog.obsolete.values(): for comment in message.user_comments: _write_comment(comment) _write_message(message, prefix='#~ ') _write('\n')
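A short round-trip sketch for the `read_po` and `write_po` functions in this module. The PO snippet and the locale are made up for illustration; the import path and the `omit_header` keyword come from the code above, and the `StringIO`/`BytesIO` pairing mirrors the Python 2 era doctests.

# Round-trip sketch: parse a tiny PO snippet into a Catalog, then write it back out.
# (Illustrative only; the message content below is made up.)
from io import BytesIO
from StringIO import StringIO  # Python 2, matching the doctests above

from babel.messages.pofile import read_po, write_po

buf = StringIO('''
#: cli.py:10
#, python-format
msgid "Hello, %(name)s!"
msgstr "Bonjour, %(name)s !"
''')
catalog = read_po(buf, locale='fr')

out = BytesIO()
write_po(out, catalog, omit_header=True)
print out.getvalue()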
brutkin/commons
refs/heads/master
src/python/twitter/common/dirutil/tail.py
14
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================

__author__ = 'Brian Wickman'

import os
import errno
import time


def _tail_lines(fd, linesback=10):
    if fd is None:
        return

    # Contributed to Python Cookbook by Ed Pascoe (2003)
    avgcharsperline = 75

    while True:
        try:
            fd.seek(int(-1 * avgcharsperline * linesback), 2)
        except IOError:
            fd.seek(0)

        atstart = fd.tell() == 0
        lines = fd.read().splitlines()
        if atstart or len(lines) > (linesback + 1):
            break

        avgcharsperline = avgcharsperline * 1.3

    if len(lines) > linesback:
        start = len(lines) - linesback - 1
    else:
        start = 0

    return lines[start:start + linesback]


def wait_until_opened(filename, forever=True, clock=time):
    while True:
        try:
            return open(filename, 'r')
        except OSError as e:
            if e.errno == errno.ENOENT:
                if forever:
                    clock.sleep(1)
                else:
                    return None
            else:
                raise


def tail(filename, lines=10):
    with open(filename, 'r') as fp:
        for line in _tail_lines(fp, lines):
            yield line


def tail_f(filename, forever=True, include_last=False, clock=time):
    fd = wait_until_opened(filename, forever, clock)

    # wind back to near the end of the file...
    last_lines = _tail_lines(fd, 10)

    while True:
        if fd is None:
            return

        where = fd.tell()

        if last_lines:
            yield last_lines.pop(0)
            continue
        else:
            line = fd.readline()

        if line:
            yield line
        else:
            # check health of the file descriptor.
            fd_results = os.fstat(fd.fileno())
            try:
                st_results = None
                st_results = os.stat(filename)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    fd = wait_until_opened(filename, forever, clock)
                    continue
                else:
                    raise

            # file changed from underneath us, reopen
            if fd_results.st_ino != st_results.st_ino:
                fd.close()
                fd = wait_until_opened(filename, forever, clock)
                continue

            if st_results.st_size < where:
                # file truncated, rewind
                fd.seek(0)
            else:
                # our buffer has not yet caught up, wait.
                clock.sleep(1)
                fd.seek(where)
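A small usage sketch for the helpers above. The import path follows this file's location under src/python/, and the log path is hypothetical; `tail` yields the last N lines once, while `tail_f` keeps following the file (surviving truncation and rotation) until the consumer stops iterating.

# Usage sketch for the tail helpers above (the log path is hypothetical).
from twitter.common.dirutil.tail import tail, tail_f

# Print the last 10 lines of the file once.
for line in tail('/var/log/myapp.log', lines=10):
    print(line)

# Follow the file like `tail -f`, yielding new lines as they are appended.
for line in tail_f('/var/log/myapp.log'):
    print(line.rstrip())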
eternalthinker/flask-server-rq-example
refs/heads/master
edge_detect_tests/cv_fgcrop_test.py
1
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


def detect_all():
    srcdir = "images"
    i = 0
    for f in os.listdir(srcdir):
        if f.endswith(".jpg"):
            i += 1
            print "Processing:", i, f
            detect_object(srcdir, f)
    print "END"


def detect_object(srcdir, imgname):
    imgpath = srcdir + os.sep + imgname
    img = cv2.imread(imgpath, 1)
    grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #grayimg = cv2.Canny(img, 250, 250)
    _, thresh = cv2.threshold(grayimg, 250, 255, cv2.THRESH_BINARY_INV)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Only a few rects need to be processed; the whole content is mostly within these rects
    # For example, in case of earrings, two separate large rects might be detected. So processing just the
    # largest 2 rects should give the bounding box of whole content.
    # Uncomment rectangle() call in below loop to see the rects
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    print "Processing countours. Count:", len(contours)

    xmin, ymin, xmax, ymax = img.shape[1], img.shape[0], 0, 0
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        #cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 1)  # Draw all processed contour rects
        if x < xmin:
            xmin = x
        if x + w > xmax:
            xmax = x + w
        if y < ymin:
            ymin = y
        if y + h > ymax:
            ymax = y + h

    x, y, w, h = xmin, ymin, xmax - xmin, ymax - ymin
    crop = img[y:y+h, x:x+w]
    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 1)

    plt.subplot(121), plt.imshow(img, cmap = 'gray')
    plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(crop, cmap = 'gray')
    plt.title('Threshold Cropped Image'), plt.xticks([]), plt.yticks([])

    destdir = "cropped_threshold/"
    plt.savefig(destdir + imgname)
    plt.show()


if __name__ == "__main__":
    #detect_all
    detect_object('images', 'test.jpg')
    #detect_object('.', 'egg.jpg')
shanecelis/ode-0.12-drawstuff
refs/heads/master
bindings/python/demos/tutorial2.py
13
#!/usr/bin/env python

# http://pyode.sourceforge.net/tutorials/tutorial2.html

# pyODE example 2: Connecting bodies with joints

# modified by Gideon Klompje (removed literals and using
# 'ode.Mass.setSphereTotal' instead of 'ode.Mass.setSphere')


import ode
import pygame

from pygame.locals import QUIT, KEYDOWN

# Constants
WINDOW_RESOLUTION = (640, 480)

DRAW_SCALE = WINDOW_RESOLUTION[0] / 5
"""Factor to multiply physical coordinates by to obtain screen size in pixels"""

DRAW_OFFSET = (WINDOW_RESOLUTION[0] / 2, 50)
"""Screen coordinates (in pixels) that map to the physical origin (0, 0, 0)"""

BACKGROUND_COLOR = (255, 255, 255)

GRAVITY = (0, -9.81, 0)

SPHERE1_POSITION = (1, 0, 0)
SPHERE1_MASS = 1
SPHERE1_RADIUS = 0.15
SPHERE1_COLOR = (55, 0, 200)

SPHERE2_POSITION = (2, 0, 0)
SPHERE2_MASS = 1
SPHERE2_RADIUS = 0.15
SPHERE2_COLOR = (55, 0, 200)

JOINT1_ANCHOR = (0, 0, 0)
JOINT1_COLOR = (200, 0, 55)
JOINT1_WIDTH = 2
"""Width of the line (in pixels) representing the joint"""

JOINT2_ANCHOR = SPHERE1_POSITION
JOINT2_COLOR = (200, 0, 55)
JOINT2_WIDTH = 2
"""Width of the line (in pixels) representing the joint"""

TIME_STEP = 0.04

# Utility functions
def coord(x, y, integer=False):
    """
    Convert world coordinates to pixel coordinates.  Setting 'integer' to
    True will return integer coordinates.
    """
    xs = (DRAW_OFFSET[0] + DRAW_SCALE*x)
    ys = (DRAW_OFFSET[1] - DRAW_SCALE*y)

    if integer:
        return int(round(xs)), int(round(ys))
    else:
        return xs, ys

# Initialize pygame
pygame.init()

# Open a display
screen = pygame.display.set_mode(WINDOW_RESOLUTION)

# Create a world object
world = ode.World()
world.setGravity(GRAVITY)

# Create two bodies
body1 = ode.Body(world)
M = ode.Mass()
M.setSphereTotal(SPHERE1_MASS, SPHERE1_RADIUS)
body1.setMass(M)
body1.setPosition(SPHERE1_POSITION)

body2 = ode.Body(world)
M = ode.Mass()
M.setSphereTotal(SPHERE2_MASS, SPHERE2_RADIUS)
body2.setMass(M)
body2.setPosition(SPHERE2_POSITION)

# Connect body1 with the static environment
j1 = ode.BallJoint(world)
j1.attach(body1, ode.environment)
j1.setAnchor(JOINT1_ANCHOR)

# Connect body2 with body1
j2 = ode.BallJoint(world)
j2.attach(body1, body2)
j2.setAnchor(JOINT2_ANCHOR)

# Simulation loop...
if __name__ == "__main__":
    fps = 1.0 / TIME_STEP
    clk = pygame.time.Clock()

    sph1_rad = int(DRAW_SCALE * SPHERE1_RADIUS)
    sph2_rad = int(DRAW_SCALE * SPHERE2_RADIUS)

    loopFlag = True
    while loopFlag:
        for e in pygame.event.get():
            if e.type == QUIT:
                loopFlag = False
            if e.type == KEYDOWN:
                loopFlag = False

        # Clear the screen
        screen.fill(BACKGROUND_COLOR)

        # Draw the two bodies and the lines representing the joints
        x1, y1, z1 = body1.getPosition()
        x2, y2, z2 = body2.getPosition()
        xj1, yj1, zj1 = j1.getAnchor()
        xj2, yj2, zj2 = j2.getAnchor()

        pygame.draw.line(screen, JOINT1_COLOR, coord(xj1, yj1), coord(x1, y1), JOINT1_WIDTH)
        pygame.draw.line(screen, JOINT2_COLOR, coord(xj2, yj2), coord(x2, y2), JOINT2_WIDTH)
        pygame.draw.circle(screen, SPHERE1_COLOR, coord(x1, y1, integer=True), sph1_rad, 0)
        pygame.draw.circle(screen, SPHERE2_COLOR, coord(x2, y2, integer=True), sph2_rad, 0)

        pygame.display.flip()

        # Next simulation step
        world.step(TIME_STEP)

        # Try to keep the specified framerate
        clk.tick(fps)
abdellatifkarroum/odoo
refs/heads/8.0
addons/gamification/wizard/__init__.py
389
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import update_goal
import grant_badge
kocsenc/problem-of-the-day
refs/heads/master
2014/october/SentenceFinder/finder.py
1
# Kocsen Chung
# Sentence Finder

# PROBLEM
# Given the lipsum text, create an array of all the sentences. Then, with that data, create a function that
# takes in a string and returns each sentence that contains that string. Partial word matches such as "sum"
# in the word "lipsum" still count. Matching should also be case insensitive.


def main():
    """
    Gather sentences split up by '.' and stripped.
    Use Python's 'in' operator to check for the query.
    """
    filename = "lorem.txt"
    sentences = []
    query = "lorem"
    with open(filename) as f:
        sentences = f.read().split(".")

    found_sentences = []
    for sentence in sentences:
        sentence = sentence.strip()
        if query.lower() in sentence.lower():
            found_sentences.append(sentence)

    print("Found Sentences with query " + query)
    print(found_sentences)


if __name__ == "__main__":
    main()
sobjornstad/esc
refs/heads/master
esc/function_loader.py
1
""" function_loader.py - load esc functions from builtins and plugins onto menus """ import importlib import os from pathlib import Path import sys from .oops import ProgrammingError def _import_user_functions(): """ Dynamically import any .py files in the user's esc plugins directory. The plugins directory is just inside the esc config directory (which presently doesn't contain anything else!) This is the first of ~/.esc or $XDG_CONFIG_HOME/share/esc or ~/.config/esc that is found. The files are imported with the esc namespace first on the path, so doing e.g., 'from esc.commands import main_menu' will work automagically, even if esc isn't on the PYTHONPATH. """ xdg_home = os.environ.get('XDG_CONFIG_HOME', str(Path.home() / ".config")) possible_dirs = (Path.home() / ".esc" / "plugins", Path(xdg_home) / "esc" / "plugins") try: config_path = next(i for i in possible_dirs if i.exists() and i.is_dir()) except StopIteration: # no config path, don't import anything return sys.path.insert(0, str(config_path)) for child in sorted(config_path.iterdir()): try: if child.is_file() and child.name.endswith('.py'): mod_name = child.name.rsplit('.', 1)[0] importlib.import_module(mod_name) except Exception as e: raise ProgrammingError( f"Your custom function file '{str(child)}' could not be loaded. " f"Please see the traceback above for details.") from e del sys.path[0] def load_all(): """ Load built-in and user functions files. This will execute the constructors in the functions files, which will (if these files are written correctly) ultimately register the functions onto main_menu. This method needs to be called only once at application startup. """ from . import functions # pylint: disable=unused-import, wrong-import-position _import_user_functions()
hustbeta/openstack-juno-api-adventure
refs/heads/master
examples/keystone/v3/10_create_user.py
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import datetime
import json

import keystoneclient
import keystoneclient.auth.identity.v3
import keystoneclient.exceptions
import keystoneclient.session
import keystoneclient.v3.client

import local_settings


def get_unscoped_client():
    keystone = keystoneclient.v3.client.Client(auth_url=local_settings.auth_url_v3,
                                               username=local_settings.username,
                                               password=local_settings.password,
                                               unscoped=True)
    return keystone


def list_user_projects(keystone):
    projects = keystone.projects.list(user=keystone.user_id)
    return projects


def main():
    """
    1. Get an unscoped token from the username and password.
    2. Use the unscoped token to list the user's projects.
    3. Use the user's first available project to obtain a scoped token.
    4. Try out the scoped token!
    """
    keystone = get_unscoped_client()
    keystone.management_url = local_settings.auth_url_v3
    projects = list_user_projects(keystone)
    auth = keystoneclient.auth.identity.v3.Token(auth_url=local_settings.auth_url_v3,
                                                 token=keystone.auth_token,
                                                 project_id=projects[0].id)
    session = keystoneclient.session.Session(auth=auth)
    # Note: the auth_url below is required; otherwise an error is raised.
    keystone2 = keystoneclient.client.Client(auth_url=local_settings.auth_url_v3,
                                             session=session)
    now = datetime.datetime.now()
    name = now.strftime('%Y%m%d%H%M%S')
    user = keystone2.users.create(name,
                                  password='oseasy',
                                  email='%[email protected]' % name,
                                  description='description...',
                                  default_project='b1760fd937764b61a86feff3d4bd42d1')
    print json.dumps(user.to_dict())


main()
demis001/scikit-bio
refs/heads/master
skbio/stats/ordination/tests/test_ordination.py
3
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function import six from six import binary_type, text_type import warnings import unittest import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import numpy.testing as npt import pandas as pd from IPython.core.display import Image, SVG from nose.tools import assert_is_instance, assert_true from scipy.spatial.distance import pdist from skbio import DistanceMatrix from skbio.stats.ordination import ( CA, RDA, CCA, PCoA, OrdinationResults, corr, mean_and_std, assert_ordination_results_equal) from skbio.util import get_data_path def normalize_signs(arr1, arr2): """Change column signs so that "column" and "-column" compare equal. This is needed because results of eigenproblmes can have signs flipped, but they're still right. Notes ===== This function tries hard to make sure that, if you find "column" and "-column" almost equal, calling a function like np.allclose to compare them after calling `normalize_signs` succeeds. To do so, it distinguishes two cases for every column: - It can be all almost equal to 0 (this includes a column of zeros). - Otherwise, it has a value that isn't close to 0. In the first case, no sign needs to be flipped. I.e., for |epsilon| small, np.allclose(-epsilon, 0) is true if and only if np.allclose(epsilon, 0) is. In the second case, the function finds the number in the column whose absolute value is largest. Then, it compares its sign with the number found in the same index, but in the other array, and flips the sign of the column as needed. """ # Let's convert everyting to floating point numbers (it's # reasonable to assume that eigenvectors will already be floating # point numbers). This is necessary because np.array(1) / # np.array(0) != np.array(1.) / np.array(0.) arr1 = np.asarray(arr1, dtype=np.float64) arr2 = np.asarray(arr2, dtype=np.float64) if arr1.shape != arr2.shape: raise ValueError( "Arrays must have the same shape ({0} vs {1}).".format(arr1.shape, arr2.shape) ) # To avoid issues around zero, we'll compare signs of the values # with highest absolute value max_idx = np.abs(arr1).argmax(axis=0) max_arr1 = arr1[max_idx, range(arr1.shape[1])] max_arr2 = arr2[max_idx, range(arr2.shape[1])] sign_arr1 = np.sign(max_arr1) sign_arr2 = np.sign(max_arr2) # Store current warnings, and ignore division by zero (like 1. / # 0.) and invalid operations (like 0. / 0.) wrn = np.seterr(invalid='ignore', divide='ignore') differences = sign_arr1 / sign_arr2 # The values in `differences` can be: # 1 -> equal signs # -1 -> diff signs # Or nan (0/0), inf (nonzero/0), 0 (0/nonzero) np.seterr(**wrn) # Now let's deal with cases where `differences != \pm 1` special_cases = (~np.isfinite(differences)) | (differences == 0) # In any of these cases, the sign of the column doesn't matter, so # let's just keep it differences[special_cases] = 1 return arr1 * differences, arr2 def chi_square_distance(data_table, between_rows=True): """Computes the chi-square distance between two rows or columns of input. It is a measure that has no upper limit, and it excludes double-zeros. Parameters ---------- data_table : 2D array_like An array_like object of shape (n, p). 
The input must be a frequency table (so that the sum of all cells equals 1, and all values are non-negative). between_rows : bool (defaults to True) Indicates whether distance is computed between rows (default) or columns. Returns ------- Y : ndarray Returns a condensed distance matrix. For each i and j (where i<j<n), the chi square distance between u=X[i] and v=X[j] is computed and stored in `Y[(n choose 2) - (n - i choose 2) + (j - i - 1)]`. See Also -------- scipy.spatial.distance.squareform References ---------- This coefficient appears in Legendre and Legendre (1998) as formula 7.54 (as D_{16}). Another source is http://www.springerreference.com/docs/html/chapterdbid/60817.html """ data_table = np.asarray(data_table, dtype=np.float64) if not np.allclose(data_table.sum(), 1): raise ValueError("Input is not a frequency table: if it is an" " abundance table you could scale it as" " `data_table / data_table.sum()`.") if np.any(data_table < 0): raise ValueError("A frequency table can't have negative values.") # The distances are always computed between the rows of F F = data_table if between_rows else data_table.T row_sums = F.sum(axis=1, keepdims=True) column_sums = F.sum(axis=0) scaled_F = F / (row_sums * np.sqrt(column_sums)) return pdist(scaled_F, 'euclidean') class TestNormalizeSigns(object): def test_shapes_and_nonarray_input(self): with npt.assert_raises(ValueError): normalize_signs([[1, 2], [3, 5]], [[1, 2]]) def test_works_when_different(self): """Taking abs value of everything would lead to false positives.""" a = np.array([[1, -1], [2, 2]]) b = np.array([[-1, -1], [2, 2]]) with npt.assert_raises(AssertionError): npt.assert_equal(*normalize_signs(a, b)) def test_easy_different(self): a = np.array([[1, 2], [3, -1]]) b = np.array([[-1, 2], [-3, -1]]) npt.assert_equal(*normalize_signs(a, b)) def test_easy_already_equal(self): a = np.array([[1, -2], [3, 1]]) b = a.copy() npt.assert_equal(*normalize_signs(a, b)) def test_zeros(self): a = np.array([[0, 3], [0, -1]]) b = np.array([[0, -3], [0, 1]]) npt.assert_equal(*normalize_signs(a, b)) def test_hard(self): a = np.array([[0, 1], [1, 2]]) b = np.array([[0, 1], [-1, 2]]) npt.assert_equal(*normalize_signs(a, b)) def test_harder(self): """We don't want a value that might be negative due to floating point inaccuracies to make a call to allclose in the result to be off.""" a = np.array([[-1e-15, 1], [5, 2]]) b = np.array([[1e-15, 1], [5, 2]]) # Clearly a and b would refer to the same "column # eigenvectors" but a slopppy implementation of # normalize_signs could change the sign of column 0 and make a # comparison fail npt.assert_almost_equal(*normalize_signs(a, b)) def test_column_zeros(self): a = np.array([[0, 1], [0, 2]]) b = np.array([[0, -1], [0, -2]]) npt.assert_equal(*normalize_signs(a, b)) def test_column_almost_zero(self): a = np.array([[1e-15, 3], [-2e-14, -6]]) b = np.array([[0, 3], [-1e-15, -6]]) npt.assert_almost_equal(*normalize_signs(a, b)) class TestChiSquareDistance(object): def test_errors(self): a = np.array([[-0.5, 0], [1, 0.5]]) with npt.assert_raises(ValueError): chi_square_distance(a) b = np.array([[0.5, 0], [0.5, 0.1]]) with npt.assert_raises(ValueError): chi_square_distance(b) def test_results(self): """Some random numbers.""" a = np.array([[0.02808988764, 0.056179775281, 0.084269662921, 0.140449438202], [0.01404494382, 0.196629213483, 0.109550561798, 0.033707865169], [0.02808988764, 0.112359550562, 0.056179775281, 0.140449438202]]) dist = chi_square_distance(a) expected = [0.91413919964333856, 0.33651110106124049, 
0.75656884966269089] npt.assert_almost_equal(dist, expected) def test_results2(self): """A tiny example from Legendre & Legendre 1998, p. 285.""" a = np.array([[0, 1, 1], [1, 0, 0], [0, 4, 4]]) dist = chi_square_distance(a / a.sum()) # Note L&L used a terrible calculator because they got a wrong # number (says it's 3.477) :( expected = [3.4785054261852175, 0, 3.4785054261852175] npt.assert_almost_equal(dist, expected) class TestUtils(object): def setup(self): self.x = np.array([[1, 2, 3], [4, 5, 6]]) self.y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) def test_mean_and_std_no_mean_no_std(self): with npt.assert_raises(ValueError): mean_and_std(self.x, with_mean=False, with_std=False) def test_corr_shape_mismatch(self): with npt.assert_raises(ValueError): corr(self.x, self.y) def test_assert_ordination_results_equal(self): minimal1 = OrdinationResults([1, 2]) # a minimal set of results should be equal to itself assert_ordination_results_equal(minimal1, minimal1) # type mismatch with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, 'foo') # numeric values should be checked that they're almost equal almost_minimal1 = OrdinationResults([1.0000001, 1.9999999]) assert_ordination_results_equal(minimal1, almost_minimal1) # species_ids missing in one, present in the other almost_minimal1.species_ids = ['abc', 'def'] with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, almost_minimal1) almost_minimal1.species_ids = None # site_ids missing in one, present in the other almost_minimal1.site_ids = ['abc', 'def'] with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, almost_minimal1) almost_minimal1.site_ids = None # test each of the optional numeric attributes for attr in ('species', 'site', 'biplot', 'site_constraints', 'proportion_explained'): # missing optional numeric attribute in one, present in the other setattr(almost_minimal1, attr, [[1, 2], [3, 4]]) with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, almost_minimal1) setattr(almost_minimal1, attr, None) # optional numeric attributes present in both, but not almost equal setattr(minimal1, attr, [[1, 2], [3, 4]]) setattr(almost_minimal1, attr, [[1, 2], [3.00002, 4]]) with npt.assert_raises(AssertionError): assert_ordination_results_equal(minimal1, almost_minimal1) setattr(minimal1, attr, None) setattr(almost_minimal1, attr, None) # optional numeric attributes present in both, and almost equal setattr(minimal1, attr, [[1, 2], [3, 4]]) setattr(almost_minimal1, attr, [[1, 2], [3.00000002, 4]]) assert_ordination_results_equal(minimal1, almost_minimal1) setattr(minimal1, attr, None) setattr(almost_minimal1, attr, None) class TestCAResults(object): def setup(self): """Data from table 9.11 in Legendre & Legendre 1998.""" self.X = np.loadtxt(get_data_path('L&L_CA_data')) self.ordination = CA(self.X, ['Site1', 'Site2', 'Site3'], ['Species1', 'Species2', 'Species3']) def test_scaling2(self): scores = self.ordination.scores(scaling=2) # p. 460 L&L 1998 F_hat = np.array([[0.40887, -0.06955], [-0.11539, 0.29977], [-0.30997, -0.18739]]) npt.assert_almost_equal(*normalize_signs(F_hat, scores.species), decimal=5) V_hat = np.array([[-0.84896, -0.88276], [-0.22046, 1.34482], [1.66697, -0.47032]]) npt.assert_almost_equal(*normalize_signs(V_hat, scores.site), decimal=5) def test_scaling1(self): scores = self.ordination.scores(scaling=1) # p. 
458 V = np.array([[1.31871, -0.34374], [-0.37215, 1.48150], [-0.99972, -0.92612]]) npt.assert_almost_equal(*normalize_signs(V, scores.species), decimal=5) F = np.array([[-0.26322, -0.17862], [-0.06835, 0.27211], [0.51685, -0.09517]]) npt.assert_almost_equal(*normalize_signs(F, scores.site), decimal=5) def test_maintain_chi_square_distance_scaling1(self): """In scaling 1, chi^2 distance among rows (sites) is equal to euclidean distance between them in transformed space.""" frequencies = self.X / self.X.sum() chi2_distances = chi_square_distance(frequencies) transformed_sites = self.ordination.scores(1).site euclidean_distances = pdist(transformed_sites, 'euclidean') npt.assert_almost_equal(chi2_distances, euclidean_distances) def test_maintain_chi_square_distance_scaling2(self): """In scaling 2, chi^2 distance among columns (species) is equal to euclidean distance between them in transformed space.""" frequencies = self.X / self.X.sum() chi2_distances = chi_square_distance(frequencies, between_rows=False) transformed_species = self.ordination.scores(2).species euclidean_distances = pdist(transformed_species, 'euclidean') npt.assert_almost_equal(chi2_distances, euclidean_distances) class TestCAErrors(object): def test_negative(self): X = np.array([[1, 2], [-0.1, -2]]) with npt.assert_raises(ValueError): CA(X, None, None) class TestRDAErrors(object): def test_shape(self): for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]: Y = np.random.randn(n, p) X = np.random.randn(n_, m) yield npt.assert_raises, ValueError, RDA, Y, X, None, None class TestRDAResults(object): # STATUS: L&L only shows results with scaling 1, and they agree # with vegan's (module multiplying by a constant). I can also # compute scaling 2, agreeing with vegan, but there are no written # results in L&L. def setup(self): """Data from table 11.3 in Legendre & Legendre 1998.""" Y = np.loadtxt(get_data_path('example2_Y')) X = np.loadtxt(get_data_path('example2_X')) self.ordination = RDA(Y, X, ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5', 'Site6', 'Site7', 'Site8', 'Site9'], ['Species0', 'Species1', 'Species2', 'Species3', 'Species4', 'Species5']) def test_scaling1(self): scores = self.ordination.scores(1) # Load data as computed with vegan 2.0-8 vegan_species = np.loadtxt(get_data_path( 'example2_species_scaling1_from_vegan')) npt.assert_almost_equal(scores.species, vegan_species, decimal=6) vegan_site = np.loadtxt(get_data_path( 'example2_site_scaling1_from_vegan')) npt.assert_almost_equal(scores.site, vegan_site, decimal=6) def test_scaling2(self): scores = self.ordination.scores(2) # Load data as computed with vegan 2.0-8 vegan_species = np.loadtxt(get_data_path( 'example2_species_scaling2_from_vegan')) npt.assert_almost_equal(scores.species, vegan_species, decimal=6) vegan_site = np.loadtxt(get_data_path( 'example2_site_scaling2_from_vegan')) npt.assert_almost_equal(scores.site, vegan_site, decimal=6) class TestCCAErrors(object): def setup(self): """Data from table 11.3 in Legendre & Legendre 1998.""" self.Y = np.loadtxt(get_data_path('example3_Y')) self.X = np.loadtxt(get_data_path('example3_X')) def test_shape(self): X, Y = self.X, self.Y with npt.assert_raises(ValueError): CCA(Y, X[:-1], None, None) def test_Y_values(self): X, Y = self.X, self.Y Y[0, 0] = -1 with npt.assert_raises(ValueError): CCA(Y, X, None, None) Y[0] = 0 with npt.assert_raises(ValueError): CCA(Y, X, None, None) class TestCCAResults(object): def setup(self): """Data from table 11.3 in Legendre & Legendre 1998 (p. 590). 
Loaded results as computed with vegan 2.0-8 and compared with table 11.5 if also there.""" Y = np.loadtxt(get_data_path('example3_Y')) X = np.loadtxt(get_data_path('example3_X')) self.ordination = CCA(Y, X[:, :-1], ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5', 'Site6', 'Site7', 'Site8', 'Site9'], ['Species0', 'Species1', 'Species2', 'Species3', 'Species4', 'Species5', 'Species6', 'Species7', 'Species8']) def test_scaling1_species(self): scores = self.ordination.scores(1) vegan_species = np.loadtxt(get_data_path( 'example3_species_scaling1_from_vegan')) npt.assert_almost_equal(scores.species, vegan_species, decimal=6) def test_scaling1_site(self): scores = self.ordination.scores(1) vegan_site = np.loadtxt(get_data_path( 'example3_site_scaling1_from_vegan')) npt.assert_almost_equal(scores.site, vegan_site, decimal=4) def test_scaling2_species(self): scores = self.ordination.scores(2) vegan_species = np.loadtxt(get_data_path( 'example3_species_scaling2_from_vegan')) npt.assert_almost_equal(scores.species, vegan_species, decimal=5) def test_scaling2_site(self): scores = self.ordination.scores(2) vegan_site = np.loadtxt(get_data_path( 'example3_site_scaling2_from_vegan')) npt.assert_almost_equal(scores.site, vegan_site, decimal=4) class TestPCoAResults(object): def setup(self): """Sample data set from page 111 of W.J Krzanowski. Principles of multivariate analysis, 2000, Oxford University Press.""" matrix = np.loadtxt(get_data_path('PCoA_sample_data')) dist_matrix = DistanceMatrix(matrix, map(str, range(matrix.shape[0]))) self.dist_matrix = dist_matrix def test_negative_eigenvalue_warning(self): """This data has some small negative eigenvalues.""" npt.assert_warns(RuntimeWarning, PCoA, self.dist_matrix) def test_values(self): """Adapted from cogent's `test_principal_coordinate_analysis`: "I took the example in the book (see intro info), and did the principal coordinates analysis, plotted the data and it looked right".""" with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) ordination = PCoA(self.dist_matrix) scores = ordination.scores() exp_eigvals = np.array([0.73599103, 0.26260032, 0.14926222, 0.06990457, 0.02956972, 0.01931184, 0., 0., 0., 0., 0., 0., 0., 0.]) exp_site = np.loadtxt(get_data_path('exp_PCoAzeros_site')) exp_prop_expl = np.array([0.58105792, 0.20732046, 0.1178411, 0.05518899, 0.02334502, 0.01524651, 0., 0., 0., 0., 0., 0., 0., 0.]) exp_site_ids = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13'] # Note the absolute value because column can have signs swapped npt.assert_almost_equal(scores.eigvals, exp_eigvals) npt.assert_almost_equal(np.abs(scores.site), exp_site) npt.assert_almost_equal(scores.proportion_explained, exp_prop_expl) npt.assert_equal(scores.site_ids, exp_site_ids) class TestPCoAResultsExtensive(object): def setup(self): matrix = np.loadtxt(get_data_path('PCoA_sample_data_2')) self.ids = [str(i) for i in range(matrix.shape[0])] dist_matrix = DistanceMatrix(matrix, self.ids) self.ordination = PCoA(dist_matrix) def test_values(self): results = self.ordination.scores() npt.assert_equal(len(results.eigvals), len(results.site[0])) expected = np.array([[-0.028597, 0.22903853, 0.07055272, 0.26163576, 0.28398669, 0.0], [0.37494056, 0.22334055, -0.20892914, 0.05057395, -0.18710366, 0.0], [-0.33517593, -0.23855979, -0.3099887, 0.11521787, -0.05021553, 0.0], [0.25412394, -0.4123464, 0.23343642, 0.06403168, -0.00482608, 0.0], [-0.28256844, 0.18606911, 0.28875631, -0.06455635, -0.21141632, 0.0], 
[0.01727687, 0.012458, -0.07382761, -0.42690292, 0.1695749, 0.0]]) npt.assert_almost_equal(*normalize_signs(expected, results.site)) expected = np.array([0.3984635, 0.36405689, 0.28804535, 0.27479983, 0.19165361, 0.0]) npt.assert_almost_equal(results.eigvals, expected) expected = np.array([0.2626621381, 0.2399817314, 0.1898758748, 0.1811445992, 0.1263356565, 0.0]) npt.assert_almost_equal(results.proportion_explained, expected) npt.assert_equal(results.site_ids, self.ids) class TestPCoAEigenResults(object): def setup(self): dist_matrix = DistanceMatrix.read(get_data_path('PCoA_sample_data_3')) self.ordination = PCoA(dist_matrix) self.ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593', 'PC.355', 'PC.607', 'PC.634'] def test_values(self): results = self.ordination.scores() npt.assert_almost_equal(len(results.eigvals), len(results.site[0])) expected = np.loadtxt(get_data_path('exp_PCoAEigenResults_site')) npt.assert_almost_equal(*normalize_signs(expected, results.site)) expected = np.array([0.51236726, 0.30071909, 0.26791207, 0.20898868, 0.19169895, 0.16054235, 0.15017696, 0.12245775, 0.0]) npt.assert_almost_equal(results.eigvals, expected) expected = np.array([0.2675738328, 0.157044696, 0.1399118638, 0.1091402725, 0.1001110485, 0.0838401162, 0.0784269939, 0.0639511764, 0.0]) npt.assert_almost_equal(results.proportion_explained, expected) npt.assert_equal(results.site_ids, self.ids) class TestPCoAPrivateMethods(object): def setup(self): self.matrix = np.arange(1, 7).reshape(2, 3) self.matrix2 = np.arange(1, 10).reshape(3, 3) def test_E_matrix(self): E = PCoA._E_matrix(self.matrix) expected_E = np.array([[-0.5, -2., -4.5], [-8., -12.5, -18.]]) npt.assert_almost_equal(E, expected_E) def test_F_matrix(self): F = PCoA._F_matrix(self.matrix2) expected_F = np.zeros((3, 3)) # Note that `test_make_F_matrix` in cogent is wrong npt.assert_almost_equal(F, expected_F) class TestPCoAErrors(object): def test_input(self): with npt.assert_raises(TypeError): PCoA([[1, 2], [3, 4]]) class TestOrdinationResults(unittest.TestCase): def setUp(self): # Define in-memory CA results to serialize and deserialize. eigvals = np.array([0.0961330159181, 0.0409418140138]) species = np.array([[0.408869425742, 0.0695518116298], [-0.1153860437, -0.299767683538], [-0.309967102571, 0.187391917117]]) site = np.array([[-0.848956053187, 0.882764759014], [-0.220458650578, -1.34482000302], [1.66697179591, 0.470324389808]]) biplot = None site_constraints = None prop_explained = None species_ids = ['Species1', 'Species2', 'Species3'] site_ids = ['Site1', 'Site2', 'Site3'] self.ordination_results = OrdinationResults( eigvals=eigvals, species=species, site=site, biplot=biplot, site_constraints=site_constraints, proportion_explained=prop_explained, species_ids=species_ids, site_ids=site_ids) # DataFrame for testing plot method. Has a categorical column with a # mix of numbers and strings. Has a numeric column with a mix of ints, # floats, and strings that can be converted to floats. Has a numeric # column with missing data (np.nan). self.df = pd.DataFrame([['foo', '42', 10], [22, 0, 8], [22, -4.2, np.nan], ['foo', '42.19', 11]], index=['A', 'B', 'C', 'D'], columns=['categorical', 'numeric', 'nancolumn']) # Minimal ordination results for easier testing of plotting method. # Paired with df above. 
eigvals = np.array([0.50, 0.25, 0.25]) site = np.array([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]]) self.min_ord_results = OrdinationResults(eigvals=eigvals, site=site, site_ids=['A', 'B', 'C', 'D']) def test_str(self): exp = ("Ordination results:\n" "\tEigvals: 2\n" "\tProportion explained: N/A\n" "\tSpecies: 3x2\n" "\tSite: 3x2\n" "\tBiplot: N/A\n" "\tSite constraints: N/A\n" "\tSpecies IDs: 'Species1', 'Species2', 'Species3'\n" "\tSite IDs: 'Site1', 'Site2', 'Site3'") obs = str(self.ordination_results) self.assertEqual(obs, exp) # all optional attributes missing exp = ("Ordination results:\n" "\tEigvals: 1\n" "\tProportion explained: N/A\n" "\tSpecies: N/A\n" "\tSite: N/A\n" "\tBiplot: N/A\n" "\tSite constraints: N/A\n" "\tSpecies IDs: N/A\n" "\tSite IDs: N/A") obs = str(OrdinationResults(np.array([4.2]))) self.assertEqual(obs, exp) def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title, exp_legend_exists, exp_xlabel, exp_ylabel, exp_zlabel): # check type assert_is_instance(fig, mpl.figure.Figure) # check number of subplots axes = fig.get_axes() npt.assert_equal(len(axes), exp_num_subplots) # check title ax = axes[0] npt.assert_equal(ax.get_title(), exp_title) # shouldn't have tick labels for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()): npt.assert_equal(tick_label.get_text(), '') # check if legend is present legend = ax.get_legend() if exp_legend_exists: assert_true(legend is not None) else: assert_true(legend is None) # check axis labels npt.assert_equal(ax.get_xlabel(), exp_xlabel) npt.assert_equal(ax.get_ylabel(), exp_ylabel) npt.assert_equal(ax.get_zlabel(), exp_zlabel) def test_plot_no_metadata(self): fig = self.min_ord_results.plot() self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2') def test_plot_with_numeric_metadata_and_plot_options(self): fig = self.min_ord_results.plot( self.df, 'numeric', axes=(1, 0, 2), axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds') self.check_basic_figure_sanity( fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3') def test_plot_with_categorical_metadata_and_plot_options(self): fig = self.min_ord_results.plot( self.df, 'categorical', axes=[2, 0, 1], title='a title', cmap='Accent') self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1') def test_plot_with_invalid_axis_labels(self): with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'): self.min_ord_results.plot(axes=[2, 0, 1], axis_labels=('a', 'b', 'c', 'd')) def test_validate_plot_axes_valid_input(self): # shouldn't raise an error on valid input. 
nothing is returned, so # nothing to check here self.min_ord_results._validate_plot_axes(self.min_ord_results.site.T, (1, 2, 0)) def test_validate_plot_axes_invalid_input(self): # not enough dimensions with six.assertRaisesRegex(self, ValueError, '2 dimension\(s\)'): self.min_ord_results._validate_plot_axes( np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2)) coord_matrix = self.min_ord_results.site.T # wrong number of axes with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'): self.min_ord_results._validate_plot_axes(coord_matrix, []) with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'): self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 2, 3)) # duplicate axes with six.assertRaisesRegex(self, ValueError, 'must be unique'): self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0)) # out of range axes with six.assertRaisesRegex(self, ValueError, 'axes\[1\].*3'): self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2)) with six.assertRaisesRegex(self, ValueError, 'axes\[2\].*3'): self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3)) def test_get_plot_point_colors_invalid_input(self): # column provided without df with npt.assert_raises(ValueError): self.min_ord_results._get_plot_point_colors(None, 'numeric', ['B', 'C'], 'jet') # df provided without column with npt.assert_raises(ValueError): self.min_ord_results._get_plot_point_colors(self.df, None, ['B', 'C'], 'jet') # column not in df with six.assertRaisesRegex(self, ValueError, 'missingcol'): self.min_ord_results._get_plot_point_colors(self.df, 'missingcol', ['B', 'C'], 'jet') # id not in df with six.assertRaisesRegex(self, ValueError, 'numeric'): self.min_ord_results._get_plot_point_colors( self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet') # missing data in df with six.assertRaisesRegex(self, ValueError, 'nancolumn'): self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn', ['B', 'C', 'A'], 'jet') def test_get_plot_point_colors_no_df_or_column(self): obs = self.min_ord_results._get_plot_point_colors(None, None, ['B', 'C'], 'jet') npt.assert_equal(obs, (None, None)) def test_get_plot_point_colors_numeric_column(self): # subset of the ids in df exp = [0.0, -4.2, 42.0] obs = self.min_ord_results._get_plot_point_colors( self.df, 'numeric', ['B', 'C', 'A'], 'jet') npt.assert_almost_equal(obs[0], exp) assert_true(obs[1] is None) # all ids in df exp = [0.0, 42.0, 42.19, -4.2] obs = self.min_ord_results._get_plot_point_colors( self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet') npt.assert_almost_equal(obs[0], exp) assert_true(obs[1] is None) def test_get_plot_point_colors_categorical_column(self): # subset of the ids in df exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]] exp_color_dict = { 'foo': [0.5, 0., 0., 1.], 22: [0., 0., 0.5, 1.] 
} obs = self.min_ord_results._get_plot_point_colors( self.df, 'categorical', ['B', 'C', 'A'], 'jet') npt.assert_almost_equal(obs[0], exp_colors) npt.assert_equal(obs[1], exp_color_dict) # all ids in df exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.], [0., 0., 0.5, 1.]] obs = self.min_ord_results._get_plot_point_colors( self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet') npt.assert_almost_equal(obs[0], exp_colors) # should get same color dict as before npt.assert_equal(obs[1], exp_color_dict) def test_plot_categorical_legend(self): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # we shouldn't have a legend yet assert_true(ax.get_legend() is None) self.min_ord_results._plot_categorical_legend( ax, {'foo': 'red', 'bar': 'green'}) # make sure we have a legend now legend = ax.get_legend() assert_true(legend is not None) # do some light sanity checking to make sure our input labels and # colors are present. we're not using nose.tools.assert_items_equal # because it isn't available in Python 3. labels = [t.get_text() for t in legend.get_texts()] npt.assert_equal(sorted(labels), ['bar', 'foo']) colors = [l.get_color() for l in legend.get_lines()] npt.assert_equal(sorted(colors), ['green', 'red']) def test_repr_png(self): obs = self.min_ord_results._repr_png_() assert_is_instance(obs, binary_type) assert_true(len(obs) > 0) def test_repr_svg(self): obs = self.min_ord_results._repr_svg_() # print_figure(format='svg') can return text or bytes depending on the # version of IPython assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type)) assert_true(len(obs) > 0) def test_png(self): assert_is_instance(self.min_ord_results.png, Image) def test_svg(self): assert_is_instance(self.min_ord_results.svg, SVG) if __name__ == '__main__': import nose nose.runmodule()
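A standalone sketch of how the `normalize_signs` helper defined at the top of this test module is intended to be used: comparing eigenvector matrices whose columns may legitimately differ by a sign flip. The arrays are made up for illustration.

# Sketch: compare eigenvector matrices up to per-column sign flips using the
# normalize_signs helper defined in the test module above (arrays are made up).
import numpy as np

expected = np.array([[0.7, -0.1],
                     [0.7,  0.9]])
obtained = np.array([[-0.7, -0.1],   # first column came back with the opposite sign
                     [-0.7,  0.9]])

a, b = normalize_signs(expected, obtained)
np.testing.assert_almost_equal(a, b)   # passes: the sign difference is normalized away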
laurenrevere/osf.io
refs/heads/develop
admin_tests/pre_reg/utils.py
2
from osf.models import DraftRegistration, MetaSchema def draft_reg_util(): DraftRegistration.objects.all().delete() return MetaSchema.objects.get(name='Prereg Challenge', schema_version=2) SCHEMA_DATA = { 'q20': { 'comments': [], 'value': 'The Effect of sugar on brownie tastiness does not require any additional transformations. However, if it were using a regression analysis and each level of sweet had been categorically described (e.g. not sweet, somewhat sweet, sweet, and very sweet), sweet could be dummy coded with not sweet as the reference category.', 'extra': {} }, 'q21': { 'comments': [], 'value': 'If the the ANOVA indicates that the mean taste perceptions are significantly different (p&lt;.05), then we will use a Tukey-Kramer HSD test to conduct all possible pairwise comparison.', 'extra': {} }, 'q22': { 'comments': [], 'value': 'We will use the standard p&lt;.05 criteria for determining if the ANOVA and the post hoc test suggest that the results are significantly different from those expected if the null hypothesis were correct. The post-hoc Tukey-Kramer test adjusts for multiple comparisons.', 'extra': {} }, 'q23': { 'comments': [], 'value': 'No checks will be performed to determine eligibility for inclusion besides verification that each subject answered each of the three tastiness indices. Outliers will be included in the analysis', 'extra': {} }, 'q24': { 'comments': [], 'value': 'If a subject does not complete any of the three indices of tastiness, that subject will not be included in the analysis.', 'extra': {} }, 'q25': { 'comments': [], 'value': '', 'extra': {} }, 'q26': { 'comments': [], 'value': 'sugar_taste.R', 'extra': {} }, 'q27': { 'comments': [], 'value': '', 'extra': {} }, 'q1': { 'comments': [], 'value': 'Effect of sugar on brownie tastiness', 'extra': {} }, 'q3': { 'comments': [], 'value': 'Though there is strong evidence to suggest that sugar affects taste preferences, the effect has never been demonstrated in brownies. Therefore, we will measure taste preference for four different levels of sugar concentration in a standard brownie recipe to determine if the effect exists in this pastry. ', 'extra': {} }, 'q2': { 'comments': [], 'value': 'David Mellor, Jolene Esposito', 'extra': {} }, 'q5': { 'comments': [], 'value': 'Registration prior to creation of data: As of the date of submission of this research plan for preregistration, the data have not yet been collected, created, or realized.', 'extra': {} }, 'q4': { 'comments': [], 'value': 'If taste affects preference, then mean preference indices will be higher with higher concentrations of sugar.', 'extra': {} }, 'q7': { 'comments': [], 'value': { 'question': { 'comments': [], 'value': 'Participants will be recruited through advertisements at local pastry shops. Participants will be paid $10 for agreeing to participate (raised to $30 if our sample size is not reached within 15 days of beginning recruitment). Participants must be at least 18 years old and be able to eat the ingredients of the pastries.', 'extra': {} }, 'uploader16': { 'comments': [], 'value': '', 'extra': {} } }, 'extra': {} }, 'q6': { 'comments': [], 'value': 'Data do not yet exist', 'extra': {} }, 'q9': { 'comments': [], 'value': 'We used the software program G*Power to conduct a power analysis. Our goal was to obtain .95 power to detect a medium effect size of .25 at the standard .05 alpha error probability. ', 'extra': {} }, 'q8': { 'comments': [], 'value': 'Our target sample size is 280 participants. 
We will attempt to recruit up to 320, assuming that not all will complete the total task. ', 'extra': {} }, 'q15': { 'comments': [], 'value': [ 'For studies that involve human subjects, they will not know the treatment group to which they have been assigned.'], 'extra': {} }, 'q14': { 'comments': [], 'value': 'Experiment - A researcher randomly assigns treatments to study subjects, this includes field or lab experiments. This is also known as an intervention experiment and includes randomized controlled trials.', 'extra': {} }, 'q17': { 'comments': [], 'value': 'We will use block randomization, where each participant will be randomly assigned to one of the four equally sized, predetermined blocks. The random number list used to create these four blocks will be created using the web applications available at http://random.org. ', 'extra': {} }, 'q16': { 'comments': [], 'value': { 'question': { 'comments': [], 'value': 'We have a between subjects design with 1 factor (sugar by mass) with 4 levels. ', 'extra': {} }, 'uploader16': { 'comments': [], 'value': '', 'extra': {} } }, 'extra': {} }, 'q11': { 'comments': [], 'value': 'We manipulated the percentage of sugar by mass added to brownies. The four levels of this categorical variable are: 15%, 20%, 25%, or 40% cane sugar by mass. ', 'extra': {} }, 'q10': { 'comments': [], 'value': 'We will post participant sign-up slots by week on the preceding Friday night, with 20 spots posted per week. We will post 20 new slots each week if, on that Friday night, we are below 320 participants. ', 'extra': {} }, 'q13': { 'comments': [], 'value': 'We will take the mean of the two questions above to create a single measure of brownie enjoyment.', 'extra': {} }, 'q12': { 'comments': [], 'value': 'The single outcome variable will be the perceived tastiness of the single brownie each participant will eat. We will measure this by asking participants How much did you enjoy eating the brownie (on a scale of 1-7, 1 being not at all, 7 being a great deal) and How good did the brownie taste (on a scale of 1-7, 1 being very bad, 7 being very good). ', 'extra': {} }, 'q19': { 'comments': [], 'value': { 'q19a': { 'value': "We will use a one-way between subjects ANOVA to analyze our results. The manipulated, categorical independent variable is 'sugar' whereas the dependent variable is our taste index. ", 'extra': {} }, 'uploader19': { 'value': '', 'extra': {} }, }, 'extra': {} } }
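SCHEMA_DATA is a Prereg Challenge fixture keyed by question id; every answer carries 'value', 'comments' and 'extra', and the upload questions (q7, q16, q19) nest another answer dict inside 'value'. A small sanity-check sketch of that shape, useful when extending the fixture; walk_answers and check_schema_data are illustrative helpers, not part of the OSF codebase:

def walk_answers(schema_data):
    # Yield (question id, answer value) pairs, descending one level into the
    # nested answers used by the upload questions (q7, q16, q19).
    for qid, answer in schema_data.items():
        value = answer.get('value')
        if isinstance(value, dict):
            for sub_id, sub_answer in value.items():
                yield '%s.%s' % (qid, sub_id), sub_answer.get('value')
        else:
            yield qid, value

def check_schema_data(schema_data):
    unanswered = [qid for qid, value in walk_answers(schema_data) if value is None]
    assert not unanswered, 'Answers without a value: %s' % unanswered

check_schema_data(SCHEMA_DATA)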
nwokeo/supysonic
refs/heads/master
venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py
1107
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import abc import functools import itertools import re from ._compat import string_types, with_metaclass from .version import Version, LegacyVersion, parse class InvalidSpecifier(ValueError): """ An invalid specifier was found, users should refer to PEP 440. """ class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): @abc.abstractmethod def __str__(self): """ Returns the str representation of this Specifier like object. This should be representative of the Specifier itself. """ @abc.abstractmethod def __hash__(self): """ Returns a hash value for this Specifier like object. """ @abc.abstractmethod def __eq__(self, other): """ Returns a boolean representing whether or not the two Specifier like objects are equal. """ @abc.abstractmethod def __ne__(self, other): """ Returns a boolean representing whether or not the two Specifier like objects are not equal. """ @abc.abstractproperty def prereleases(self): """ Returns whether or not pre-releases as a whole are allowed by this specifier. """ @prereleases.setter def prereleases(self, value): """ Sets whether or not pre-releases as a whole are allowed by this specifier. """ @abc.abstractmethod def contains(self, item, prereleases=None): """ Determines if the given item is contained within this specifier. """ @abc.abstractmethod def filter(self, iterable, prereleases=None): """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. """ class _IndividualSpecifier(BaseSpecifier): _operators = {} def __init__(self, spec="", prereleases=None): match = self._regex.search(spec) if not match: raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) self._spec = ( match.group("operator").strip(), match.group("version").strip(), ) # Store whether or not this Specifier should accept prereleases self._prereleases = prereleases def __repr__(self): pre = ( ", prereleases={0!r}".format(self.prereleases) if self._prereleases is not None else "" ) return "<{0}({1!r}{2})>".format( self.__class__.__name__, str(self), pre, ) def __str__(self): return "{0}{1}".format(*self._spec) def __hash__(self): return hash(self._spec) def __eq__(self, other): if isinstance(other, string_types): try: other = self.__class__(other) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): return NotImplemented return self._spec == other._spec def __ne__(self, other): if isinstance(other, string_types): try: other = self.__class__(other) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): return NotImplemented return self._spec != other._spec def _get_operator(self, op): return getattr(self, "_compare_{0}".format(self._operators[op])) def _coerce_version(self, version): if not isinstance(version, (LegacyVersion, Version)): version = parse(version) return version @property def operator(self): return self._spec[0] @property def version(self): return self._spec[1] @property def prereleases(self): return self._prereleases @prereleases.setter def prereleases(self, value): self._prereleases = value def __contains__(self, item): return self.contains(item) def contains(self, item, prereleases=None): # Determine if prereleases are to be allowed or not. 
if prereleases is None: prereleases = self.prereleases # Normalize item to a Version or LegacyVersion, this allows us to have # a shortcut for ``"2.0" in Specifier(">=2") item = self._coerce_version(item) # Determine if we should be supporting prereleases in this specifier # or not, if we do not support prereleases than we can short circuit # logic if this version is a prereleases. if item.is_prerelease and not prereleases: return False # Actually do the comparison to determine if this item is contained # within this Specifier or not. return self._get_operator(self.operator)(item, self.version) def filter(self, iterable, prereleases=None): yielded = False found_prereleases = [] kw = {"prereleases": prereleases if prereleases is not None else True} # Attempt to iterate over all the values in the iterable and if any of # them match, yield them. for version in iterable: parsed_version = self._coerce_version(version) if self.contains(parsed_version, **kw): # If our version is a prerelease, and we were not set to allow # prereleases, then we'll store it for later incase nothing # else matches this specifier. if (parsed_version.is_prerelease and not (prereleases or self.prereleases)): found_prereleases.append(version) # Either this is not a prerelease, or we should have been # accepting prereleases from the begining. else: yielded = True yield version # Now that we've iterated over everything, determine if we've yielded # any values, and if we have not and we have any prereleases stored up # then we will go ahead and yield the prereleases. if not yielded and found_prereleases: for version in found_prereleases: yield version class LegacySpecifier(_IndividualSpecifier): _regex_str = ( r""" (?P<operator>(==|!=|<=|>=|<|>)) \s* (?P<version> [^,;\s)]* # Since this is a "legacy" specifier, and the version # string can be just about anything, we match everything # except for whitespace, a semi-colon for marker support, # a closing paren since versions can be enclosed in # them, and a comma since it's a version separator. ) """ ) _regex = re.compile( r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) _operators = { "==": "equal", "!=": "not_equal", "<=": "less_than_equal", ">=": "greater_than_equal", "<": "less_than", ">": "greater_than", } def _coerce_version(self, version): if not isinstance(version, LegacyVersion): version = LegacyVersion(str(version)) return version def _compare_equal(self, prospective, spec): return prospective == self._coerce_version(spec) def _compare_not_equal(self, prospective, spec): return prospective != self._coerce_version(spec) def _compare_less_than_equal(self, prospective, spec): return prospective <= self._coerce_version(spec) def _compare_greater_than_equal(self, prospective, spec): return prospective >= self._coerce_version(spec) def _compare_less_than(self, prospective, spec): return prospective < self._coerce_version(spec) def _compare_greater_than(self, prospective, spec): return prospective > self._coerce_version(spec) def _require_version_compare(fn): @functools.wraps(fn) def wrapped(self, prospective, spec): if not isinstance(prospective, Version): return False return fn(self, prospective, spec) return wrapped class Specifier(_IndividualSpecifier): _regex_str = ( r""" (?P<operator>(~=|==|!=|<=|>=|<|>|===)) (?P<version> (?: # The identity operators allow for an escape hatch that will # do an exact string match of the version you wish to install. # This will not be parsed by PEP 440 and we cannot determine # any semantic meaning from it. 
This operator is discouraged # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* [^\s]* # We just match everything, except for whitespace # since we are only testing for strict identity. ) | (?: # The (non)equality operators allow for wild card and local # versions to be specified so we have to define these two # operators separately to enable that. (?<===|!=) # Only match for equals and not equals \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? # You cannot use a wild card and a dev or local version # together so group them with a | and make them optional. (?: (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local | \.\* # Wild card syntax of .* )? ) | (?: # The compatible operator requires at least two digits in the # release segment. (?<=~=) # Only match for the compatible operator \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) | (?: # All other operators only allow a sub set of what the # (non)equality operators do. Specifically they do not allow # local versions to be specified nor do they allow the prefix # matching wild cards. (?<!==|!=|~=) # We have special cases for these # operators so we want to make sure they # don't match here. \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) ) """ ) _regex = re.compile( r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) _operators = { "~=": "compatible", "==": "equal", "!=": "not_equal", "<=": "less_than_equal", ">=": "greater_than_equal", "<": "less_than", ">": "greater_than", "===": "arbitrary", } @_require_version_compare def _compare_compatible(self, prospective, spec): # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to # implement this in terms of the other specifiers instead of # implementing it ourselves. The only thing we need to do is construct # the other specifiers. # We want everything but the last item in the version, but we want to # ignore post and dev releases and we want to treat the pre-release as # it's own separate segment. prefix = ".".join( list( itertools.takewhile( lambda x: (not x.startswith("post") and not x.startswith("dev")), _version_split(spec), ) )[:-1] ) # Add the prefix notation to the end of our string prefix += ".*" return (self._get_operator(">=")(prospective, spec) and self._get_operator("==")(prospective, prefix)) @_require_version_compare def _compare_equal(self, prospective, spec): # We need special logic to handle prefix matching if spec.endswith(".*"): # In the case of prefix matching we want to ignore local segment. prospective = Version(prospective.public) # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment. 
spec = _version_split(spec[:-2]) # Remove the trailing .* # Split the prospective version out by dots, and pretend that there # is an implicit dot in between a release segment and a pre-release # segment. prospective = _version_split(str(prospective)) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. prospective = prospective[:len(spec)] # Pad out our two sides with zeros so that they both equal the same # length. spec, prospective = _pad_version(spec, prospective) else: # Convert our spec string into a Version spec = Version(spec) # If the specifier does not have a local segment, then we want to # act as if the prospective version also does not have a local # segment. if not spec.local: prospective = Version(prospective.public) return prospective == spec @_require_version_compare def _compare_not_equal(self, prospective, spec): return not self._compare_equal(prospective, spec) @_require_version_compare def _compare_less_than_equal(self, prospective, spec): return prospective <= Version(spec) @_require_version_compare def _compare_greater_than_equal(self, prospective, spec): return prospective >= Version(spec) @_require_version_compare def _compare_less_than(self, prospective, spec): # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec) # Check to see if the prospective version is less than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. if not prospective < spec: return False # This special case is here so that, unless the specifier itself # includes is a pre-release version, that we do not accept pre-release # versions for the version mentioned in the specifier (e.g. <3.1 should # not match 3.1.dev0, but should match 3.0.dev0). if not spec.is_prerelease and prospective.is_prerelease: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # less than the spec version *and* it's not a pre-release of the same # version in the spec. return True @_require_version_compare def _compare_greater_than(self, prospective, spec): # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec) # Check to see if the prospective version is greater than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. if not prospective > spec: return False # This special case is here so that, unless the specifier itself # includes is a post-release version, that we do not accept # post-release versions for the version mentioned in the specifier # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). if not spec.is_postrelease and prospective.is_postrelease: if Version(prospective.base_version) == Version(spec.base_version): return False # Ensure that we do not allow a local version of the version mentioned # in the specifier, which is techincally greater than, to match. if prospective.local is not None: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # greater than the spec version *and* it's not a pre-release of the # same version in the spec. 
return True def _compare_arbitrary(self, prospective, spec): return str(prospective).lower() == str(spec).lower() @property def prereleases(self): # If there is an explicit prereleases set for this, then we'll just # blindly use that. if self._prereleases is not None: return self._prereleases # Look at all of our specifiers and determine if they are inclusive # operators, and if they are if they are including an explicit # prerelease. operator, version = self._spec if operator in ["==", ">=", "<=", "~=", "==="]: # The == specifier can include a trailing .*, if it does we # want to remove before parsing. if operator == "==" and version.endswith(".*"): version = version[:-2] # Parse the version, and if it is a pre-release than this # specifier allows pre-releases. if parse(version).is_prerelease: return True return False @prereleases.setter def prereleases(self, value): self._prereleases = value _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") def _version_split(version): result = [] for item in version.split("."): match = _prefix_regex.search(item) if match: result.extend(match.groups()) else: result.append(item) return result def _pad_version(left, right): left_split, right_split = [], [] # Get the release segment of our versions left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) # Get the rest of our versions left_split.append(left[len(left_split[0]):]) right_split.append(right[len(right_split[0]):]) # Insert our padding left_split.insert( 1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])), ) right_split.insert( 1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])), ) return ( list(itertools.chain(*left_split)), list(itertools.chain(*right_split)), ) class SpecifierSet(BaseSpecifier): def __init__(self, specifiers="", prereleases=None): # Split on , to break each indidivual specifier into it's own item, and # strip each item to remove leading/trailing whitespace. specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] # Parsed each individual specifier, attempting first to make it a # Specifier and falling back to a LegacySpecifier. parsed = set() for specifier in specifiers: try: parsed.add(Specifier(specifier)) except InvalidSpecifier: parsed.add(LegacySpecifier(specifier)) # Turn our parsed specifiers into a frozen set and save them for later. self._specs = frozenset(parsed) # Store our prereleases value so we can use it later to determine if # we accept prereleases or not. self._prereleases = prereleases def __repr__(self): pre = ( ", prereleases={0!r}".format(self.prereleases) if self._prereleases is not None else "" ) return "<SpecifierSet({0!r}{1})>".format(str(self), pre) def __str__(self): return ",".join(sorted(str(s) for s in self._specs)) def __hash__(self): return hash(self._specs) def __and__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif not isinstance(other, SpecifierSet): return NotImplemented specifier = SpecifierSet() specifier._specs = frozenset(self._specs | other._specs) if self._prereleases is None and other._prereleases is not None: specifier._prereleases = other._prereleases elif self._prereleases is not None and other._prereleases is None: specifier._prereleases = self._prereleases elif self._prereleases == other._prereleases: specifier._prereleases = self._prereleases else: raise ValueError( "Cannot combine SpecifierSets with True and False prerelease " "overrides." 
) return specifier def __eq__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif isinstance(other, _IndividualSpecifier): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented return self._specs == other._specs def __ne__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif isinstance(other, _IndividualSpecifier): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented return self._specs != other._specs def __len__(self): return len(self._specs) def __iter__(self): return iter(self._specs) @property def prereleases(self): # If we have been given an explicit prerelease modifier, then we'll # pass that through here. if self._prereleases is not None: return self._prereleases # If we don't have any specifiers, and we don't have a forced value, # then we'll just return None since we don't know if this should have # pre-releases or not. if not self._specs: return None # Otherwise we'll see if any of the given specifiers accept # prereleases, if any of them do we'll return True, otherwise False. return any(s.prereleases for s in self._specs) @prereleases.setter def prereleases(self, value): self._prereleases = value def __contains__(self, item): return self.contains(item) def contains(self, item, prereleases=None): # Ensure that our item is a Version or LegacyVersion instance. if not isinstance(item, (LegacyVersion, Version)): item = parse(item) # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. if prereleases is None: prereleases = self.prereleases # We can determine if we're going to allow pre-releases by looking to # see if any of the underlying items supports them. If none of them do # and this item is a pre-release then we do not allow it and we can # short circuit that here. # Note: This means that 1.0.dev1 would not be contained in something # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 if not prereleases and item.is_prerelease: return False # We simply dispatch to the underlying specs here to make sure that the # given version is contained within all of them. # Note: This use of all() here means that an empty set of specifiers # will always return True, this is an explicit design decision. return all( s.contains(item, prereleases=prereleases) for s in self._specs ) def filter(self, iterable, prereleases=None): # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. if prereleases is None: prereleases = self.prereleases # If we have any specifiers, then we want to wrap our iterable in the # filter method for each one, this will act as a logical AND amongst # each specifier. if self._specs: for spec in self._specs: iterable = spec.filter(iterable, prereleases=bool(prereleases)) return iterable # If we do not have any specifiers, then we need to have a rough filter # which will filter out any pre-releases, unless there are no final # releases, and which will filter out LegacyVersion in general. else: filtered = [] found_prereleases = [] for item in iterable: # Ensure that we some kind of Version class for this item. 
if not isinstance(item, (LegacyVersion, Version)): parsed_version = parse(item) else: parsed_version = item # Filter out any item which is parsed as a LegacyVersion if isinstance(parsed_version, LegacyVersion): continue # Store any item which is a pre-release for later unless we've # already found a final version or we are accepting prereleases if parsed_version.is_prerelease and not prereleases: if not filtered: found_prereleases.append(item) else: filtered.append(item) # If we've found no items except for pre-releases, then we'll go # ahead and use the pre-releases if not filtered and found_prereleases and prereleases is None: return found_prereleases return filtered
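This record is the vendored packaging.specifiers module; the same Specifier/SpecifierSet API ships in the standalone packaging distribution, which the import below assumes. A brief usage sketch of the behaviour implemented above (the candidate version lists are made up for illustration):

from packaging.specifiers import Specifier, SpecifierSet

spec_set = SpecifierSet(">=1.0,!=1.3.*,<2.0")
print("1.5" in spec_set)       # True -- membership coerces the string to a Version
print("1.3.2" in spec_set)     # False -- excluded by the !=1.3.* prefix match

# The compatible-release operator ~=X.Y behaves like >=X.Y combined with ==X.*
compatible = Specifier("~=2.2")
print(compatible.contains("2.9"))   # True
print(compatible.contains("3.0"))   # False

# Pre-releases are dropped by filter() unless explicitly allowed.
candidates = ["1.1", "1.4", "1.9rc1"]
print(list(spec_set.filter(candidates)))                    # ['1.1', '1.4']
print(list(spec_set.filter(candidates, prereleases=True)))  # ['1.1', '1.4', '1.9rc1']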
usc-isi/extra-specs
refs/heads/master
nova/tests/api/openstack/compute/contrib/test_flavorextradata.py
2
# Copyright 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import json import webob from nova.compute import instance_types from nova import test from nova.tests.api.openstack import fakes def fake_get_instance_type_by_flavor_id(flavorid): return { 'id': flavorid, 'flavorid': str(flavorid), 'root_gb': 1, 'ephemeral_gb': 1, 'name': u'test', 'deleted': False, 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1), 'updated_at': None, 'memory_mb': 512, 'vcpus': 1, 'swap': 512, 'rxtx_factor': 1.0, 'extra_specs': {}, 'deleted_at': None, 'vcpu_weight': None } def fake_get_all_types(inactive=0, filters=None): return { 'fake1': fake_get_instance_type_by_flavor_id(1), 'fake2': fake_get_instance_type_by_flavor_id(2) } class FlavorextradataTest(test.TestCase): def setUp(self): super(FlavorextradataTest, self).setUp() self.stubs.Set(instance_types, 'get_instance_type_by_flavor_id', fake_get_instance_type_by_flavor_id) self.stubs.Set(instance_types, 'get_all_types', fake_get_all_types) def _verify_server_response(self, flavor, expected): for key in expected: self.assertEquals(flavor[key], expected[key]) def test_show(self): expected = { 'flavor': { 'id': '1', 'name': 'test', 'ram': 512, 'vcpus': 1, 'disk': 1, 'OS-FLV-EXT-DATA:ephemeral': 1, 'swap': 512, 'rxtx_factor': 1, } } url = '/v2/fake/flavors/1' req = webob.Request.blank(url) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) body = json.loads(res.body) self._verify_server_response(body['flavor'], expected['flavor']) def test_detail(self): expected = [ { 'id': '1', 'name': 'test', 'ram': 512, 'vcpus': 1, 'disk': 1, 'OS-FLV-EXT-DATA:ephemeral': 1, 'swap': 512, 'rxtx_factor': 1, }, { 'id': '2', 'name': 'test', 'ram': 512, 'vcpus': 1, 'disk': 1, 'OS-FLV-EXT-DATA:ephemeral': 1, 'swap': 512, 'rxtx_factor': 1, }, ] url = '/v2/fake/flavors/detail' req = webob.Request.blank(url) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) body = json.loads(res.body) for i, flavor in enumerate(body['flavors']): self._verify_server_response(flavor, expected[i])
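The expected dictionaries in FlavorextradataTest encode a field mapping from the instance_type record to the flavor the API returns: memory_mb becomes ram, root_gb becomes disk, and ephemeral_gb surfaces as OS-FLV-EXT-DATA:ephemeral. A small sketch of that mapping, handy for generating expectations; expected_flavor_view is a hypothetical helper, not part of Nova:

def expected_flavor_view(instance_type):
    # Mirror the renaming asserted by FlavorextradataTest: memory_mb -> ram,
    # root_gb -> disk, ephemeral_gb -> OS-FLV-EXT-DATA:ephemeral.
    return {
        'id': instance_type['flavorid'],
        'name': instance_type['name'],
        'ram': instance_type['memory_mb'],
        'vcpus': instance_type['vcpus'],
        'disk': instance_type['root_gb'],
        'OS-FLV-EXT-DATA:ephemeral': instance_type['ephemeral_gb'],
        'swap': instance_type['swap'],
        'rxtx_factor': instance_type['rxtx_factor'],
    }

# Using the stub defined in the test module above:
assert expected_flavor_view(fake_get_instance_type_by_flavor_id(1))['OS-FLV-EXT-DATA:ephemeral'] == 1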
savanu/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/wpt/stability.py
22
import os import sys from collections import OrderedDict, defaultdict from mozlog import reader from mozlog.formatters import JSONFormatter, TbplFormatter from mozlog.handlers import BaseHandler, LogLevelFilter, StreamHandler from markdown import markdown_adjust, table from wptrunner import wptrunner class LogActionFilter(BaseHandler): """Handler that filters out messages not of a given set of actions. Subclasses BaseHandler. :param inner: Handler to use for messages that pass this filter :param actions: List of actions for which to fire the handler """ def __init__(self, inner, actions): """Extend BaseHandler and set inner and actions props on self.""" BaseHandler.__init__(self, inner) self.inner = inner self.actions = actions def __call__(self, item): """Invoke handler if action is in list passed as constructor param.""" if item["action"] in self.actions: return self.inner(item) class LogHandler(reader.LogHandler): """Handle updating test and subtest status in log. Subclasses reader.LogHandler. """ def __init__(self): self.results = OrderedDict() def find_or_create_test(self, data): test_name = data["test"] if self.results.get(test_name): return self.results[test_name] test = { "subtests": OrderedDict(), "status": defaultdict(int) } self.results[test_name] = test return test def find_or_create_subtest(self, data): test = self.find_or_create_test(data) subtest_name = data["subtest"] if test["subtests"].get(subtest_name): return test["subtests"][subtest_name] subtest = { "status": defaultdict(int), "messages": set() } test["subtests"][subtest_name] = subtest return subtest def test_status(self, data): subtest = self.find_or_create_subtest(data) subtest["status"][data["status"]] += 1 if data.get("message"): subtest["messages"].add(data["message"]) def test_end(self, data): test = self.find_or_create_test(data) test["status"][data["status"]] += 1 def is_inconsistent(results_dict, iterations): """Return whether or not a single test is inconsistent.""" return len(results_dict) > 1 or sum(results_dict.values()) != iterations def process_results(log, iterations): """Process test log and return overall results and list of inconsistent tests.""" inconsistent = [] handler = LogHandler() reader.handle_log(reader.read(log), handler) results = handler.results for test_name, test in results.iteritems(): if is_inconsistent(test["status"], iterations): inconsistent.append((test_name, None, test["status"], [])) for subtest_name, subtest in test["subtests"].iteritems(): if is_inconsistent(subtest["status"], iterations): inconsistent.append((test_name, subtest_name, subtest["status"], subtest["messages"])) return results, inconsistent def err_string(results_dict, iterations): """Create and return string with errors from test run.""" rv = [] total_results = sum(results_dict.values()) for key, value in sorted(results_dict.items()): rv.append("%s%s" % (key, ": %s/%s" % (value, iterations) if value != iterations else "")) if total_results < iterations: rv.append("MISSING: %s/%s" % (iterations - total_results, iterations)) rv = ", ".join(rv) if is_inconsistent(results_dict, iterations): rv = "**%s**" % rv return rv def write_inconsistent(log, inconsistent, iterations): """Output inconsistent tests to logger.error.""" log("## Unstable results ##\n") strings = [( "`%s`" % markdown_adjust(test), ("`%s`" % markdown_adjust(subtest)) if subtest else "", err_string(results, iterations), ("`%s`" % markdown_adjust(";".join(messages))) if len(messages) else "") for test, subtest, results, messages in inconsistent] 
table(["Test", "Subtest", "Results", "Messages"], strings, log) def write_results(log, results, iterations, pr_number=None, use_details=False): log("## All results ##\n") if use_details: log("<details>\n") log("<summary>%i %s ran</summary>\n\n" % (len(results), "tests" if len(results) > 1 else "test")) for test_name, test in results.iteritems(): baseurl = "http://w3c-test.org/submissions" if "https" in os.path.splitext(test_name)[0].split(".")[1:]: baseurl = "https://w3c-test.org/submissions" title = test_name if use_details: log("<details>\n") if pr_number: title = "<a href=\"%s/%s%s\">%s</a>" % (baseurl, pr_number, test_name, title) log('<summary>%s</summary>\n\n' % title) else: log("### %s ###" % title) strings = [("", err_string(test["status"], iterations), "")] strings.extend((( ("`%s`" % markdown_adjust(subtest_name)) if subtest else "", err_string(subtest["status"], iterations), ("`%s`" % markdown_adjust(';'.join(subtest["messages"]))) if len(subtest["messages"]) else "") for subtest_name, subtest in test["subtests"].items())) table(["Subtest", "Results", "Messages"], strings, log) if use_details: log("</details>\n") if use_details: log("</details>\n") def run(venv, logger, **kwargs): kwargs["pause_after_test"] = False if kwargs["repeat"] == 1: kwargs["repeat"] = 10 handler = LogActionFilter( LogLevelFilter( StreamHandler( sys.stdout, TbplFormatter() ), "WARNING"), ["log", "process_output"]) # There is a public API for this in the next mozlog initial_handlers = logger._state.handlers logger._state.handlers = [] with open("raw.log", "wb") as log: # Setup logging for wptrunner that keeps process output and # warning+ level logs only logger.add_handler(handler) logger.add_handler(StreamHandler(log, JSONFormatter())) wptrunner.run_tests(**kwargs) logger._state.handlers = initial_handlers with open("raw.log", "rb") as log: results, inconsistent = process_results(log, kwargs["repeat"]) return kwargs["repeat"], results, inconsistent
mikey1234/script.module.urlresolver
refs/heads/master
lib/urlresolver/plugins/megarelease.py
3
''' Megarelease urlresolver plugin Copyright (C) 2013 Vinnydude This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' from t0mm0.common.net import Net from urlresolver.plugnplay.interfaces import UrlResolver from urlresolver.plugnplay.interfaces import PluginSettings from urlresolver.plugnplay import Plugin import re, os, time, xbmcgui, xbmc import xbmcgui from urlresolver import common from lib import jsunpack #SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png') net = Net() class MegareleaseResolver(Plugin, UrlResolver, PluginSettings): implements = [UrlResolver, PluginSettings] name = "megarelease" def __init__(self): p = self.get_setting('priority') or 100 self.priority = int(p) self.net = Net() def get_media_url(self, host, media_id): try: url = self.get_url(host, media_id) html = self.net.http_GET(url).content dialog = xbmcgui.DialogProgress() dialog.create('Resolving', 'Resolving Megarelease Link...') dialog.update(0) data = {} r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html) for name, value in r: data[name] = value data.update({'plugins_are_not_allowed_plus_ban':2}) captchaimg = re.search('<script type="text/javascript" src="(http://www.google.com.+?)">', html) if captchaimg: dialog.close() html = self.net.http_GET(captchaimg.group(1)).content part = re.search("challenge \: \\'(.+?)\\'", html) captchaimg = 'http://www.google.com/recaptcha/api/image?c='+part.group(1) img = xbmcgui.ControlImage(450,15,400,130,captchaimg) wdlg = xbmcgui.WindowDialog() wdlg.addControl(img) wdlg.show() time.sleep(3) kb = xbmc.Keyboard('', 'Type the letters in the image', False) kb.doModal() capcode = kb.getText() if (kb.isConfirmed()): userInput = kb.getText() if userInput != '': solution = kb.getText() elif userInput == '': raise Exception ('You must enter text in the image to access video') else: raise Exception ('Captcha Error') wdlg.close() dialog.close() dialog.create('Resolving', 'Resolving Megarelease Link...') dialog.update(50) data.update({'recaptcha_challenge_field':part.group(1),'recaptcha_response_field':solution}) html = net.http_POST(url, data).content if re.findall('err', html): raise Exception('Wrong Captcha') sPattern = '<script type=(?:"|\')text/javascript(?:"|\')>(eval\(' sPattern += 'function\(p,a,c,k,e,d\)(?!.+player_ads.+).+np_vid.+?)' sPattern += '\s+?</script>' r = re.search(sPattern, html, re.DOTALL + re.IGNORECASE) if r: sJavascript = r.group(1) sUnpacked = jsunpack.unpack(sJavascript) sPattern = '<embed id="np_vid"type="video/divx"src="(.+?)' sPattern += '"custommode=' r = re.search(sPattern, sUnpacked) if r: dialog.update(100) dialog.close() return r.group(1) else: num = re.compile('false\|(.+?)\|(.+?)\|(.+?)\|(.+?)\|divx').findall(html) common.addon.log('NUM: '+str(num)) for u1, u2, u3, u4 in num: urlz = u4+'.'+u3+'.'+u2+'.'+u1 pre = 'http://'+urlz+':182/d/' preb = re.compile('custommode\|(.+?)\|(.+?)\|182').findall(html) for 
ext, link in preb: r = pre+link+'/video.'+ext dialog.update(100) dialog.close() return r except Exception, e: common.addon.log('**** Megarelease Error occured: %s' % e) common.addon.show_small_popup('Error', str(e), 5000, '') return self.unresolvable(code=0, msg='Exception: %s' % e) def get_url(self, host, media_id): return 'http://megarelease.org/%s' % media_id def get_host_and_id(self, url): r = re.search('//(.+?)/([0-9a-zA-Z]+)',url) if r: return r.groups() else: return False return('host', 'media_id') def valid_url(self, url, host): if self.get_setting('enabled') == 'false': return False return (re.match('http://(www.)?megarelease.org/' + '[0-9A-Za-z]+', url) or 'megarelease' in host)
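The fallback branch near the end of get_media_url reverses an obfuscated IPv4 address: the packed javascript lists the four octets back-to-front between 'false|' and '|divx', and the resolver reassembles them into the download host on port 182. A small sketch of just that step; the html_fragment and octet values are placeholders, not real site markup:

import re

html_fragment = 'false|10|20|168|192|divx'   # placeholder for the packed javascript
(u1, u2, u3, u4), = re.compile('false\|(.+?)\|(.+?)\|(.+?)\|(.+?)\|divx').findall(html_fragment)
# Octets come out reversed, so stitch them back together last-to-first.
base = 'http://%s.%s.%s.%s:182/d/' % (u4, u3, u2, u1)   # -> http://192.168.20.10:182/d/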
imito/odin
refs/heads/master
tests/test_negative_binomial_disp.py
1
from __future__ import absolute_import, division, print_function import os import numpy as np import tensorflow as tf import torch from odin.bay.distributions import NegativeBinomialDisp, ZeroInflated from odin.stats import describe from scvi.models.log_likelihood import log_nb_positive, log_zinb_positive os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' tf.random.set_seed(8) np.random.seed(8) torch.manual_seed(8) def torch_nb(mean, disp): px_rate = torch.Tensor(mean) px_r = torch.Tensor(disp) p = px_rate / (px_rate + px_r) r = px_r l_train = torch.distributions.Gamma(concentration=r, rate=(1 - p) / p).sample() l_train = torch.clamp(l_train, max=1e18) X = torch.distributions.Poisson(l_train).sample() return X shape = (12000, 800) x = np.random.randint(1, 20, size=shape).astype('float32') mean = np.random.randint(1, 20, size=shape).astype('float32') disp = np.random.randint(1, 20, size=shape).astype('float32') disp_col = np.random.randint(1, 20, size=shape[1]).astype('float32') disp_row = np.random.randint(1, 20, size=shape[0]).astype('float32') pi = np.random.rand(*shape).astype('float32') # constant dispersion (only for tensorflow) nb = NegativeBinomialDisp(loc=mean, disp=2) llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy() print(llk1) # broadcast disp in column nb = NegativeBinomialDisp(loc=mean, disp=disp_col) llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy() llk2 = log_nb_positive(x=torch.Tensor(x), mu=torch.Tensor(mean), theta=torch.Tensor(disp_col)).numpy() print(np.all(np.isclose(llk1, llk2))) # broadcast disp in row try: nb = NegativeBinomialDisp(loc=mean, disp=disp_row) llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy() llk2 = log_nb_positive(x=torch.Tensor(x), mu=torch.Tensor(mean), theta=torch.Tensor(disp_row)).numpy() print(np.all(np.isclose(llk1, llk2))) except: print("NOT POSSIBLE TO BROADCAST the first dimension") # all disp available nb = NegativeBinomialDisp(loc=mean, disp=disp) llk1 = tf.reduce_sum(nb.log_prob(x), axis=1).numpy() llk2 = log_nb_positive(x=torch.Tensor(x), mu=torch.Tensor(mean), theta=torch.Tensor(disp)).numpy() print(np.all(np.isclose(llk1, llk2))) s1 = nb.sample().numpy() s2 = torch_nb(mean, disp).numpy() print(describe(s1)) print(describe(s2)) zinb = ZeroInflated(nb, probs=pi) llk1 = tf.reduce_sum(zinb.log_prob(x), axis=1).numpy() llk2 = log_zinb_positive(x=torch.Tensor(x), mu=torch.Tensor(mean), theta=torch.Tensor(disp), pi=torch.Tensor(pi)).numpy() print(llk1) print(llk2)
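Both NegativeBinomialDisp.log_prob and scvi's log_nb_positive evaluate the negative binomial log-likelihood in the mean/dispersion parameterization that the comparison above relies on. A small NumPy sketch of that formula, assuming scipy is available; log_nb_mean_disp is illustrative and not part of either library:

import numpy as np
from scipy.special import gammaln

def log_nb_mean_disp(x, mu, theta, eps=1e-8):
    # Negative binomial log-likelihood with mean `mu` and dispersion `theta`,
    # matching the mean/dispersion parameterization compared above.
    log_theta_mu = np.log(theta + mu + eps)
    return (theta * (np.log(theta + eps) - log_theta_mu)
            + x * (np.log(mu + eps) - log_theta_mu)
            + gammaln(x + theta) - gammaln(theta) - gammaln(x + 1))

x = np.array([3.0, 7.0])
print(log_nb_mean_disp(x, mu=np.array([5.0, 5.0]), theta=np.array([2.0, 2.0])))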
wwj718/murp-edx
refs/heads/master
common/djangoapps/student/tests/tests.py
2
""" This file demonstrates writing tests using the unittest module. These will pass when you run "manage.py test". Replace this with more appropriate tests for your application. """ import logging import unittest from datetime import datetime, timedelta import pytz from django.conf import settings from django.test import TestCase from django.test.utils import override_settings from django.test.client import RequestFactory, Client from django.contrib.auth.models import User, AnonymousUser from django.core.urlresolvers import reverse, NoReverseMatch from django.http import HttpResponse from unittest.case import SkipTest from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from courseware.tests.tests import TEST_DATA_MIXED_MODULESTORE from opaque_keys.edx.locations import SlashSeparatedCourseKey from mock import Mock, patch from student.models import anonymous_id_for_user, user_by_anonymous_id, CourseEnrollment, unique_id_for_user from student.views import (process_survey_link, _cert_info, change_enrollment, complete_course_mode_info) from student.tests.factories import UserFactory, CourseModeFactory from certificates.models import CertificateStatuses from certificates.tests.factories import GeneratedCertificateFactory import shoppingcart log = logging.getLogger(__name__) class CourseEndingTest(TestCase): """Test things related to course endings: certificates, surveys, etc""" def test_process_survey_link(self): username = "fred" user = Mock(username=username) id = unique_id_for_user(user) link1 = "http://www.mysurvey.com" self.assertEqual(process_survey_link(link1, user), link1) link2 = "http://www.mysurvey.com?unique={UNIQUE_ID}" link2_expected = "http://www.mysurvey.com?unique={UNIQUE_ID}".format(UNIQUE_ID=id) self.assertEqual(process_survey_link(link2, user), link2_expected) def test_cert_info(self): user = Mock(username="fred") survey_url = "http://a_survey.com" course = Mock(end_of_course_survey_url=survey_url) self.assertEqual(_cert_info(user, course, None), {'status': 'processing', 'show_disabled_download_button': False, 'show_download_url': False, 'show_survey_button': False, }) cert_status = {'status': 'unavailable'} self.assertEqual(_cert_info(user, course, cert_status), {'status': 'processing', 'show_disabled_download_button': False, 'show_download_url': False, 'show_survey_button': False, 'mode': None }) cert_status = {'status': 'generating', 'grade': '67', 'mode': 'honor'} self.assertEqual(_cert_info(user, course, cert_status), {'status': 'generating', 'show_disabled_download_button': True, 'show_download_url': False, 'show_survey_button': True, 'survey_url': survey_url, 'grade': '67', 'mode': 'honor' }) cert_status = {'status': 'regenerating', 'grade': '67', 'mode': 'verified'} self.assertEqual(_cert_info(user, course, cert_status), {'status': 'generating', 'show_disabled_download_button': True, 'show_download_url': False, 'show_survey_button': True, 'survey_url': survey_url, 'grade': '67', 'mode': 'verified' }) download_url = 'http://s3.edx/cert' cert_status = {'status': 'downloadable', 'grade': '67', 'download_url': download_url, 'mode': 'honor'} self.assertEqual(_cert_info(user, course, cert_status), {'status': 'ready', 'show_disabled_download_button': False, 'show_download_url': True, 'download_url': download_url, 'show_survey_button': True, 'survey_url': survey_url, 'grade': '67', 'mode': 'honor' }) cert_status = {'status': 'notpassing', 'grade': '67', 'download_url': download_url, 'mode': 
'honor'} self.assertEqual(_cert_info(user, course, cert_status), {'status': 'notpassing', 'show_disabled_download_button': False, 'show_download_url': False, 'show_survey_button': True, 'survey_url': survey_url, 'grade': '67', 'mode': 'honor' }) # Test a course that doesn't have a survey specified course2 = Mock(end_of_course_survey_url=None) cert_status = {'status': 'notpassing', 'grade': '67', 'download_url': download_url, 'mode': 'honor'} self.assertEqual(_cert_info(user, course2, cert_status), {'status': 'notpassing', 'show_disabled_download_button': False, 'show_download_url': False, 'show_survey_button': False, 'grade': '67', 'mode': 'honor' }) @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class DashboardTest(TestCase): """ Tests for dashboard utility functions """ # arbitrary constant COURSE_SLUG = "100" COURSE_NAME = "test_course" COURSE_ORG = "EDX" def setUp(self): self.course = CourseFactory.create(org=self.COURSE_ORG, display_name=self.COURSE_NAME, number=self.COURSE_SLUG) self.assertIsNotNone(self.course) self.user = UserFactory.create(username="jack", email="[email protected]", password='test') CourseModeFactory.create( course_id=self.course.id, mode_slug='honor', mode_display_name='Honor Code', ) self.client = Client() @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') def check_verification_status_on(self, mode, value): """ Check that the css class and the status message are in the dashboard html. """ CourseEnrollment.enroll(self.user, self.course.location.course_key, mode=mode) response = self.client.get(reverse('dashboard')) self.assertContains(response, "class=\"course {0}\"".format(mode)) self.assertContains(response, value) @patch.dict("django.conf.settings.FEATURES", {'ENABLE_VERIFIED_CERTIFICATES': True}) def test_verification_status_visible(self): """ Test that the certificate verification status for courses is visible on the dashboard. """ self.client.login(username="jack", password="test") self.check_verification_status_on('verified', 'You\'re enrolled as a verified student') self.check_verification_status_on('honor', 'You\'re enrolled as an honor code student') self.check_verification_status_on('audit', 'You\'re auditing this course') @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') def check_verification_status_off(self, mode, value): """ Check that the css class and the status message are not in the dashboard html. """ CourseEnrollment.enroll(self.user, self.course.location.course_key, mode=mode) response = self.client.get(reverse('dashboard')) self.assertNotContains(response, "class=\"course {0}\"".format(mode)) self.assertNotContains(response, value) @patch.dict("django.conf.settings.FEATURES", {'ENABLE_VERIFIED_CERTIFICATES': False}) def test_verification_status_invisible(self): """ Test that the certificate verification status for courses is not visible on the dashboard if the verified certificates setting is off. 
""" self.client.login(username="jack", password="test") self.check_verification_status_off('verified', 'You\'re enrolled as a verified student') self.check_verification_status_off('honor', 'You\'re enrolled as an honor code student') self.check_verification_status_off('audit', 'You\'re auditing this course') def test_course_mode_info(self): verified_mode = CourseModeFactory.create( course_id=self.course.id, mode_slug='verified', mode_display_name='Verified', expiration_datetime=datetime.now(pytz.UTC) + timedelta(days=1) ) enrollment = CourseEnrollment.enroll(self.user, self.course.id) course_mode_info = complete_course_mode_info(self.course.id, enrollment) self.assertTrue(course_mode_info['show_upsell']) self.assertEquals(course_mode_info['days_for_upsell'], 1) verified_mode.expiration_datetime = datetime.now(pytz.UTC) + timedelta(days=-1) verified_mode.save() course_mode_info = complete_course_mode_info(self.course.id, enrollment) self.assertFalse(course_mode_info['show_upsell']) self.assertIsNone(course_mode_info['days_for_upsell']) @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') def test_refundable(self): verified_mode = CourseModeFactory.create( course_id=self.course.id, mode_slug='verified', mode_display_name='Verified', expiration_datetime=datetime.now(pytz.UTC) + timedelta(days=1) ) enrollment = CourseEnrollment.enroll(self.user, self.course.id, mode='verified') self.assertTrue(enrollment.refundable()) verified_mode.expiration_datetime = datetime.now(pytz.UTC) - timedelta(days=1) verified_mode.save() self.assertFalse(enrollment.refundable()) @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') def test_refundable_when_certificate_exists(self): verified_mode = CourseModeFactory.create( course_id=self.course.id, mode_slug='verified', mode_display_name='Verified', expiration_datetime=datetime.now(pytz.UTC) + timedelta(days=1) ) enrollment = CourseEnrollment.enroll(self.user, self.course.id, mode='verified') self.assertTrue(enrollment.refundable()) generated_certificate = GeneratedCertificateFactory.create( user=self.user, course_id=self.course.id, status=CertificateStatuses.downloadable, mode='verified' ) self.assertFalse(enrollment.refundable()) class EnrollInCourseTest(TestCase): """Tests enrolling and unenrolling in courses.""" def setUp(self): patcher = patch('student.models.tracker') self.mock_tracker = patcher.start() self.addCleanup(patcher.stop) def test_enrollment(self): user = User.objects.create_user("joe", "[email protected]", "password") course_id = SlashSeparatedCourseKey("edX", "Test101", "2013") course_id_partial = SlashSeparatedCourseKey("edX", "Test101", None) # Test basic enrollment self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) self.assertFalse(CourseEnrollment.is_enrolled_by_partial(user, course_id_partial)) CourseEnrollment.enroll(user, course_id) self.assertTrue(CourseEnrollment.is_enrolled(user, course_id)) self.assertTrue(CourseEnrollment.is_enrolled_by_partial(user, course_id_partial)) self.assert_enrollment_event_was_emitted(user, course_id) # Enrolling them again should be harmless CourseEnrollment.enroll(user, course_id) self.assertTrue(CourseEnrollment.is_enrolled(user, course_id)) self.assertTrue(CourseEnrollment.is_enrolled_by_partial(user, course_id_partial)) self.assert_no_events_were_emitted() # Now unenroll the user CourseEnrollment.unenroll(user, course_id) self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) 
self.assertFalse(CourseEnrollment.is_enrolled_by_partial(user, course_id_partial)) self.assert_unenrollment_event_was_emitted(user, course_id) # Unenrolling them again should also be harmless CourseEnrollment.unenroll(user, course_id) self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) self.assertFalse(CourseEnrollment.is_enrolled_by_partial(user, course_id_partial)) self.assert_no_events_were_emitted() # The enrollment record should still exist, just be inactive enrollment_record = CourseEnrollment.objects.get( user=user, course_id=course_id ) self.assertFalse(enrollment_record.is_active) # Make sure mode is updated properly if user unenrolls & re-enrolls enrollment = CourseEnrollment.enroll(user, course_id, "verified") self.assertEquals(enrollment.mode, "verified") CourseEnrollment.unenroll(user, course_id) enrollment = CourseEnrollment.enroll(user, course_id, "audit") self.assertTrue(CourseEnrollment.is_enrolled(user, course_id)) self.assertEquals(enrollment.mode, "audit") def assert_no_events_were_emitted(self): """Ensures no events were emitted since the last event related assertion""" self.assertFalse(self.mock_tracker.emit.called) # pylint: disable=maybe-no-member self.mock_tracker.reset_mock() def assert_enrollment_event_was_emitted(self, user, course_key): """Ensures an enrollment event was emitted since the last event related assertion""" self.mock_tracker.emit.assert_called_once_with( # pylint: disable=maybe-no-member 'edx.course.enrollment.activated', { 'course_id': course_key.to_deprecated_string(), 'user_id': user.pk, 'mode': 'honor' } ) self.mock_tracker.reset_mock() def assert_unenrollment_event_was_emitted(self, user, course_key): """Ensures an unenrollment event was emitted since the last event related assertion""" self.mock_tracker.emit.assert_called_once_with( # pylint: disable=maybe-no-member 'edx.course.enrollment.deactivated', { 'course_id': course_key.to_deprecated_string(), 'user_id': user.pk, 'mode': 'honor' } ) self.mock_tracker.reset_mock() def test_enrollment_non_existent_user(self): # Testing enrollment of newly unsaved user (i.e. 
no database entry) user = User(username="rusty", email="[email protected]") course_id = SlashSeparatedCourseKey("edX", "Test101", "2013") self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) # Unenroll does nothing CourseEnrollment.unenroll(user, course_id) self.assert_no_events_were_emitted() # Implicit save() happens on new User object when enrolling, so this # should still work CourseEnrollment.enroll(user, course_id) self.assertTrue(CourseEnrollment.is_enrolled(user, course_id)) self.assert_enrollment_event_was_emitted(user, course_id) def test_enrollment_by_email(self): user = User.objects.create(username="jack", email="[email protected]") course_id = SlashSeparatedCourseKey("edX", "Test101", "2013") CourseEnrollment.enroll_by_email("[email protected]", course_id) self.assertTrue(CourseEnrollment.is_enrolled(user, course_id)) self.assert_enrollment_event_was_emitted(user, course_id) # This won't throw an exception, even though the user is not found self.assertIsNone( CourseEnrollment.enroll_by_email("[email protected]", course_id) ) self.assert_no_events_were_emitted() self.assertRaises( User.DoesNotExist, CourseEnrollment.enroll_by_email, "[email protected]", course_id, ignore_errors=False ) self.assert_no_events_were_emitted() # Now unenroll them by email CourseEnrollment.unenroll_by_email("[email protected]", course_id) self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) self.assert_unenrollment_event_was_emitted(user, course_id) # Harmless second unenroll CourseEnrollment.unenroll_by_email("[email protected]", course_id) self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) self.assert_no_events_were_emitted() # Unenroll on non-existent user shouldn't throw an error CourseEnrollment.unenroll_by_email("[email protected]", course_id) self.assert_no_events_were_emitted() def test_enrollment_multiple_classes(self): user = User(username="rusty", email="[email protected]") course_id1 = SlashSeparatedCourseKey("edX", "Test101", "2013") course_id2 = SlashSeparatedCourseKey("MITx", "6.003z", "2012") CourseEnrollment.enroll(user, course_id1) self.assert_enrollment_event_was_emitted(user, course_id1) CourseEnrollment.enroll(user, course_id2) self.assert_enrollment_event_was_emitted(user, course_id2) self.assertTrue(CourseEnrollment.is_enrolled(user, course_id1)) self.assertTrue(CourseEnrollment.is_enrolled(user, course_id2)) CourseEnrollment.unenroll(user, course_id1) self.assert_unenrollment_event_was_emitted(user, course_id1) self.assertFalse(CourseEnrollment.is_enrolled(user, course_id1)) self.assertTrue(CourseEnrollment.is_enrolled(user, course_id2)) CourseEnrollment.unenroll(user, course_id2) self.assert_unenrollment_event_was_emitted(user, course_id2) self.assertFalse(CourseEnrollment.is_enrolled(user, course_id1)) self.assertFalse(CourseEnrollment.is_enrolled(user, course_id2)) def test_activation(self): user = User.objects.create(username="jack", email="[email protected]") course_id = SlashSeparatedCourseKey("edX", "Test101", "2013") self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) # Creating an enrollment doesn't actually enroll a student # (calling CourseEnrollment.enroll() would have) enrollment = CourseEnrollment.get_or_create_enrollment(user, course_id) self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) self.assert_no_events_were_emitted() # Until you explicitly activate it enrollment.activate() self.assertTrue(CourseEnrollment.is_enrolled(user, course_id)) self.assert_enrollment_event_was_emitted(user, course_id) # 
Activating something that's already active does nothing enrollment.activate() self.assertTrue(CourseEnrollment.is_enrolled(user, course_id)) self.assert_no_events_were_emitted() # Now deactivate enrollment.deactivate() self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) self.assert_unenrollment_event_was_emitted(user, course_id) # Deactivating something that's already inactive does nothing enrollment.deactivate() self.assertFalse(CourseEnrollment.is_enrolled(user, course_id)) self.assert_no_events_were_emitted() # A deactivated enrollment should be activated if enroll() is called # for that user/course_id combination CourseEnrollment.enroll(user, course_id) self.assertTrue(CourseEnrollment.is_enrolled(user, course_id)) self.assert_enrollment_event_was_emitted(user, course_id) @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class ChangeEnrollmentViewTest(ModuleStoreTestCase): """Tests the student.views.change_enrollment view""" def setUp(self): super(ChangeEnrollmentViewTest, self).setUp() self.course = CourseFactory.create() self.user = UserFactory.create(password='secret') self.client.login(username=self.user.username, password='secret') self.url = reverse('change_enrollment') def enroll_through_view(self, course): response = self.client.post( reverse('change_enrollment'), { 'course_id': course.id.to_deprecated_string(), 'enrollment_action': 'enroll' } ) return response def test_enroll_as_honor(self): """Tests that a student can successfully enroll through this view""" response = self.enroll_through_view(self.course) self.assertEqual(response.status_code, 200) enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user( self.user, self.course.id ) self.assertTrue(is_active) self.assertEqual(enrollment_mode, u'honor') def test_cannot_enroll_if_already_enrolled(self): """ Tests that a student will not be able to enroll through this view if they are already enrolled in the course """ CourseEnrollment.enroll(self.user, self.course.id) self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course.id)) # now try to enroll that student response = self.enroll_through_view(self.course) self.assertEqual(response.status_code, 400) def test_change_to_honor_if_verified(self): """ Tests that a student that is currently enrolled as a verified student cannot accidentally change their enrollment to honor """ CourseEnrollment.enroll(self.user, self.course.id, mode=u'verified') self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course.id)) # now try to enroll the student in the honor mode: response = self.enroll_through_view(self.course) self.assertEqual(response.status_code, 400) enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user( self.user, self.course.id ) self.assertTrue(is_active) self.assertEqual(enrollment_mode, u'verified') def test_change_to_honor_if_verified_not_active(self): """ Tests that one can re-enroll for a course if one has already unenrolled """ # enroll student CourseEnrollment.enroll(self.user, self.course.id, mode=u'verified') # now unenroll student: CourseEnrollment.unenroll(self.user, self.course.id) # check that they are verified but inactive enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user( self.user, self.course.id ) self.assertFalse(is_active) self.assertEqual(enrollment_mode, u'verified') # now enroll them through the view: response = self.enroll_through_view(self.course)
self.assertEqual(response.status_code, 200) enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user( self.user, self.course.id ) self.assertTrue(is_active) self.assertEqual(enrollment_mode, u'honor') @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class PaidRegistrationTest(ModuleStoreTestCase): """ Tests for paid registration functionality (not verified student), involves shoppingcart """ # arbitrary constant COURSE_SLUG = "100" COURSE_NAME = "test_course" COURSE_ORG = "EDX" def setUp(self): # Create course self.req_factory = RequestFactory() self.course = CourseFactory.create(org=self.COURSE_ORG, display_name=self.COURSE_NAME, number=self.COURSE_SLUG) self.assertIsNotNone(self.course) self.user = User.objects.create(username="jack", email="[email protected]") @unittest.skipUnless(settings.FEATURES.get('ENABLE_SHOPPING_CART'), "Shopping Cart not enabled in settings") def test_change_enrollment_add_to_cart(self): request = self.req_factory.post(reverse('change_enrollment'), {'course_id': self.course.id.to_deprecated_string(), 'enrollment_action': 'add_to_cart'}) request.user = self.user response = change_enrollment(request) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, reverse('shoppingcart.views.show_cart')) self.assertTrue(shoppingcart.models.PaidCourseRegistration.contained_in_order( shoppingcart.models.Order.get_cart_for_user(self.user), self.course.id)) @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class AnonymousLookupTable(TestCase): """ Tests for anonymous_id_functions """ # arbitrary constant COURSE_SLUG = "100" COURSE_NAME = "test_course" COURSE_ORG = "EDX" def setUp(self): self.course = CourseFactory.create(org=self.COURSE_ORG, display_name=self.COURSE_NAME, number=self.COURSE_SLUG) self.assertIsNotNone(self.course) self.user = UserFactory() CourseModeFactory.create( course_id=self.course.id, mode_slug='honor', mode_display_name='Honor Code', ) patcher = patch('student.models.tracker') patcher.start() self.addCleanup(patcher.stop) def test_for_unregistered_user(self): # same path as for logged out user self.assertEqual(None, anonymous_id_for_user(AnonymousUser(), self.course.id)) self.assertIsNone(user_by_anonymous_id(None)) def test_roundtrip_for_logged_user(self): enrollment = CourseEnrollment.enroll(self.user, self.course.id) anonymous_id = anonymous_id_for_user(self.user, self.course.id) real_user = user_by_anonymous_id(anonymous_id) self.assertEqual(self.user, real_user) self.assertEqual(anonymous_id, anonymous_id_for_user(self.user, self.course.id, save=False))
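The event assertions in the enrollment tests above hinge on patching the student.models tracker and checking its emit calls. A minimal, self-contained sketch of that assert-then-reset pattern using only unittest.mock follows; emit_activation_event is a hypothetical stand-in for the call CourseEnrollment makes internally, not edx-platform code.

from unittest import TestCase, main
from unittest.mock import MagicMock


def emit_activation_event(tracker, course_id, user_id, mode='honor'):
    """Hypothetical stand-in for the tracker call made during enrollment."""
    tracker.emit('edx.course.enrollment.activated', {
        'course_id': course_id,
        'user_id': user_id,
        'mode': mode,
    })


class EmitAssertionPatternTest(TestCase):
    def test_assert_then_reset(self):
        mock_tracker = MagicMock()
        emit_activation_event(mock_tracker, 'edX/Test101/2013', 42)
        # Same style as assert_enrollment_event_was_emitted() above
        mock_tracker.emit.assert_called_once_with(
            'edx.course.enrollment.activated',
            {'course_id': 'edX/Test101/2013', 'user_id': 42, 'mode': 'honor'},
        )
        # reset_mock() lets an assert_no_events_were_emitted()-style check follow
        mock_tracker.reset_mock()
        self.assertFalse(mock_tracker.emit.called)


if __name__ == '__main__':
    main()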
asherkhb/coge
refs/heads/master
bin/last_wrapper/Bio/Nexus/Nodes.py
3
# Copyright 2005-2008 by Frank Kauff & Cymon J. Cox. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. # # Nodes.py # # Provides functionality of a linked list. # Each node has one (or none) predecessor, and an arbitrary number of successors. # Nodes can store arbitrary data in a NodeData class. # # Subclassed by Nexus.Trees to store phylogenetic trees. # # Bug reports to Frank Kauff ([email protected]) # class ChainException(Exception): pass class NodeException(Exception): pass class Chain(object): """Stores a list of nodes that are linked together.""" def __init__(self): """Initiates a node chain: (self).""" self.chain={} self.id=-1 def _get_id(self): """Gets a new id for a node in the chain.""" self.id+=1 return self.id def all_ids(self): """Return a list of all node ids.""" return self.chain.keys() def add(self,node,prev=None): """Attaches node to another: (self, node, prev).""" if prev is not None and prev not in self.chain: raise ChainException('Unknown predecessor: '+str(prev)) else: id=self._get_id() node.set_id(id) node.set_prev(prev) if prev is not None: self.chain[prev].add_succ(id) self.chain[id]=node return id def collapse(self,id): """Deletes node from chain and relinks successors to predecessor: collapse(self, id).""" if id not in self.chain: raise ChainException('Unknown ID: '+str(id)) prev_id=self.chain[id].get_prev() self.chain[prev_id].remove_succ(id) succ_ids=self.chain[id].get_succ() for i in succ_ids: self.chain[i].set_prev(prev_id) self.chain[prev_id].add_succ(succ_ids) node=self.chain[id] self.kill(id) return node def kill(self,id): """Kills a node from chain without caring to what it is connected: kill(self,id).""" if id not in self.chain: raise ChainException('Unknown ID: '+str(id)) else: del self.chain[id] def unlink(self,id): """Disconnects node from his predecessor: unlink(self,id).""" if id not in self.chain: raise ChainException('Unknown ID: '+str(id)) else: prev_id=self.chain[id].prev if prev_id is not None: self.chain[prev_id].succ.pop(self.chain[prev_id].succ.index(id)) self.chain[id].prev=None return prev_id def link(self, parent,child): """Connects son to parent: link(self,son,parent).""" if child not in self.chain: raise ChainException('Unknown ID: '+str(child)) elif parent not in self.chain: raise ChainException('Unknown ID: '+str(parent)) else: self.unlink(child) self.chain[parent].succ.append(child) self.chain[child].set_prev(parent) def is_parent_of(self,parent,grandchild): """Check if grandchild is a subnode of parent: is_parent_of(self,parent,grandchild).""" if grandchild==parent or grandchild in self.chain[parent].get_succ(): return True else: for sn in self.chain[parent].get_succ(): if self.is_parent_of(sn,grandchild): return True else: return False def trace(self,start,finish): """Returns a list of all node_ids between two nodes (excluding start, including end): trace(start,end).""" if start not in self.chain or finish not in self.chain: raise NodeException('Unknown node.') if not self.is_parent_of(start,finish) or start==finish: return [] for sn in self.chain[start].get_succ(): if self.is_parent_of(sn,finish): return [sn]+self.trace(sn,finish) class Node(object): """A single node.""" def __init__(self,data=None): """Represents a node with one predecessor and multiple successors: (self, data=None).""" self.id=None self.data=data self.prev=None self.succ=[] def set_id(self,id): """Sets the id of a node, if not 
set yet: (self,id).""" if self.id is not None: raise NodeException('Node id cannot be changed.') self.id=id def get_id(self): """Returns the node's id: (self).""" return self.id def get_succ(self): """Returns a list of the node's successors: (self).""" return self.succ def get_prev(self): """Returns the id of the node's predecessor: (self).""" return self.prev def add_succ(self,id): """Adds a node id to the node's successors: (self,id).""" if isinstance(id,type([])): self.succ.extend(id) else: self.succ.append(id) def remove_succ(self,id): """Removes a node id from the node's successors: (self,id).""" self.succ.remove(id) def set_succ(self,new_succ): """Sets the node's successors: (self,new_succ).""" if not isinstance(new_succ,type([])): raise NodeException('Node successor must be of list type.') self.succ=new_succ def set_prev(self,id): """Sets the node's predecessor: (self,id).""" self.prev=id def get_data(self): """Returns a node's data: (self).""" return self.data def set_data(self,data): """Sets a node's data: (self,data).""" self.data=data
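A short usage sketch for the Chain/Node classes above, assuming the module is importable as Bio.Nexus.Nodes per the path shown. Ids are assigned sequentially starting at 0, and trace() returns the path excluding the start node.

from Bio.Nexus.Nodes import Chain, Node

chain = Chain()
root = chain.add(Node('root'))                 # id 0, no predecessor
node_a = chain.add(Node('A'), prev=root)       # id 1
node_b = chain.add(Node('B'), prev=root)       # id 2
leaf = chain.add(Node('B1'), prev=node_b)      # id 3

print(sorted(chain.all_ids()))                 # [0, 1, 2, 3]
print(chain.is_parent_of(root, leaf))          # True
print(chain.trace(root, leaf))                 # [2, 3]

chain.link(node_a, leaf)                       # re-parent leaf under node A
print(chain.trace(root, leaf))                 # [1, 3]
print(chain.chain[leaf].get_data())            # 'B1'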
sbktechnology/sap_frappe
refs/heads/master
frappe/model/utils/__init__.py
44
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe, json from frappe import _ """ Model utilities, unclassified functions """ def set_default(doc, key): """Set is_default property of given doc and unset all others filtered by given key.""" if not doc.is_default: frappe.db.set(doc, "is_default", 1) frappe.db.sql("""update `tab%s` set `is_default`=0 where `%s`=%s and name!=%s""" % (doc.doctype, key, "%s", "%s"), (doc.get(key), doc.name))
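set_default() above enforces a "one default per key" invariant at the database level. The pure-Python sketch below only illustrates that invariant on an in-memory list of dicts; it is not the frappe API and does not touch frappe.db.

def set_default_in_memory(docs, target_name, key):
    """Mark the doc named target_name as default and unset all other
    docs sharing the same value for `key` (illustration only)."""
    target = next(d for d in docs if d['name'] == target_name)
    for d in docs:
        if d[key] == target[key]:
            d['is_default'] = 1 if d['name'] == target_name else 0


accounts = [
    {'name': 'ACC-1', 'company': 'Acme', 'is_default': 1},
    {'name': 'ACC-2', 'company': 'Acme', 'is_default': 0},
    {'name': 'ACC-3', 'company': 'Globex', 'is_default': 1},
]
set_default_in_memory(accounts, 'ACC-2', key='company')
print([(d['name'], d['is_default']) for d in accounts])
# [('ACC-1', 0), ('ACC-2', 1), ('ACC-3', 1)]  - the Globex default is untouched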
Argon-Zhou/django
refs/heads/master
django/db/backends/postgresql_psycopg2/operations.py
207
from __future__ import unicode_literals from psycopg2.extras import Inet from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # http://www.postgresql.org/docs/9.4/static/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] return '%s' def date_extract_sql(self, lookup_type, field_name): # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT if lookup_type == 'week_day': # For consistency across backends, we return Sunday=1, Saturday=7. return "EXTRACT('dow' FROM %s) + 1" % field_name else: return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) def date_trunc_sql(self, lookup_type, field_name): # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "%s AT TIME ZONE %%s" % field_name params = [tzname] else: params = [] return field_name, params def datetime_cast_date_sql(self, field_name, tzname): field_name, params = self._convert_field_to_tz(field_name, tzname) sql = '(%s)::date' % field_name return sql, params def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name, params = self._convert_field_to_tz(field_name, tzname) sql = self.date_extract_sql(lookup_type, field_name) return sql, params def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name, params = self._convert_field_to_tz(field_name, tzname) # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) return sql, params def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): if internal_type in ('IPAddressField', 'GenericIPAddressField'): lookup = "HOST(%s)" else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): lookup = 'UPPER(%s)' % lookup return lookup def last_insert_id(self, cursor, table_name, pk_name): # Use pg_get_serial_sequence to get the underlying sequence name # from the table name and column name (available since PostgreSQL 8) cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % ( self.quote_name(table_name), pk_name)) return cursor.fetchone()[0] def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. 
return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, sequences, allow_cascade=False): if tables: # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows # us to truncate tables referenced by a foreign key in any other # table. tables_sql = ', '.join( style.SQL_FIELD(self.quote_name(table)) for table in tables) if allow_cascade: sql = ['%s %s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, style.SQL_KEYWORD('CASCADE'), )] else: sql = ['%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), tables_sql, )] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql else: return [] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info['table'] column_name = sequence_info['column'] if not (column_name and len(column_name) > 0): # This will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list) column_name = 'id' sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % (style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name)) ) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk value if there are records, # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true # if there are records (as the max pk value is already in use), otherwise set it to false. # Use pg_get_serial_sequence to get the underlying sequence name from the table name # and column name (available since PostgreSQL 8) for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(model._meta.db_table)), ) ) break # Only one AutoField is allowed per model, so don't bother continuing. for f in model._meta.many_to_many: if not f.remote_field.through: output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(f.m2m_db_table())), style.SQL_FIELD('id'), style.SQL_FIELD(qn('id')), style.SQL_FIELD(qn('id')), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(f.m2m_db_table())) ) ) return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Returns the maximum length of an identifier. Note that the maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h . This implementation simply returns 63, but can easily be overridden by a custom database backend that inherits most of its behavior from this one. 
""" return 63 def distinct_sql(self, fields): if fields: return 'DISTINCT ON (%s)' % ', '.join(fields) else: return 'DISTINCT' def last_executed_query(self, cursor, sql, params): # http://initd.org/psycopg/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode('utf-8') return None def return_insert_id(self): return "RETURNING %s", () def bulk_insert_sql(self, fields, num_values): items_sql = "(%s)" % ", ".join(["%s"] * len(fields)) return "VALUES " + ", ".join([items_sql] * num_values) def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None
koyuawsmbrtn/eclock
refs/heads/master
windows/Python27/Lib/xml/dom/domreg.py
238
"""Registration facilities for DOM. This module should not be used directly. Instead, the functions getDOMImplementation and registerDOMImplementation should be imported from xml.dom.""" from xml.dom.minicompat import * # isinstance, StringTypes # This is a list of well-known implementations. Well-known names # should be published by posting to [email protected], and are # subsequently recorded in this file. well_known_implementations = { 'minidom':'xml.dom.minidom', '4DOM': 'xml.dom.DOMImplementation', } # DOM implementations not officially registered should register # themselves with their registered = {} def registerDOMImplementation(name, factory): """registerDOMImplementation(name, factory) Register the factory function with the name. The factory function should return an object which implements the DOMImplementation interface. The factory function can either return the same object, or a new one (e.g. if that implementation supports some customization).""" registered[name] = factory def _good_enough(dom, features): "_good_enough(dom, features) -> Return 1 if the dom offers the features" for f,v in features: if not dom.hasFeature(f,v): return 0 return 1 def getDOMImplementation(name = None, features = ()): """getDOMImplementation(name = None, features = ()) -> DOM implementation. Return a suitable DOM implementation. The name is either well-known, the module name of a DOM implementation, or None. If it is not None, imports the corresponding module and returns DOMImplementation object if the import succeeds. If name is not given, consider the available implementations to find one with the required feature set. If no implementation can be found, raise an ImportError. The features list must be a sequence of (feature, version) pairs which are passed to hasFeature.""" import os creator = None mod = well_known_implementations.get(name) if mod: mod = __import__(mod, {}, {}, ['getDOMImplementation']) return mod.getDOMImplementation() elif name: return registered[name]() elif "PYTHON_DOM" in os.environ: return getDOMImplementation(name = os.environ["PYTHON_DOM"]) # User did not specify a name, try implementations in arbitrary # order, returning the one that has the required features if isinstance(features, StringTypes): features = _parse_feature_string(features) for creator in registered.values(): dom = creator() if _good_enough(dom, features): return dom for creator in well_known_implementations.keys(): try: dom = getDOMImplementation(name = creator) except StandardError: # typically ImportError, or AttributeError continue if _good_enough(dom, features): return dom raise ImportError,"no suitable DOM implementation found" def _parse_feature_string(s): features = [] parts = s.split() i = 0 length = len(parts) while i < length: feature = parts[i] if feature[0] in "0123456789": raise ValueError, "bad feature name: %r" % (feature,) i = i + 1 version = None if i < length: v = parts[i] if v[0] in "0123456789": i = i + 1 version = v features.append((feature, version)) return tuple(features)
Chaozz/happygg
refs/heads/master
src_tests/lib/googletest/test/gtest_env_var_test.py
343
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Verifies that Google Test correctly parses environment variables.""" __author__ = '[email protected] (Zhanyong Wan)' import os import gtest_test_utils IS_WINDOWS = os.name == 'nt' IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_') environ = os.environ.copy() def AssertEq(expected, actual): if expected != actual: print('Expected: %s' % (expected,)) print(' Actual: %s' % (actual,)) raise AssertionError def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def GetFlag(flag): """Runs gtest_env_var_test_ and returns its output.""" args = [COMMAND] if flag is not None: args += [flag] return gtest_test_utils.Subprocess(args, env=environ).output def TestFlag(flag, test_val, default_val): """Verifies that the given flag is affected by the corresponding env var.""" env_var = 'GTEST_' + flag.upper() SetEnvVar(env_var, test_val) AssertEq(test_val, GetFlag(flag)) SetEnvVar(env_var, None) AssertEq(default_val, GetFlag(flag)) class GTestEnvVarTest(gtest_test_utils.TestCase): def testEnvVarAffectsFlag(self): """Tests that environment variable should affect the corresponding flag.""" TestFlag('break_on_failure', '1', '0') TestFlag('color', 'yes', 'auto') TestFlag('filter', 'FooTest.Bar', '*') SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test TestFlag('output', 'xml:tmp/foo.xml', '') TestFlag('print_time', '0', '1') TestFlag('repeat', '999', '1') TestFlag('throw_on_failure', '1', '0') TestFlag('death_test_style', 'threadsafe', 'fast') TestFlag('catch_exceptions', '0', '1') if IS_LINUX: TestFlag('death_test_use_fork', '1', '0') TestFlag('stack_trace_depth', '0', '100') def testXmlOutputFile(self): """Tests that $XML_OUTPUT_FILE affects the output flag.""" SetEnvVar('GTEST_OUTPUT', None) SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml') AssertEq('xml:tmp/bar.xml', 
GetFlag('output')) def testXmlOutputFileOverride(self): """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT""" SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml') SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml') AssertEq('xml:tmp/foo.xml', GetFlag('output')) if __name__ == '__main__': gtest_test_utils.Main()
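The helpers in the test above work on a copy of os.environ and pass it explicitly to the child process, so the parent environment is never mutated. A small stand-alone sketch of that pattern, using subprocess directly instead of gtest_test_utils (Python 3.7+ assumed for capture_output/text):

import os
import subprocess
import sys

env = os.environ.copy()
env['GTEST_COLOR'] = 'yes'          # like SetEnvVar('GTEST_COLOR', 'yes') above

child = subprocess.run(
    [sys.executable, '-c', "import os; print(os.environ.get('GTEST_COLOR'))"],
    env=env, capture_output=True, text=True,
)
print(child.stdout.strip())          # yes, seen by the child
print('GTEST_COLOR' in os.environ)   # parent environment unchanged (False unless already set)

env.pop('GTEST_COLOR', None)         # SetEnvVar(..., None) equivalent: unset for the next run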
tangyiyong/odoo
refs/heads/8.0
addons/website_event/__init__.py
1577
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import controllers import models
mrquim/repository.mrquim
refs/heads/master
repo/plugin.video.live.streamspro/pyaesnew/aes.py
177
# The MIT License (MIT) # # Copyright (c) 2014 Richard Moore # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # This is a pure-Python implementation of the AES algorithm and AES common # modes of operation. # See: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard # Honestly, the best description of the modes of operations are the wonderful # diagrams on Wikipedia. They explain in moments what my words could never # achieve. Hence the inline documentation here is sparer than I'd prefer. # See: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation # Also useful, PyCrypto, a crypto library implemented in C with Python bindings: # https://www.dlitz.net/software/pycrypto/ # Supported key sizes: # 128-bit # 192-bit # 256-bit # Supported modes of operation: # ECB - Electronic Codebook # CBC - Cipher-Block Chaining # CFB - Cipher Feedback # OFB - Output Feedback # CTR - Counter # See the README.md for API details and general information. import copy import struct __all__ = ["AES", "AESModeOfOperationCTR", "AESModeOfOperationCBC", "AESModeOfOperationCFB", "AESModeOfOperationECB", "AESModeOfOperationOFB", "AESModesOfOperation", "Counter"] def _compact_word(word): return (word[0] << 24) | (word[1] << 16) | (word[2] << 8) | word[3] def _string_to_bytes(text): return list(ord(c) for c in text) def _bytes_to_string(binary): return "".join(chr(b) for b in binary) def _concat_list(a, b): return a + b # Python 3 compatibility try: xrange except Exception: xrange = range # Python 3 supports bytes, which is already an array of integers def _string_to_bytes(text): if isinstance(text, bytes): return text return [ord(c) for c in text] # In Python 3, we return bytes def _bytes_to_string(binary): return bytes(binary) # Python 3 cannot concatenate a list onto a bytes, so we bytes-ify it first def _concat_list(a, b): return a + bytes(b) # Based *largely* on the Rijndael implementation # See: http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf class AES(object): '''Encapsulates the AES block cipher. You generally should not need this. 
Use the AESModeOfOperation classes below instead.''' # Number of rounds by keysize number_of_rounds = {16: 10, 24: 12, 32: 14} # Round constant words rcon = [ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91 ] # S-box and Inverse S-box (S is for Substitution) S = [ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 ] Si =[ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d ] # Transformations for encryption T1 = [ 0xc66363a5, 0xf87c7c84, 0xee777799, 
0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554, 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d, 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a, 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b, 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b, 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a, 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f, 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f, 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e, 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5, 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d, 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f, 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb, 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497, 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c, 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed, 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a, 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594, 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81, 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3, 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504, 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d, 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f, 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739, 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395, 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883, 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c, 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76, 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4, 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b, 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7, 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0, 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818, 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651, 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21, 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85, 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12, 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9, 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133, 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7, 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a, 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8, 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11, 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a ] T2 = [ 0xa5c66363, 0x84f87c7c, 0x99ee7777, 0x8df67b7b, 0x0dfff2f2, 0xbdd66b6b, 0xb1de6f6f, 0x5491c5c5, 0x50603030, 0x03020101, 0xa9ce6767, 0x7d562b2b, 0x19e7fefe, 0x62b5d7d7, 0xe64dabab, 0x9aec7676, 0x458fcaca, 0x9d1f8282, 0x4089c9c9, 0x87fa7d7d, 0x15effafa, 0xebb25959, 0xc98e4747, 0x0bfbf0f0, 0xec41adad, 0x67b3d4d4, 0xfd5fa2a2, 0xea45afaf, 0xbf239c9c, 0xf753a4a4, 0x96e47272, 0x5b9bc0c0, 0xc275b7b7, 0x1ce1fdfd, 0xae3d9393, 0x6a4c2626, 0x5a6c3636, 0x417e3f3f, 0x02f5f7f7, 0x4f83cccc, 0x5c683434, 0xf451a5a5, 
0x34d1e5e5, 0x08f9f1f1, 0x93e27171, 0x73abd8d8, 0x53623131, 0x3f2a1515, 0x0c080404, 0x5295c7c7, 0x65462323, 0x5e9dc3c3, 0x28301818, 0xa1379696, 0x0f0a0505, 0xb52f9a9a, 0x090e0707, 0x36241212, 0x9b1b8080, 0x3ddfe2e2, 0x26cdebeb, 0x694e2727, 0xcd7fb2b2, 0x9fea7575, 0x1b120909, 0x9e1d8383, 0x74582c2c, 0x2e341a1a, 0x2d361b1b, 0xb2dc6e6e, 0xeeb45a5a, 0xfb5ba0a0, 0xf6a45252, 0x4d763b3b, 0x61b7d6d6, 0xce7db3b3, 0x7b522929, 0x3edde3e3, 0x715e2f2f, 0x97138484, 0xf5a65353, 0x68b9d1d1, 0x00000000, 0x2cc1eded, 0x60402020, 0x1fe3fcfc, 0xc879b1b1, 0xedb65b5b, 0xbed46a6a, 0x468dcbcb, 0xd967bebe, 0x4b723939, 0xde944a4a, 0xd4984c4c, 0xe8b05858, 0x4a85cfcf, 0x6bbbd0d0, 0x2ac5efef, 0xe54faaaa, 0x16edfbfb, 0xc5864343, 0xd79a4d4d, 0x55663333, 0x94118585, 0xcf8a4545, 0x10e9f9f9, 0x06040202, 0x81fe7f7f, 0xf0a05050, 0x44783c3c, 0xba259f9f, 0xe34ba8a8, 0xf3a25151, 0xfe5da3a3, 0xc0804040, 0x8a058f8f, 0xad3f9292, 0xbc219d9d, 0x48703838, 0x04f1f5f5, 0xdf63bcbc, 0xc177b6b6, 0x75afdada, 0x63422121, 0x30201010, 0x1ae5ffff, 0x0efdf3f3, 0x6dbfd2d2, 0x4c81cdcd, 0x14180c0c, 0x35261313, 0x2fc3ecec, 0xe1be5f5f, 0xa2359797, 0xcc884444, 0x392e1717, 0x5793c4c4, 0xf255a7a7, 0x82fc7e7e, 0x477a3d3d, 0xacc86464, 0xe7ba5d5d, 0x2b321919, 0x95e67373, 0xa0c06060, 0x98198181, 0xd19e4f4f, 0x7fa3dcdc, 0x66442222, 0x7e542a2a, 0xab3b9090, 0x830b8888, 0xca8c4646, 0x29c7eeee, 0xd36bb8b8, 0x3c281414, 0x79a7dede, 0xe2bc5e5e, 0x1d160b0b, 0x76addbdb, 0x3bdbe0e0, 0x56643232, 0x4e743a3a, 0x1e140a0a, 0xdb924949, 0x0a0c0606, 0x6c482424, 0xe4b85c5c, 0x5d9fc2c2, 0x6ebdd3d3, 0xef43acac, 0xa6c46262, 0xa8399191, 0xa4319595, 0x37d3e4e4, 0x8bf27979, 0x32d5e7e7, 0x438bc8c8, 0x596e3737, 0xb7da6d6d, 0x8c018d8d, 0x64b1d5d5, 0xd29c4e4e, 0xe049a9a9, 0xb4d86c6c, 0xfaac5656, 0x07f3f4f4, 0x25cfeaea, 0xafca6565, 0x8ef47a7a, 0xe947aeae, 0x18100808, 0xd56fbaba, 0x88f07878, 0x6f4a2525, 0x725c2e2e, 0x24381c1c, 0xf157a6a6, 0xc773b4b4, 0x5197c6c6, 0x23cbe8e8, 0x7ca1dddd, 0x9ce87474, 0x213e1f1f, 0xdd964b4b, 0xdc61bdbd, 0x860d8b8b, 0x850f8a8a, 0x90e07070, 0x427c3e3e, 0xc471b5b5, 0xaacc6666, 0xd8904848, 0x05060303, 0x01f7f6f6, 0x121c0e0e, 0xa3c26161, 0x5f6a3535, 0xf9ae5757, 0xd069b9b9, 0x91178686, 0x5899c1c1, 0x273a1d1d, 0xb9279e9e, 0x38d9e1e1, 0x13ebf8f8, 0xb32b9898, 0x33221111, 0xbbd26969, 0x70a9d9d9, 0x89078e8e, 0xa7339494, 0xb62d9b9b, 0x223c1e1e, 0x92158787, 0x20c9e9e9, 0x4987cece, 0xffaa5555, 0x78502828, 0x7aa5dfdf, 0x8f038c8c, 0xf859a1a1, 0x80098989, 0x171a0d0d, 0xda65bfbf, 0x31d7e6e6, 0xc6844242, 0xb8d06868, 0xc3824141, 0xb0299999, 0x775a2d2d, 0x111e0f0f, 0xcb7bb0b0, 0xfca85454, 0xd66dbbbb, 0x3a2c1616 ] T3 = [ 0x63a5c663, 0x7c84f87c, 0x7799ee77, 0x7b8df67b, 0xf20dfff2, 0x6bbdd66b, 0x6fb1de6f, 0xc55491c5, 0x30506030, 0x01030201, 0x67a9ce67, 0x2b7d562b, 0xfe19e7fe, 0xd762b5d7, 0xabe64dab, 0x769aec76, 0xca458fca, 0x829d1f82, 0xc94089c9, 0x7d87fa7d, 0xfa15effa, 0x59ebb259, 0x47c98e47, 0xf00bfbf0, 0xadec41ad, 0xd467b3d4, 0xa2fd5fa2, 0xafea45af, 0x9cbf239c, 0xa4f753a4, 0x7296e472, 0xc05b9bc0, 0xb7c275b7, 0xfd1ce1fd, 0x93ae3d93, 0x266a4c26, 0x365a6c36, 0x3f417e3f, 0xf702f5f7, 0xcc4f83cc, 0x345c6834, 0xa5f451a5, 0xe534d1e5, 0xf108f9f1, 0x7193e271, 0xd873abd8, 0x31536231, 0x153f2a15, 0x040c0804, 0xc75295c7, 0x23654623, 0xc35e9dc3, 0x18283018, 0x96a13796, 0x050f0a05, 0x9ab52f9a, 0x07090e07, 0x12362412, 0x809b1b80, 0xe23ddfe2, 0xeb26cdeb, 0x27694e27, 0xb2cd7fb2, 0x759fea75, 0x091b1209, 0x839e1d83, 0x2c74582c, 0x1a2e341a, 0x1b2d361b, 0x6eb2dc6e, 0x5aeeb45a, 0xa0fb5ba0, 0x52f6a452, 0x3b4d763b, 0xd661b7d6, 0xb3ce7db3, 0x297b5229, 0xe33edde3, 0x2f715e2f, 0x84971384, 0x53f5a653, 
0xd168b9d1, 0x00000000, 0xed2cc1ed, 0x20604020, 0xfc1fe3fc, 0xb1c879b1, 0x5bedb65b, 0x6abed46a, 0xcb468dcb, 0xbed967be, 0x394b7239, 0x4ade944a, 0x4cd4984c, 0x58e8b058, 0xcf4a85cf, 0xd06bbbd0, 0xef2ac5ef, 0xaae54faa, 0xfb16edfb, 0x43c58643, 0x4dd79a4d, 0x33556633, 0x85941185, 0x45cf8a45, 0xf910e9f9, 0x02060402, 0x7f81fe7f, 0x50f0a050, 0x3c44783c, 0x9fba259f, 0xa8e34ba8, 0x51f3a251, 0xa3fe5da3, 0x40c08040, 0x8f8a058f, 0x92ad3f92, 0x9dbc219d, 0x38487038, 0xf504f1f5, 0xbcdf63bc, 0xb6c177b6, 0xda75afda, 0x21634221, 0x10302010, 0xff1ae5ff, 0xf30efdf3, 0xd26dbfd2, 0xcd4c81cd, 0x0c14180c, 0x13352613, 0xec2fc3ec, 0x5fe1be5f, 0x97a23597, 0x44cc8844, 0x17392e17, 0xc45793c4, 0xa7f255a7, 0x7e82fc7e, 0x3d477a3d, 0x64acc864, 0x5de7ba5d, 0x192b3219, 0x7395e673, 0x60a0c060, 0x81981981, 0x4fd19e4f, 0xdc7fa3dc, 0x22664422, 0x2a7e542a, 0x90ab3b90, 0x88830b88, 0x46ca8c46, 0xee29c7ee, 0xb8d36bb8, 0x143c2814, 0xde79a7de, 0x5ee2bc5e, 0x0b1d160b, 0xdb76addb, 0xe03bdbe0, 0x32566432, 0x3a4e743a, 0x0a1e140a, 0x49db9249, 0x060a0c06, 0x246c4824, 0x5ce4b85c, 0xc25d9fc2, 0xd36ebdd3, 0xacef43ac, 0x62a6c462, 0x91a83991, 0x95a43195, 0xe437d3e4, 0x798bf279, 0xe732d5e7, 0xc8438bc8, 0x37596e37, 0x6db7da6d, 0x8d8c018d, 0xd564b1d5, 0x4ed29c4e, 0xa9e049a9, 0x6cb4d86c, 0x56faac56, 0xf407f3f4, 0xea25cfea, 0x65afca65, 0x7a8ef47a, 0xaee947ae, 0x08181008, 0xbad56fba, 0x7888f078, 0x256f4a25, 0x2e725c2e, 0x1c24381c, 0xa6f157a6, 0xb4c773b4, 0xc65197c6, 0xe823cbe8, 0xdd7ca1dd, 0x749ce874, 0x1f213e1f, 0x4bdd964b, 0xbddc61bd, 0x8b860d8b, 0x8a850f8a, 0x7090e070, 0x3e427c3e, 0xb5c471b5, 0x66aacc66, 0x48d89048, 0x03050603, 0xf601f7f6, 0x0e121c0e, 0x61a3c261, 0x355f6a35, 0x57f9ae57, 0xb9d069b9, 0x86911786, 0xc15899c1, 0x1d273a1d, 0x9eb9279e, 0xe138d9e1, 0xf813ebf8, 0x98b32b98, 0x11332211, 0x69bbd269, 0xd970a9d9, 0x8e89078e, 0x94a73394, 0x9bb62d9b, 0x1e223c1e, 0x87921587, 0xe920c9e9, 0xce4987ce, 0x55ffaa55, 0x28785028, 0xdf7aa5df, 0x8c8f038c, 0xa1f859a1, 0x89800989, 0x0d171a0d, 0xbfda65bf, 0xe631d7e6, 0x42c68442, 0x68b8d068, 0x41c38241, 0x99b02999, 0x2d775a2d, 0x0f111e0f, 0xb0cb7bb0, 0x54fca854, 0xbbd66dbb, 0x163a2c16 ] T4 = [ 0x6363a5c6, 0x7c7c84f8, 0x777799ee, 0x7b7b8df6, 0xf2f20dff, 0x6b6bbdd6, 0x6f6fb1de, 0xc5c55491, 0x30305060, 0x01010302, 0x6767a9ce, 0x2b2b7d56, 0xfefe19e7, 0xd7d762b5, 0xababe64d, 0x76769aec, 0xcaca458f, 0x82829d1f, 0xc9c94089, 0x7d7d87fa, 0xfafa15ef, 0x5959ebb2, 0x4747c98e, 0xf0f00bfb, 0xadadec41, 0xd4d467b3, 0xa2a2fd5f, 0xafafea45, 0x9c9cbf23, 0xa4a4f753, 0x727296e4, 0xc0c05b9b, 0xb7b7c275, 0xfdfd1ce1, 0x9393ae3d, 0x26266a4c, 0x36365a6c, 0x3f3f417e, 0xf7f702f5, 0xcccc4f83, 0x34345c68, 0xa5a5f451, 0xe5e534d1, 0xf1f108f9, 0x717193e2, 0xd8d873ab, 0x31315362, 0x15153f2a, 0x04040c08, 0xc7c75295, 0x23236546, 0xc3c35e9d, 0x18182830, 0x9696a137, 0x05050f0a, 0x9a9ab52f, 0x0707090e, 0x12123624, 0x80809b1b, 0xe2e23ddf, 0xebeb26cd, 0x2727694e, 0xb2b2cd7f, 0x75759fea, 0x09091b12, 0x83839e1d, 0x2c2c7458, 0x1a1a2e34, 0x1b1b2d36, 0x6e6eb2dc, 0x5a5aeeb4, 0xa0a0fb5b, 0x5252f6a4, 0x3b3b4d76, 0xd6d661b7, 0xb3b3ce7d, 0x29297b52, 0xe3e33edd, 0x2f2f715e, 0x84849713, 0x5353f5a6, 0xd1d168b9, 0x00000000, 0xeded2cc1, 0x20206040, 0xfcfc1fe3, 0xb1b1c879, 0x5b5bedb6, 0x6a6abed4, 0xcbcb468d, 0xbebed967, 0x39394b72, 0x4a4ade94, 0x4c4cd498, 0x5858e8b0, 0xcfcf4a85, 0xd0d06bbb, 0xefef2ac5, 0xaaaae54f, 0xfbfb16ed, 0x4343c586, 0x4d4dd79a, 0x33335566, 0x85859411, 0x4545cf8a, 0xf9f910e9, 0x02020604, 0x7f7f81fe, 0x5050f0a0, 0x3c3c4478, 0x9f9fba25, 0xa8a8e34b, 0x5151f3a2, 0xa3a3fe5d, 0x4040c080, 0x8f8f8a05, 0x9292ad3f, 0x9d9dbc21, 0x38384870, 0xf5f504f1, 
0xbcbcdf63, 0xb6b6c177, 0xdada75af, 0x21216342, 0x10103020, 0xffff1ae5, 0xf3f30efd, 0xd2d26dbf, 0xcdcd4c81, 0x0c0c1418, 0x13133526, 0xecec2fc3, 0x5f5fe1be, 0x9797a235, 0x4444cc88, 0x1717392e, 0xc4c45793, 0xa7a7f255, 0x7e7e82fc, 0x3d3d477a, 0x6464acc8, 0x5d5de7ba, 0x19192b32, 0x737395e6, 0x6060a0c0, 0x81819819, 0x4f4fd19e, 0xdcdc7fa3, 0x22226644, 0x2a2a7e54, 0x9090ab3b, 0x8888830b, 0x4646ca8c, 0xeeee29c7, 0xb8b8d36b, 0x14143c28, 0xdede79a7, 0x5e5ee2bc, 0x0b0b1d16, 0xdbdb76ad, 0xe0e03bdb, 0x32325664, 0x3a3a4e74, 0x0a0a1e14, 0x4949db92, 0x06060a0c, 0x24246c48, 0x5c5ce4b8, 0xc2c25d9f, 0xd3d36ebd, 0xacacef43, 0x6262a6c4, 0x9191a839, 0x9595a431, 0xe4e437d3, 0x79798bf2, 0xe7e732d5, 0xc8c8438b, 0x3737596e, 0x6d6db7da, 0x8d8d8c01, 0xd5d564b1, 0x4e4ed29c, 0xa9a9e049, 0x6c6cb4d8, 0x5656faac, 0xf4f407f3, 0xeaea25cf, 0x6565afca, 0x7a7a8ef4, 0xaeaee947, 0x08081810, 0xbabad56f, 0x787888f0, 0x25256f4a, 0x2e2e725c, 0x1c1c2438, 0xa6a6f157, 0xb4b4c773, 0xc6c65197, 0xe8e823cb, 0xdddd7ca1, 0x74749ce8, 0x1f1f213e, 0x4b4bdd96, 0xbdbddc61, 0x8b8b860d, 0x8a8a850f, 0x707090e0, 0x3e3e427c, 0xb5b5c471, 0x6666aacc, 0x4848d890, 0x03030506, 0xf6f601f7, 0x0e0e121c, 0x6161a3c2, 0x35355f6a, 0x5757f9ae, 0xb9b9d069, 0x86869117, 0xc1c15899, 0x1d1d273a, 0x9e9eb927, 0xe1e138d9, 0xf8f813eb, 0x9898b32b, 0x11113322, 0x6969bbd2, 0xd9d970a9, 0x8e8e8907, 0x9494a733, 0x9b9bb62d, 0x1e1e223c, 0x87879215, 0xe9e920c9, 0xcece4987, 0x5555ffaa, 0x28287850, 0xdfdf7aa5, 0x8c8c8f03, 0xa1a1f859, 0x89898009, 0x0d0d171a, 0xbfbfda65, 0xe6e631d7, 0x4242c684, 0x6868b8d0, 0x4141c382, 0x9999b029, 0x2d2d775a, 0x0f0f111e, 0xb0b0cb7b, 0x5454fca8, 0xbbbbd66d, 0x16163a2c ] # Transformations for decryption T5 = [ 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96, 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393, 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25, 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f, 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1, 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6, 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da, 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844, 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd, 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4, 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45, 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94, 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7, 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a, 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5, 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c, 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1, 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a, 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75, 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051, 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46, 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff, 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77, 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb, 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000, 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e, 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927, 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a, 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e, 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16, 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d, 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8, 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd, 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34, 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163, 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120, 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d, 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0, 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422, 
0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef, 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36, 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4, 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662, 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5, 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3, 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b, 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8, 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6, 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6, 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0, 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815, 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f, 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df, 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f, 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e, 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713, 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89, 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c, 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf, 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86, 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f, 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541, 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190, 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742 ] T6 = [ 0x5051f4a7, 0x537e4165, 0xc31a17a4, 0x963a275e, 0xcb3bab6b, 0xf11f9d45, 0xabacfa58, 0x934be303, 0x552030fa, 0xf6ad766d, 0x9188cc76, 0x25f5024c, 0xfc4fe5d7, 0xd7c52acb, 0x80263544, 0x8fb562a3, 0x49deb15a, 0x6725ba1b, 0x9845ea0e, 0xe15dfec0, 0x02c32f75, 0x12814cf0, 0xa38d4697, 0xc66bd3f9, 0xe7038f5f, 0x9515929c, 0xebbf6d7a, 0xda955259, 0x2dd4be83, 0xd3587421, 0x2949e069, 0x448ec9c8, 0x6a75c289, 0x78f48e79, 0x6b99583e, 0xdd27b971, 0xb6bee14f, 0x17f088ad, 0x66c920ac, 0xb47dce3a, 0x1863df4a, 0x82e51a31, 0x60975133, 0x4562537f, 0xe0b16477, 0x84bb6bae, 0x1cfe81a0, 0x94f9082b, 0x58704868, 0x198f45fd, 0x8794de6c, 0xb7527bf8, 0x23ab73d3, 0xe2724b02, 0x57e31f8f, 0x2a6655ab, 0x07b2eb28, 0x032fb5c2, 0x9a86c57b, 0xa5d33708, 0xf2302887, 0xb223bfa5, 0xba02036a, 0x5ced1682, 0x2b8acf1c, 0x92a779b4, 0xf0f307f2, 0xa14e69e2, 0xcd65daf4, 0xd50605be, 0x1fd13462, 0x8ac4a6fe, 0x9d342e53, 0xa0a2f355, 0x32058ae1, 0x75a4f6eb, 0x390b83ec, 0xaa4060ef, 0x065e719f, 0x51bd6e10, 0xf93e218a, 0x3d96dd06, 0xaedd3e05, 0x464de6bd, 0xb591548d, 0x0571c45d, 0x6f0406d4, 0xff605015, 0x241998fb, 0x97d6bde9, 0xcc894043, 0x7767d99e, 0xbdb0e842, 0x8807898b, 0x38e7195b, 0xdb79c8ee, 0x47a17c0a, 0xe97c420f, 0xc9f8841e, 0x00000000, 0x83098086, 0x48322bed, 0xac1e1170, 0x4e6c5a72, 0xfbfd0eff, 0x560f8538, 0x1e3daed5, 0x27362d39, 0x640a0fd9, 0x21685ca6, 0xd19b5b54, 0x3a24362e, 0xb10c0a67, 0x0f9357e7, 0xd2b4ee96, 0x9e1b9b91, 0x4f80c0c5, 0xa261dc20, 0x695a774b, 0x161c121a, 0x0ae293ba, 0xe5c0a02a, 0x433c22e0, 0x1d121b17, 0x0b0e090d, 0xadf28bc7, 0xb92db6a8, 0xc8141ea9, 0x8557f119, 0x4caf7507, 0xbbee99dd, 0xfda37f60, 0x9ff70126, 0xbc5c72f5, 0xc544663b, 0x345bfb7e, 0x768b4329, 0xdccb23c6, 0x68b6edfc, 0x63b8e4f1, 0xcad731dc, 0x10426385, 0x40139722, 0x2084c611, 0x7d854a24, 0xf8d2bb3d, 0x11aef932, 0x6dc729a1, 0x4b1d9e2f, 0xf3dcb230, 0xec0d8652, 0xd077c1e3, 0x6c2bb316, 0x99a970b9, 0xfa119448, 0x2247e964, 0xc4a8fc8c, 0x1aa0f03f, 0xd8567d2c, 0xef223390, 0xc787494e, 0xc1d938d1, 0xfe8ccaa2, 0x3698d40b, 0xcfa6f581, 0x28a57ade, 0x26dab78e, 0xa43fadbf, 0xe42c3a9d, 0x0d507892, 0x9b6a5fcc, 0x62547e46, 0xc2f68d13, 0xe890d8b8, 0x5e2e39f7, 0xf582c3af, 0xbe9f5d80, 0x7c69d093, 0xa96fd52d, 0xb3cf2512, 0x3bc8ac99, 0xa710187d, 0x6ee89c63, 0x7bdb3bbb, 0x09cd2678, 0xf46e5918, 0x01ec9ab7, 0xa8834f9a, 0x65e6956e, 0x7eaaffe6, 0x0821bccf, 0xe6ef15e8, 0xd9bae79b, 0xce4a6f36, 0xd4ea9f09, 
0xd629b07c, 0xaf31a4b2, 0x312a3f23, 0x30c6a594, 0xc035a266, 0x37744ebc, 0xa6fc82ca, 0xb0e090d0, 0x1533a7d8, 0x4af10498, 0xf741ecda, 0x0e7fcd50, 0x2f1791f6, 0x8d764dd6, 0x4d43efb0, 0x54ccaa4d, 0xdfe49604, 0xe39ed1b5, 0x1b4c6a88, 0xb8c12c1f, 0x7f466551, 0x049d5eea, 0x5d018c35, 0x73fa8774, 0x2efb0b41, 0x5ab3671d, 0x5292dbd2, 0x33e91056, 0x136dd647, 0x8c9ad761, 0x7a37a10c, 0x8e59f814, 0x89eb133c, 0xeecea927, 0x35b761c9, 0xede11ce5, 0x3c7a47b1, 0x599cd2df, 0x3f55f273, 0x791814ce, 0xbf73c737, 0xea53f7cd, 0x5b5ffdaa, 0x14df3d6f, 0x867844db, 0x81caaff3, 0x3eb968c4, 0x2c382434, 0x5fc2a340, 0x72161dc3, 0x0cbce225, 0x8b283c49, 0x41ff0d95, 0x7139a801, 0xde080cb3, 0x9cd8b4e4, 0x906456c1, 0x617bcb84, 0x70d532b6, 0x74486c5c, 0x42d0b857 ] T7 = [ 0xa75051f4, 0x65537e41, 0xa4c31a17, 0x5e963a27, 0x6bcb3bab, 0x45f11f9d, 0x58abacfa, 0x03934be3, 0xfa552030, 0x6df6ad76, 0x769188cc, 0x4c25f502, 0xd7fc4fe5, 0xcbd7c52a, 0x44802635, 0xa38fb562, 0x5a49deb1, 0x1b6725ba, 0x0e9845ea, 0xc0e15dfe, 0x7502c32f, 0xf012814c, 0x97a38d46, 0xf9c66bd3, 0x5fe7038f, 0x9c951592, 0x7aebbf6d, 0x59da9552, 0x832dd4be, 0x21d35874, 0x692949e0, 0xc8448ec9, 0x896a75c2, 0x7978f48e, 0x3e6b9958, 0x71dd27b9, 0x4fb6bee1, 0xad17f088, 0xac66c920, 0x3ab47dce, 0x4a1863df, 0x3182e51a, 0x33609751, 0x7f456253, 0x77e0b164, 0xae84bb6b, 0xa01cfe81, 0x2b94f908, 0x68587048, 0xfd198f45, 0x6c8794de, 0xf8b7527b, 0xd323ab73, 0x02e2724b, 0x8f57e31f, 0xab2a6655, 0x2807b2eb, 0xc2032fb5, 0x7b9a86c5, 0x08a5d337, 0x87f23028, 0xa5b223bf, 0x6aba0203, 0x825ced16, 0x1c2b8acf, 0xb492a779, 0xf2f0f307, 0xe2a14e69, 0xf4cd65da, 0xbed50605, 0x621fd134, 0xfe8ac4a6, 0x539d342e, 0x55a0a2f3, 0xe132058a, 0xeb75a4f6, 0xec390b83, 0xefaa4060, 0x9f065e71, 0x1051bd6e, 0x8af93e21, 0x063d96dd, 0x05aedd3e, 0xbd464de6, 0x8db59154, 0x5d0571c4, 0xd46f0406, 0x15ff6050, 0xfb241998, 0xe997d6bd, 0x43cc8940, 0x9e7767d9, 0x42bdb0e8, 0x8b880789, 0x5b38e719, 0xeedb79c8, 0x0a47a17c, 0x0fe97c42, 0x1ec9f884, 0x00000000, 0x86830980, 0xed48322b, 0x70ac1e11, 0x724e6c5a, 0xfffbfd0e, 0x38560f85, 0xd51e3dae, 0x3927362d, 0xd9640a0f, 0xa621685c, 0x54d19b5b, 0x2e3a2436, 0x67b10c0a, 0xe70f9357, 0x96d2b4ee, 0x919e1b9b, 0xc54f80c0, 0x20a261dc, 0x4b695a77, 0x1a161c12, 0xba0ae293, 0x2ae5c0a0, 0xe0433c22, 0x171d121b, 0x0d0b0e09, 0xc7adf28b, 0xa8b92db6, 0xa9c8141e, 0x198557f1, 0x074caf75, 0xddbbee99, 0x60fda37f, 0x269ff701, 0xf5bc5c72, 0x3bc54466, 0x7e345bfb, 0x29768b43, 0xc6dccb23, 0xfc68b6ed, 0xf163b8e4, 0xdccad731, 0x85104263, 0x22401397, 0x112084c6, 0x247d854a, 0x3df8d2bb, 0x3211aef9, 0xa16dc729, 0x2f4b1d9e, 0x30f3dcb2, 0x52ec0d86, 0xe3d077c1, 0x166c2bb3, 0xb999a970, 0x48fa1194, 0x642247e9, 0x8cc4a8fc, 0x3f1aa0f0, 0x2cd8567d, 0x90ef2233, 0x4ec78749, 0xd1c1d938, 0xa2fe8cca, 0x0b3698d4, 0x81cfa6f5, 0xde28a57a, 0x8e26dab7, 0xbfa43fad, 0x9de42c3a, 0x920d5078, 0xcc9b6a5f, 0x4662547e, 0x13c2f68d, 0xb8e890d8, 0xf75e2e39, 0xaff582c3, 0x80be9f5d, 0x937c69d0, 0x2da96fd5, 0x12b3cf25, 0x993bc8ac, 0x7da71018, 0x636ee89c, 0xbb7bdb3b, 0x7809cd26, 0x18f46e59, 0xb701ec9a, 0x9aa8834f, 0x6e65e695, 0xe67eaaff, 0xcf0821bc, 0xe8e6ef15, 0x9bd9bae7, 0x36ce4a6f, 0x09d4ea9f, 0x7cd629b0, 0xb2af31a4, 0x23312a3f, 0x9430c6a5, 0x66c035a2, 0xbc37744e, 0xcaa6fc82, 0xd0b0e090, 0xd81533a7, 0x984af104, 0xdaf741ec, 0x500e7fcd, 0xf62f1791, 0xd68d764d, 0xb04d43ef, 0x4d54ccaa, 0x04dfe496, 0xb5e39ed1, 0x881b4c6a, 0x1fb8c12c, 0x517f4665, 0xea049d5e, 0x355d018c, 0x7473fa87, 0x412efb0b, 0x1d5ab367, 0xd25292db, 0x5633e910, 0x47136dd6, 0x618c9ad7, 0x0c7a37a1, 0x148e59f8, 0x3c89eb13, 0x27eecea9, 0xc935b761, 0xe5ede11c, 0xb13c7a47, 0xdf599cd2, 0x733f55f2, 
0xce791814, 0x37bf73c7, 0xcdea53f7, 0xaa5b5ffd, 0x6f14df3d, 0xdb867844, 0xf381caaf, 0xc43eb968, 0x342c3824, 0x405fc2a3, 0xc372161d, 0x250cbce2, 0x498b283c, 0x9541ff0d, 0x017139a8, 0xb3de080c, 0xe49cd8b4, 0xc1906456, 0x84617bcb, 0xb670d532, 0x5c74486c, 0x5742d0b8 ] T8 = [ 0xf4a75051, 0x4165537e, 0x17a4c31a, 0x275e963a, 0xab6bcb3b, 0x9d45f11f, 0xfa58abac, 0xe303934b, 0x30fa5520, 0x766df6ad, 0xcc769188, 0x024c25f5, 0xe5d7fc4f, 0x2acbd7c5, 0x35448026, 0x62a38fb5, 0xb15a49de, 0xba1b6725, 0xea0e9845, 0xfec0e15d, 0x2f7502c3, 0x4cf01281, 0x4697a38d, 0xd3f9c66b, 0x8f5fe703, 0x929c9515, 0x6d7aebbf, 0x5259da95, 0xbe832dd4, 0x7421d358, 0xe0692949, 0xc9c8448e, 0xc2896a75, 0x8e7978f4, 0x583e6b99, 0xb971dd27, 0xe14fb6be, 0x88ad17f0, 0x20ac66c9, 0xce3ab47d, 0xdf4a1863, 0x1a3182e5, 0x51336097, 0x537f4562, 0x6477e0b1, 0x6bae84bb, 0x81a01cfe, 0x082b94f9, 0x48685870, 0x45fd198f, 0xde6c8794, 0x7bf8b752, 0x73d323ab, 0x4b02e272, 0x1f8f57e3, 0x55ab2a66, 0xeb2807b2, 0xb5c2032f, 0xc57b9a86, 0x3708a5d3, 0x2887f230, 0xbfa5b223, 0x036aba02, 0x16825ced, 0xcf1c2b8a, 0x79b492a7, 0x07f2f0f3, 0x69e2a14e, 0xdaf4cd65, 0x05bed506, 0x34621fd1, 0xa6fe8ac4, 0x2e539d34, 0xf355a0a2, 0x8ae13205, 0xf6eb75a4, 0x83ec390b, 0x60efaa40, 0x719f065e, 0x6e1051bd, 0x218af93e, 0xdd063d96, 0x3e05aedd, 0xe6bd464d, 0x548db591, 0xc45d0571, 0x06d46f04, 0x5015ff60, 0x98fb2419, 0xbde997d6, 0x4043cc89, 0xd99e7767, 0xe842bdb0, 0x898b8807, 0x195b38e7, 0xc8eedb79, 0x7c0a47a1, 0x420fe97c, 0x841ec9f8, 0x00000000, 0x80868309, 0x2bed4832, 0x1170ac1e, 0x5a724e6c, 0x0efffbfd, 0x8538560f, 0xaed51e3d, 0x2d392736, 0x0fd9640a, 0x5ca62168, 0x5b54d19b, 0x362e3a24, 0x0a67b10c, 0x57e70f93, 0xee96d2b4, 0x9b919e1b, 0xc0c54f80, 0xdc20a261, 0x774b695a, 0x121a161c, 0x93ba0ae2, 0xa02ae5c0, 0x22e0433c, 0x1b171d12, 0x090d0b0e, 0x8bc7adf2, 0xb6a8b92d, 0x1ea9c814, 0xf1198557, 0x75074caf, 0x99ddbbee, 0x7f60fda3, 0x01269ff7, 0x72f5bc5c, 0x663bc544, 0xfb7e345b, 0x4329768b, 0x23c6dccb, 0xedfc68b6, 0xe4f163b8, 0x31dccad7, 0x63851042, 0x97224013, 0xc6112084, 0x4a247d85, 0xbb3df8d2, 0xf93211ae, 0x29a16dc7, 0x9e2f4b1d, 0xb230f3dc, 0x8652ec0d, 0xc1e3d077, 0xb3166c2b, 0x70b999a9, 0x9448fa11, 0xe9642247, 0xfc8cc4a8, 0xf03f1aa0, 0x7d2cd856, 0x3390ef22, 0x494ec787, 0x38d1c1d9, 0xcaa2fe8c, 0xd40b3698, 0xf581cfa6, 0x7ade28a5, 0xb78e26da, 0xadbfa43f, 0x3a9de42c, 0x78920d50, 0x5fcc9b6a, 0x7e466254, 0x8d13c2f6, 0xd8b8e890, 0x39f75e2e, 0xc3aff582, 0x5d80be9f, 0xd0937c69, 0xd52da96f, 0x2512b3cf, 0xac993bc8, 0x187da710, 0x9c636ee8, 0x3bbb7bdb, 0x267809cd, 0x5918f46e, 0x9ab701ec, 0x4f9aa883, 0x956e65e6, 0xffe67eaa, 0xbccf0821, 0x15e8e6ef, 0xe79bd9ba, 0x6f36ce4a, 0x9f09d4ea, 0xb07cd629, 0xa4b2af31, 0x3f23312a, 0xa59430c6, 0xa266c035, 0x4ebc3774, 0x82caa6fc, 0x90d0b0e0, 0xa7d81533, 0x04984af1, 0xecdaf741, 0xcd500e7f, 0x91f62f17, 0x4dd68d76, 0xefb04d43, 0xaa4d54cc, 0x9604dfe4, 0xd1b5e39e, 0x6a881b4c, 0x2c1fb8c1, 0x65517f46, 0x5eea049d, 0x8c355d01, 0x877473fa, 0x0b412efb, 0x671d5ab3, 0xdbd25292, 0x105633e9, 0xd647136d, 0xd7618c9a, 0xa10c7a37, 0xf8148e59, 0x133c89eb, 0xa927eece, 0x61c935b7, 0x1ce5ede1, 0x47b13c7a, 0xd2df599c, 0xf2733f55, 0x14ce7918, 0xc737bf73, 0xf7cdea53, 0xfdaa5b5f, 0x3d6f14df, 0x44db8678, 0xaff381ca, 0x68c43eb9, 0x24342c38, 0xa3405fc2, 0x1dc37216, 0xe2250cbc, 0x3c498b28, 0x0d9541ff, 0xa8017139, 0x0cb3de08, 0xb4e49cd8, 0x56c19064, 0xcb84617b, 0x32b670d5, 0x6c5c7448, 0xb85742d0 ] # Transformations for decryption key expansion U1 = [ 0x00000000, 0x0e090d0b, 0x1c121a16, 0x121b171d, 0x3824342c, 0x362d3927, 0x24362e3a, 0x2a3f2331, 0x70486858, 0x7e416553, 0x6c5a724e, 0x62537f45, 0x486c5c74, 
0x4665517f, 0x547e4662, 0x5a774b69, 0xe090d0b0, 0xee99ddbb, 0xfc82caa6, 0xf28bc7ad, 0xd8b4e49c, 0xd6bde997, 0xc4a6fe8a, 0xcaaff381, 0x90d8b8e8, 0x9ed1b5e3, 0x8ccaa2fe, 0x82c3aff5, 0xa8fc8cc4, 0xa6f581cf, 0xb4ee96d2, 0xbae79bd9, 0xdb3bbb7b, 0xd532b670, 0xc729a16d, 0xc920ac66, 0xe31f8f57, 0xed16825c, 0xff0d9541, 0xf104984a, 0xab73d323, 0xa57ade28, 0xb761c935, 0xb968c43e, 0x9357e70f, 0x9d5eea04, 0x8f45fd19, 0x814cf012, 0x3bab6bcb, 0x35a266c0, 0x27b971dd, 0x29b07cd6, 0x038f5fe7, 0x0d8652ec, 0x1f9d45f1, 0x119448fa, 0x4be30393, 0x45ea0e98, 0x57f11985, 0x59f8148e, 0x73c737bf, 0x7dce3ab4, 0x6fd52da9, 0x61dc20a2, 0xad766df6, 0xa37f60fd, 0xb16477e0, 0xbf6d7aeb, 0x955259da, 0x9b5b54d1, 0x894043cc, 0x87494ec7, 0xdd3e05ae, 0xd33708a5, 0xc12c1fb8, 0xcf2512b3, 0xe51a3182, 0xeb133c89, 0xf9082b94, 0xf701269f, 0x4de6bd46, 0x43efb04d, 0x51f4a750, 0x5ffdaa5b, 0x75c2896a, 0x7bcb8461, 0x69d0937c, 0x67d99e77, 0x3daed51e, 0x33a7d815, 0x21bccf08, 0x2fb5c203, 0x058ae132, 0x0b83ec39, 0x1998fb24, 0x1791f62f, 0x764dd68d, 0x7844db86, 0x6a5fcc9b, 0x6456c190, 0x4e69e2a1, 0x4060efaa, 0x527bf8b7, 0x5c72f5bc, 0x0605bed5, 0x080cb3de, 0x1a17a4c3, 0x141ea9c8, 0x3e218af9, 0x302887f2, 0x223390ef, 0x2c3a9de4, 0x96dd063d, 0x98d40b36, 0x8acf1c2b, 0x84c61120, 0xaef93211, 0xa0f03f1a, 0xb2eb2807, 0xbce2250c, 0xe6956e65, 0xe89c636e, 0xfa877473, 0xf48e7978, 0xdeb15a49, 0xd0b85742, 0xc2a3405f, 0xccaa4d54, 0x41ecdaf7, 0x4fe5d7fc, 0x5dfec0e1, 0x53f7cdea, 0x79c8eedb, 0x77c1e3d0, 0x65daf4cd, 0x6bd3f9c6, 0x31a4b2af, 0x3fadbfa4, 0x2db6a8b9, 0x23bfa5b2, 0x09808683, 0x07898b88, 0x15929c95, 0x1b9b919e, 0xa17c0a47, 0xaf75074c, 0xbd6e1051, 0xb3671d5a, 0x99583e6b, 0x97513360, 0x854a247d, 0x8b432976, 0xd134621f, 0xdf3d6f14, 0xcd267809, 0xc32f7502, 0xe9105633, 0xe7195b38, 0xf5024c25, 0xfb0b412e, 0x9ad7618c, 0x94de6c87, 0x86c57b9a, 0x88cc7691, 0xa2f355a0, 0xacfa58ab, 0xbee14fb6, 0xb0e842bd, 0xea9f09d4, 0xe49604df, 0xf68d13c2, 0xf8841ec9, 0xd2bb3df8, 0xdcb230f3, 0xcea927ee, 0xc0a02ae5, 0x7a47b13c, 0x744ebc37, 0x6655ab2a, 0x685ca621, 0x42638510, 0x4c6a881b, 0x5e719f06, 0x5078920d, 0x0a0fd964, 0x0406d46f, 0x161dc372, 0x1814ce79, 0x322bed48, 0x3c22e043, 0x2e39f75e, 0x2030fa55, 0xec9ab701, 0xe293ba0a, 0xf088ad17, 0xfe81a01c, 0xd4be832d, 0xdab78e26, 0xc8ac993b, 0xc6a59430, 0x9cd2df59, 0x92dbd252, 0x80c0c54f, 0x8ec9c844, 0xa4f6eb75, 0xaaffe67e, 0xb8e4f163, 0xb6edfc68, 0x0c0a67b1, 0x02036aba, 0x10187da7, 0x1e1170ac, 0x342e539d, 0x3a275e96, 0x283c498b, 0x26354480, 0x7c420fe9, 0x724b02e2, 0x605015ff, 0x6e5918f4, 0x44663bc5, 0x4a6f36ce, 0x587421d3, 0x567d2cd8, 0x37a10c7a, 0x39a80171, 0x2bb3166c, 0x25ba1b67, 0x0f853856, 0x018c355d, 0x13972240, 0x1d9e2f4b, 0x47e96422, 0x49e06929, 0x5bfb7e34, 0x55f2733f, 0x7fcd500e, 0x71c45d05, 0x63df4a18, 0x6dd64713, 0xd731dcca, 0xd938d1c1, 0xcb23c6dc, 0xc52acbd7, 0xef15e8e6, 0xe11ce5ed, 0xf307f2f0, 0xfd0efffb, 0xa779b492, 0xa970b999, 0xbb6bae84, 0xb562a38f, 0x9f5d80be, 0x91548db5, 0x834f9aa8, 0x8d4697a3 ] U2 = [ 0x00000000, 0x0b0e090d, 0x161c121a, 0x1d121b17, 0x2c382434, 0x27362d39, 0x3a24362e, 0x312a3f23, 0x58704868, 0x537e4165, 0x4e6c5a72, 0x4562537f, 0x74486c5c, 0x7f466551, 0x62547e46, 0x695a774b, 0xb0e090d0, 0xbbee99dd, 0xa6fc82ca, 0xadf28bc7, 0x9cd8b4e4, 0x97d6bde9, 0x8ac4a6fe, 0x81caaff3, 0xe890d8b8, 0xe39ed1b5, 0xfe8ccaa2, 0xf582c3af, 0xc4a8fc8c, 0xcfa6f581, 0xd2b4ee96, 0xd9bae79b, 0x7bdb3bbb, 0x70d532b6, 0x6dc729a1, 0x66c920ac, 0x57e31f8f, 0x5ced1682, 0x41ff0d95, 0x4af10498, 0x23ab73d3, 0x28a57ade, 0x35b761c9, 0x3eb968c4, 0x0f9357e7, 0x049d5eea, 0x198f45fd, 0x12814cf0, 0xcb3bab6b, 0xc035a266, 0xdd27b971, 0xd629b07c, 
0xe7038f5f, 0xec0d8652, 0xf11f9d45, 0xfa119448, 0x934be303, 0x9845ea0e, 0x8557f119, 0x8e59f814, 0xbf73c737, 0xb47dce3a, 0xa96fd52d, 0xa261dc20, 0xf6ad766d, 0xfda37f60, 0xe0b16477, 0xebbf6d7a, 0xda955259, 0xd19b5b54, 0xcc894043, 0xc787494e, 0xaedd3e05, 0xa5d33708, 0xb8c12c1f, 0xb3cf2512, 0x82e51a31, 0x89eb133c, 0x94f9082b, 0x9ff70126, 0x464de6bd, 0x4d43efb0, 0x5051f4a7, 0x5b5ffdaa, 0x6a75c289, 0x617bcb84, 0x7c69d093, 0x7767d99e, 0x1e3daed5, 0x1533a7d8, 0x0821bccf, 0x032fb5c2, 0x32058ae1, 0x390b83ec, 0x241998fb, 0x2f1791f6, 0x8d764dd6, 0x867844db, 0x9b6a5fcc, 0x906456c1, 0xa14e69e2, 0xaa4060ef, 0xb7527bf8, 0xbc5c72f5, 0xd50605be, 0xde080cb3, 0xc31a17a4, 0xc8141ea9, 0xf93e218a, 0xf2302887, 0xef223390, 0xe42c3a9d, 0x3d96dd06, 0x3698d40b, 0x2b8acf1c, 0x2084c611, 0x11aef932, 0x1aa0f03f, 0x07b2eb28, 0x0cbce225, 0x65e6956e, 0x6ee89c63, 0x73fa8774, 0x78f48e79, 0x49deb15a, 0x42d0b857, 0x5fc2a340, 0x54ccaa4d, 0xf741ecda, 0xfc4fe5d7, 0xe15dfec0, 0xea53f7cd, 0xdb79c8ee, 0xd077c1e3, 0xcd65daf4, 0xc66bd3f9, 0xaf31a4b2, 0xa43fadbf, 0xb92db6a8, 0xb223bfa5, 0x83098086, 0x8807898b, 0x9515929c, 0x9e1b9b91, 0x47a17c0a, 0x4caf7507, 0x51bd6e10, 0x5ab3671d, 0x6b99583e, 0x60975133, 0x7d854a24, 0x768b4329, 0x1fd13462, 0x14df3d6f, 0x09cd2678, 0x02c32f75, 0x33e91056, 0x38e7195b, 0x25f5024c, 0x2efb0b41, 0x8c9ad761, 0x8794de6c, 0x9a86c57b, 0x9188cc76, 0xa0a2f355, 0xabacfa58, 0xb6bee14f, 0xbdb0e842, 0xd4ea9f09, 0xdfe49604, 0xc2f68d13, 0xc9f8841e, 0xf8d2bb3d, 0xf3dcb230, 0xeecea927, 0xe5c0a02a, 0x3c7a47b1, 0x37744ebc, 0x2a6655ab, 0x21685ca6, 0x10426385, 0x1b4c6a88, 0x065e719f, 0x0d507892, 0x640a0fd9, 0x6f0406d4, 0x72161dc3, 0x791814ce, 0x48322bed, 0x433c22e0, 0x5e2e39f7, 0x552030fa, 0x01ec9ab7, 0x0ae293ba, 0x17f088ad, 0x1cfe81a0, 0x2dd4be83, 0x26dab78e, 0x3bc8ac99, 0x30c6a594, 0x599cd2df, 0x5292dbd2, 0x4f80c0c5, 0x448ec9c8, 0x75a4f6eb, 0x7eaaffe6, 0x63b8e4f1, 0x68b6edfc, 0xb10c0a67, 0xba02036a, 0xa710187d, 0xac1e1170, 0x9d342e53, 0x963a275e, 0x8b283c49, 0x80263544, 0xe97c420f, 0xe2724b02, 0xff605015, 0xf46e5918, 0xc544663b, 0xce4a6f36, 0xd3587421, 0xd8567d2c, 0x7a37a10c, 0x7139a801, 0x6c2bb316, 0x6725ba1b, 0x560f8538, 0x5d018c35, 0x40139722, 0x4b1d9e2f, 0x2247e964, 0x2949e069, 0x345bfb7e, 0x3f55f273, 0x0e7fcd50, 0x0571c45d, 0x1863df4a, 0x136dd647, 0xcad731dc, 0xc1d938d1, 0xdccb23c6, 0xd7c52acb, 0xe6ef15e8, 0xede11ce5, 0xf0f307f2, 0xfbfd0eff, 0x92a779b4, 0x99a970b9, 0x84bb6bae, 0x8fb562a3, 0xbe9f5d80, 0xb591548d, 0xa8834f9a, 0xa38d4697 ] U3 = [ 0x00000000, 0x0d0b0e09, 0x1a161c12, 0x171d121b, 0x342c3824, 0x3927362d, 0x2e3a2436, 0x23312a3f, 0x68587048, 0x65537e41, 0x724e6c5a, 0x7f456253, 0x5c74486c, 0x517f4665, 0x4662547e, 0x4b695a77, 0xd0b0e090, 0xddbbee99, 0xcaa6fc82, 0xc7adf28b, 0xe49cd8b4, 0xe997d6bd, 0xfe8ac4a6, 0xf381caaf, 0xb8e890d8, 0xb5e39ed1, 0xa2fe8cca, 0xaff582c3, 0x8cc4a8fc, 0x81cfa6f5, 0x96d2b4ee, 0x9bd9bae7, 0xbb7bdb3b, 0xb670d532, 0xa16dc729, 0xac66c920, 0x8f57e31f, 0x825ced16, 0x9541ff0d, 0x984af104, 0xd323ab73, 0xde28a57a, 0xc935b761, 0xc43eb968, 0xe70f9357, 0xea049d5e, 0xfd198f45, 0xf012814c, 0x6bcb3bab, 0x66c035a2, 0x71dd27b9, 0x7cd629b0, 0x5fe7038f, 0x52ec0d86, 0x45f11f9d, 0x48fa1194, 0x03934be3, 0x0e9845ea, 0x198557f1, 0x148e59f8, 0x37bf73c7, 0x3ab47dce, 0x2da96fd5, 0x20a261dc, 0x6df6ad76, 0x60fda37f, 0x77e0b164, 0x7aebbf6d, 0x59da9552, 0x54d19b5b, 0x43cc8940, 0x4ec78749, 0x05aedd3e, 0x08a5d337, 0x1fb8c12c, 0x12b3cf25, 0x3182e51a, 0x3c89eb13, 0x2b94f908, 0x269ff701, 0xbd464de6, 0xb04d43ef, 0xa75051f4, 0xaa5b5ffd, 0x896a75c2, 0x84617bcb, 0x937c69d0, 0x9e7767d9, 0xd51e3dae, 0xd81533a7, 0xcf0821bc, 
0xc2032fb5, 0xe132058a, 0xec390b83, 0xfb241998, 0xf62f1791, 0xd68d764d, 0xdb867844, 0xcc9b6a5f, 0xc1906456, 0xe2a14e69, 0xefaa4060, 0xf8b7527b, 0xf5bc5c72, 0xbed50605, 0xb3de080c, 0xa4c31a17, 0xa9c8141e, 0x8af93e21, 0x87f23028, 0x90ef2233, 0x9de42c3a, 0x063d96dd, 0x0b3698d4, 0x1c2b8acf, 0x112084c6, 0x3211aef9, 0x3f1aa0f0, 0x2807b2eb, 0x250cbce2, 0x6e65e695, 0x636ee89c, 0x7473fa87, 0x7978f48e, 0x5a49deb1, 0x5742d0b8, 0x405fc2a3, 0x4d54ccaa, 0xdaf741ec, 0xd7fc4fe5, 0xc0e15dfe, 0xcdea53f7, 0xeedb79c8, 0xe3d077c1, 0xf4cd65da, 0xf9c66bd3, 0xb2af31a4, 0xbfa43fad, 0xa8b92db6, 0xa5b223bf, 0x86830980, 0x8b880789, 0x9c951592, 0x919e1b9b, 0x0a47a17c, 0x074caf75, 0x1051bd6e, 0x1d5ab367, 0x3e6b9958, 0x33609751, 0x247d854a, 0x29768b43, 0x621fd134, 0x6f14df3d, 0x7809cd26, 0x7502c32f, 0x5633e910, 0x5b38e719, 0x4c25f502, 0x412efb0b, 0x618c9ad7, 0x6c8794de, 0x7b9a86c5, 0x769188cc, 0x55a0a2f3, 0x58abacfa, 0x4fb6bee1, 0x42bdb0e8, 0x09d4ea9f, 0x04dfe496, 0x13c2f68d, 0x1ec9f884, 0x3df8d2bb, 0x30f3dcb2, 0x27eecea9, 0x2ae5c0a0, 0xb13c7a47, 0xbc37744e, 0xab2a6655, 0xa621685c, 0x85104263, 0x881b4c6a, 0x9f065e71, 0x920d5078, 0xd9640a0f, 0xd46f0406, 0xc372161d, 0xce791814, 0xed48322b, 0xe0433c22, 0xf75e2e39, 0xfa552030, 0xb701ec9a, 0xba0ae293, 0xad17f088, 0xa01cfe81, 0x832dd4be, 0x8e26dab7, 0x993bc8ac, 0x9430c6a5, 0xdf599cd2, 0xd25292db, 0xc54f80c0, 0xc8448ec9, 0xeb75a4f6, 0xe67eaaff, 0xf163b8e4, 0xfc68b6ed, 0x67b10c0a, 0x6aba0203, 0x7da71018, 0x70ac1e11, 0x539d342e, 0x5e963a27, 0x498b283c, 0x44802635, 0x0fe97c42, 0x02e2724b, 0x15ff6050, 0x18f46e59, 0x3bc54466, 0x36ce4a6f, 0x21d35874, 0x2cd8567d, 0x0c7a37a1, 0x017139a8, 0x166c2bb3, 0x1b6725ba, 0x38560f85, 0x355d018c, 0x22401397, 0x2f4b1d9e, 0x642247e9, 0x692949e0, 0x7e345bfb, 0x733f55f2, 0x500e7fcd, 0x5d0571c4, 0x4a1863df, 0x47136dd6, 0xdccad731, 0xd1c1d938, 0xc6dccb23, 0xcbd7c52a, 0xe8e6ef15, 0xe5ede11c, 0xf2f0f307, 0xfffbfd0e, 0xb492a779, 0xb999a970, 0xae84bb6b, 0xa38fb562, 0x80be9f5d, 0x8db59154, 0x9aa8834f, 0x97a38d46 ] U4 = [ 0x00000000, 0x090d0b0e, 0x121a161c, 0x1b171d12, 0x24342c38, 0x2d392736, 0x362e3a24, 0x3f23312a, 0x48685870, 0x4165537e, 0x5a724e6c, 0x537f4562, 0x6c5c7448, 0x65517f46, 0x7e466254, 0x774b695a, 0x90d0b0e0, 0x99ddbbee, 0x82caa6fc, 0x8bc7adf2, 0xb4e49cd8, 0xbde997d6, 0xa6fe8ac4, 0xaff381ca, 0xd8b8e890, 0xd1b5e39e, 0xcaa2fe8c, 0xc3aff582, 0xfc8cc4a8, 0xf581cfa6, 0xee96d2b4, 0xe79bd9ba, 0x3bbb7bdb, 0x32b670d5, 0x29a16dc7, 0x20ac66c9, 0x1f8f57e3, 0x16825ced, 0x0d9541ff, 0x04984af1, 0x73d323ab, 0x7ade28a5, 0x61c935b7, 0x68c43eb9, 0x57e70f93, 0x5eea049d, 0x45fd198f, 0x4cf01281, 0xab6bcb3b, 0xa266c035, 0xb971dd27, 0xb07cd629, 0x8f5fe703, 0x8652ec0d, 0x9d45f11f, 0x9448fa11, 0xe303934b, 0xea0e9845, 0xf1198557, 0xf8148e59, 0xc737bf73, 0xce3ab47d, 0xd52da96f, 0xdc20a261, 0x766df6ad, 0x7f60fda3, 0x6477e0b1, 0x6d7aebbf, 0x5259da95, 0x5b54d19b, 0x4043cc89, 0x494ec787, 0x3e05aedd, 0x3708a5d3, 0x2c1fb8c1, 0x2512b3cf, 0x1a3182e5, 0x133c89eb, 0x082b94f9, 0x01269ff7, 0xe6bd464d, 0xefb04d43, 0xf4a75051, 0xfdaa5b5f, 0xc2896a75, 0xcb84617b, 0xd0937c69, 0xd99e7767, 0xaed51e3d, 0xa7d81533, 0xbccf0821, 0xb5c2032f, 0x8ae13205, 0x83ec390b, 0x98fb2419, 0x91f62f17, 0x4dd68d76, 0x44db8678, 0x5fcc9b6a, 0x56c19064, 0x69e2a14e, 0x60efaa40, 0x7bf8b752, 0x72f5bc5c, 0x05bed506, 0x0cb3de08, 0x17a4c31a, 0x1ea9c814, 0x218af93e, 0x2887f230, 0x3390ef22, 0x3a9de42c, 0xdd063d96, 0xd40b3698, 0xcf1c2b8a, 0xc6112084, 0xf93211ae, 0xf03f1aa0, 0xeb2807b2, 0xe2250cbc, 0x956e65e6, 0x9c636ee8, 0x877473fa, 0x8e7978f4, 0xb15a49de, 0xb85742d0, 0xa3405fc2, 0xaa4d54cc, 0xecdaf741, 0xe5d7fc4f, 
0xfec0e15d, 0xf7cdea53, 0xc8eedb79, 0xc1e3d077, 0xdaf4cd65, 0xd3f9c66b, 0xa4b2af31, 0xadbfa43f, 0xb6a8b92d, 0xbfa5b223, 0x80868309, 0x898b8807, 0x929c9515, 0x9b919e1b, 0x7c0a47a1, 0x75074caf, 0x6e1051bd, 0x671d5ab3, 0x583e6b99, 0x51336097, 0x4a247d85, 0x4329768b, 0x34621fd1, 0x3d6f14df, 0x267809cd, 0x2f7502c3, 0x105633e9, 0x195b38e7, 0x024c25f5, 0x0b412efb, 0xd7618c9a, 0xde6c8794, 0xc57b9a86, 0xcc769188, 0xf355a0a2, 0xfa58abac, 0xe14fb6be, 0xe842bdb0, 0x9f09d4ea, 0x9604dfe4, 0x8d13c2f6, 0x841ec9f8, 0xbb3df8d2, 0xb230f3dc, 0xa927eece, 0xa02ae5c0, 0x47b13c7a, 0x4ebc3774, 0x55ab2a66, 0x5ca62168, 0x63851042, 0x6a881b4c, 0x719f065e, 0x78920d50, 0x0fd9640a, 0x06d46f04, 0x1dc37216, 0x14ce7918, 0x2bed4832, 0x22e0433c, 0x39f75e2e, 0x30fa5520, 0x9ab701ec, 0x93ba0ae2, 0x88ad17f0, 0x81a01cfe, 0xbe832dd4, 0xb78e26da, 0xac993bc8, 0xa59430c6, 0xd2df599c, 0xdbd25292, 0xc0c54f80, 0xc9c8448e, 0xf6eb75a4, 0xffe67eaa, 0xe4f163b8, 0xedfc68b6, 0x0a67b10c, 0x036aba02, 0x187da710, 0x1170ac1e, 0x2e539d34, 0x275e963a, 0x3c498b28, 0x35448026, 0x420fe97c, 0x4b02e272, 0x5015ff60, 0x5918f46e, 0x663bc544, 0x6f36ce4a, 0x7421d358, 0x7d2cd856, 0xa10c7a37, 0xa8017139, 0xb3166c2b, 0xba1b6725, 0x8538560f, 0x8c355d01, 0x97224013, 0x9e2f4b1d, 0xe9642247, 0xe0692949, 0xfb7e345b, 0xf2733f55, 0xcd500e7f, 0xc45d0571, 0xdf4a1863, 0xd647136d, 0x31dccad7, 0x38d1c1d9, 0x23c6dccb, 0x2acbd7c5, 0x15e8e6ef, 0x1ce5ede1, 0x07f2f0f3, 0x0efffbfd, 0x79b492a7, 0x70b999a9, 0x6bae84bb, 0x62a38fb5, 0x5d80be9f, 0x548db591, 0x4f9aa883, 0x4697a38d ] def __init__(self, key): if len(key) not in (16, 24, 32): raise ValueError('Invalid key size') rounds = self.number_of_rounds[len(key)] # Encryption round keys self._Ke = [[0] * 4 for i in xrange(rounds + 1)] # Decryption round keys self._Kd = [[0] * 4 for i in xrange(rounds + 1)] round_key_count = (rounds + 1) * 4 KC = len(key) // 4 # Convert the key into ints tk = [ struct.unpack('>i', key[i:i + 4])[0] for i in xrange(0, len(key), 4) ] # Copy values into round key arrays for i in xrange(0, KC): self._Ke[i // 4][i % 4] = tk[i] self._Kd[rounds - (i // 4)][i % 4] = tk[i] # Key expansion (fips-197 section 5.2) rconpointer = 0 t = KC while t < round_key_count: tt = tk[KC - 1] tk[0] ^= ((self.S[(tt >> 16) & 0xFF] << 24) ^ (self.S[(tt >> 8) & 0xFF] << 16) ^ (self.S[ tt & 0xFF] << 8) ^ self.S[(tt >> 24) & 0xFF] ^ (self.rcon[rconpointer] << 24)) rconpointer += 1 if KC != 8: for i in xrange(1, KC): tk[i] ^= tk[i - 1] # Key expansion for 256-bit keys is "slightly different" (fips-197) else: for i in xrange(1, KC // 2): tk[i] ^= tk[i - 1] tt = tk[KC // 2 - 1] tk[KC // 2] ^= (self.S[ tt & 0xFF] ^ (self.S[(tt >> 8) & 0xFF] << 8) ^ (self.S[(tt >> 16) & 0xFF] << 16) ^ (self.S[(tt >> 24) & 0xFF] << 24)) for i in xrange(KC // 2 + 1, KC): tk[i] ^= tk[i - 1] # Copy values into round key arrays j = 0 while j < KC and t < round_key_count: self._Ke[t // 4][t % 4] = tk[j] self._Kd[rounds - (t // 4)][t % 4] = tk[j] j += 1 t += 1 # Inverse-Cipher-ify the decryption round key (fips-197 section 5.3) for r in xrange(1, rounds): for j in xrange(0, 4): tt = self._Kd[r][j] self._Kd[r][j] = (self.U1[(tt >> 24) & 0xFF] ^ self.U2[(tt >> 16) & 0xFF] ^ self.U3[(tt >> 8) & 0xFF] ^ self.U4[ tt & 0xFF]) def encrypt(self, plaintext): 'Encrypt a block of plain text using the AES block cipher.' 
        if len(plaintext) != 16:
            raise ValueError('wrong block length')

        rounds = len(self._Ke) - 1
        (s1, s2, s3) = [1, 2, 3]
        a = [0, 0, 0, 0]

        # Convert plaintext to (ints ^ key)
        t = [(_compact_word(plaintext[4 * i:4 * i + 4]) ^ self._Ke[0][i]) for i in xrange(0, 4)]

        # Apply round transforms
        for r in xrange(1, rounds):
            for i in xrange(0, 4):
                a[i] = (self.T1[(t[ i          ] >> 24) & 0xFF] ^
                        self.T2[(t[(i + s1) % 4] >> 16) & 0xFF] ^
                        self.T3[(t[(i + s2) % 4] >>  8) & 0xFF] ^
                        self.T4[ t[(i + s3) % 4]        & 0xFF] ^
                        self._Ke[r][i])
            t = copy.copy(a)

        # The last round is special
        result = [ ]
        for i in xrange(0, 4):
            tt = self._Ke[rounds][i]
            result.append((self.S[(t[ i          ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((self.S[(t[(i + s1) % 4] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((self.S[(t[(i + s2) % 4] >>  8) & 0xFF] ^ (tt >>  8)) & 0xFF)
            result.append((self.S[ t[(i + s3) % 4]        & 0xFF] ^  tt       ) & 0xFF)

        return result

    def decrypt(self, ciphertext):
        'Decrypt a block of cipher text using the AES block cipher.'

        if len(ciphertext) != 16:
            raise ValueError('wrong block length')

        rounds = len(self._Kd) - 1
        (s1, s2, s3) = [3, 2, 1]
        a = [0, 0, 0, 0]

        # Convert ciphertext to (ints ^ key)
        t = [(_compact_word(ciphertext[4 * i:4 * i + 4]) ^ self._Kd[0][i]) for i in xrange(0, 4)]

        # Apply round transforms
        for r in xrange(1, rounds):
            for i in xrange(0, 4):
                a[i] = (self.T5[(t[ i          ] >> 24) & 0xFF] ^
                        self.T6[(t[(i + s1) % 4] >> 16) & 0xFF] ^
                        self.T7[(t[(i + s2) % 4] >>  8) & 0xFF] ^
                        self.T8[ t[(i + s3) % 4]        & 0xFF] ^
                        self._Kd[r][i])
            t = copy.copy(a)

        # The last round is special
        result = [ ]
        for i in xrange(0, 4):
            tt = self._Kd[rounds][i]
            result.append((self.Si[(t[ i          ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((self.Si[(t[(i + s1) % 4] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((self.Si[(t[(i + s2) % 4] >>  8) & 0xFF] ^ (tt >>  8)) & 0xFF)
            result.append((self.Si[ t[(i + s3) % 4]        & 0xFF] ^  tt       ) & 0xFF)

        return result


class Counter(object):
    '''A counter object for the Counter (CTR) mode of operation.

       To create a custom counter, you can usually just override the
       increment method.'''

    def __init__(self, initial_value = 1):

        # Convert the value into an array of bytes long
        self._counter = [ ((initial_value >> i) % 256) for i in xrange(128 - 8, -1, -8) ]

    value = property(lambda s: s._counter)

    def increment(self):
        '''Increment the counter (overflow rolls back to 0).'''

        for i in xrange(len(self._counter) - 1, -1, -1):
            self._counter[i] += 1

            if self._counter[i] < 256: break

            # Carry the one
            self._counter[i] = 0

        # Overflow
        else:
            self._counter = [ 0 ] * len(self._counter)


class AESBlockModeOfOperation(object):
    '''Super-class for AES modes of operation that require blocks.'''

    def __init__(self, key):
        self._aes = AES(key)

    def decrypt(self, ciphertext):
        raise Exception('not implemented')

    def encrypt(self, plaintext):
        raise Exception('not implemented')


class AESStreamModeOfOperation(AESBlockModeOfOperation):
    '''Super-class for AES modes of operation that are stream-ciphers.'''


class AESSegmentModeOfOperation(AESStreamModeOfOperation):
    '''Super-class for AES modes of operation that segment data.'''

    segment_bytes = 16


class AESModeOfOperationECB(AESBlockModeOfOperation):
    '''AES Electronic Codebook Mode of Operation.

       o Block-cipher, so data must be padded to 16 byte boundaries

       Security Notes:
       o This mode is not recommended
       o Any two identical blocks produce identical encrypted values,
         exposing data patterns. (See the image of Tux on wikipedia)

       Also see:
       o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_.28ECB.29
       o See NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.1'''

    name = "Electronic Codebook (ECB)"

    def encrypt(self, plaintext):
        if len(plaintext) != 16:
            raise ValueError('plaintext block must be 16 bytes')

        plaintext = _string_to_bytes(plaintext)
        return _bytes_to_string(self._aes.encrypt(plaintext))

    def decrypt(self, ciphertext):
        if len(ciphertext) != 16:
            raise ValueError('ciphertext block must be 16 bytes')

        ciphertext = _string_to_bytes(ciphertext)
        return _bytes_to_string(self._aes.decrypt(ciphertext))


class AESModeOfOperationCBC(AESBlockModeOfOperation):
    '''AES Cipher-Block Chaining Mode of Operation.

       o The Initialization Vector (IV) is XORed with the first block of plaintext
       o Block-cipher, so data must be padded to 16 byte boundaries
       o An incorrect initialization vector will only cause the first block
         to be corrupt; all other blocks will be intact
       o A corrupt bit in the cipher text will cause a block to be corrupted,
         and the corresponding bit of the next block to be flipped, but all
         other blocks will be intact.

       Security Notes:
       o This method (and CTR) are recommended.

       Also see:
       o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher-block_chaining_.28CBC.29
       o See NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.2'''

    name = "Cipher-Block Chaining (CBC)"

    def __init__(self, key, iv = None):
        if iv is None:
            self._last_cipherblock = [ 0 ] * 16
        elif len(iv) != 16:
            raise ValueError('initialization vector must be 16 bytes')
        else:
            self._last_cipherblock = _string_to_bytes(iv)

        AESBlockModeOfOperation.__init__(self, key)

    def encrypt(self, plaintext):
        if len(plaintext) != 16:
            raise ValueError('plaintext block must be 16 bytes')

        plaintext = _string_to_bytes(plaintext)
        precipherblock = [ (p ^ l) for (p, l) in zip(plaintext, self._last_cipherblock) ]
        self._last_cipherblock = self._aes.encrypt(precipherblock)

        return _bytes_to_string(self._last_cipherblock)

    def decrypt(self, ciphertext):
        if len(ciphertext) != 16:
            raise ValueError('ciphertext block must be 16 bytes')

        cipherblock = _string_to_bytes(ciphertext)
        plaintext = [ (p ^ l) for (p, l) in zip(self._aes.decrypt(cipherblock), self._last_cipherblock) ]
        self._last_cipherblock = cipherblock

        return _bytes_to_string(plaintext)


class AESModeOfOperationCFB(AESSegmentModeOfOperation):
    '''AES Cipher Feedback Mode of Operation.

       o A stream-cipher, so input does not need to be padded to blocks,
         but does need to be padded to segment_size

       Also see:
       o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_.28CFB.29
       o See NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.3'''

    name = "Cipher Feedback (CFB)"

    def __init__(self, key, iv, segment_size = 1):
        if segment_size == 0: segment_size = 1

        if iv is None:
            self._shift_register = [ 0 ] * 16
        elif len(iv) != 16:
            raise ValueError('initialization vector must be 16 bytes')
        else:
            self._shift_register = _string_to_bytes(iv)

        self._segment_bytes = segment_size

        AESBlockModeOfOperation.__init__(self, key)

    segment_bytes = property(lambda s: s._segment_bytes)

    def encrypt(self, plaintext):
        if len(plaintext) % self._segment_bytes != 0:
            raise ValueError('plaintext block must be a multiple of segment_size')

        plaintext = _string_to_bytes(plaintext)

        # Break block into segments
        encrypted = [ ]
        for i in xrange(0, len(plaintext), self._segment_bytes):
            plaintext_segment = plaintext[i: i + self._segment_bytes]
            xor_segment = self._aes.encrypt(self._shift_register)[:len(plaintext_segment)]
            cipher_segment = [ (p ^ x) for (p, x) in zip(plaintext_segment, xor_segment) ]

            # Shift the top bits out and the ciphertext in
            self._shift_register = _concat_list(self._shift_register[len(cipher_segment):], cipher_segment)

            encrypted.extend(cipher_segment)

        return _bytes_to_string(encrypted)

    def decrypt(self, ciphertext):
        if len(ciphertext) % self._segment_bytes != 0:
            raise ValueError('ciphertext block must be a multiple of segment_size')

        ciphertext = _string_to_bytes(ciphertext)

        # Break block into segments
        decrypted = [ ]
        for i in xrange(0, len(ciphertext), self._segment_bytes):
            cipher_segment = ciphertext[i: i + self._segment_bytes]
            xor_segment = self._aes.encrypt(self._shift_register)[:len(cipher_segment)]
            plaintext_segment = [ (p ^ x) for (p, x) in zip(cipher_segment, xor_segment) ]

            # Shift the top bits out and the ciphertext in
            self._shift_register = _concat_list(self._shift_register[len(cipher_segment):], cipher_segment)

            decrypted.extend(plaintext_segment)

        return _bytes_to_string(decrypted)


class AESModeOfOperationOFB(AESStreamModeOfOperation):
    '''AES Output Feedback Mode of Operation.

       o A stream-cipher, so input does not need to be padded to blocks,
         allowing arbitrary length data.
       o A bit twiddled in the cipher text twiddles the same bit in the
         plain text, which can be useful for error correction techniques.

       Also see:
       o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Output_feedback_.28OFB.29
       o See NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.4'''

    name = "Output Feedback (OFB)"

    def __init__(self, key, iv = None):
        if iv is None:
            self._last_precipherblock = [ 0 ] * 16
        elif len(iv) != 16:
            raise ValueError('initialization vector must be 16 bytes')
        else:
            self._last_precipherblock = _string_to_bytes(iv)

        self._remaining_block = [ ]

        AESBlockModeOfOperation.__init__(self, key)

    def encrypt(self, plaintext):
        encrypted = [ ]
        for p in _string_to_bytes(plaintext):
            if len(self._remaining_block) == 0:
                self._remaining_block = self._aes.encrypt(self._last_precipherblock)
                self._last_precipherblock = [ ]
            precipherbyte = self._remaining_block.pop(0)
            self._last_precipherblock.append(precipherbyte)
            cipherbyte = p ^ precipherbyte
            encrypted.append(cipherbyte)

        return _bytes_to_string(encrypted)

    def decrypt(self, ciphertext):
        # AES-OFB is symmetric
        return self.encrypt(ciphertext)


class AESModeOfOperationCTR(AESStreamModeOfOperation):
    '''AES Counter Mode of Operation.

       o A stream-cipher, so input does not need to be padded to blocks,
         allowing arbitrary length data.
       o The counter must be the same size as the cipher block size (16 bytes)
       o Each block is independent of the others, so a corrupt byte will not
         damage future blocks.
       o Each block has a unique counter value associated with it, which
         contributes to the encrypted value, so no data patterns are leaked.
       o Also known as: Counter Mode (CM), Integer Counter Mode (ICM) and
         Segmented Integer Counter (SIC)

       Security Notes:
       o This method (and CBC) are recommended.
       o Each message block is associated with a counter value which must be
         unique for ALL messages with the same key. Otherwise security may be
         compromised.

       Also see:
       o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29
       o See NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.5
         and Appendix B for managing the initial counter'''

    name = "Counter (CTR)"

    def __init__(self, key, counter = None):
        AESBlockModeOfOperation.__init__(self, key)

        if counter is None:
            counter = Counter()

        self._counter = counter
        self._remaining_counter = [ ]

    def encrypt(self, plaintext):
        while len(self._remaining_counter) < len(plaintext):
            self._remaining_counter += self._aes.encrypt(self._counter.value)
            self._counter.increment()

        plaintext = _string_to_bytes(plaintext)

        encrypted = [ (p ^ c) for (p, c) in zip(plaintext, self._remaining_counter) ]
        self._remaining_counter = self._remaining_counter[len(encrypted):]

        return _bytes_to_string(encrypted)

    def decrypt(self, crypttext):
        # AES-CTR is symmetric
        return self.encrypt(crypttext)


# Simple lookup table for each mode
AESModesOfOperation = dict(
    ctr = AESModeOfOperationCTR,
    cbc = AESModeOfOperationCBC,
    cfb = AESModeOfOperationCFB,
    ecb = AESModeOfOperationECB,
    ofb = AESModeOfOperationOFB,
)
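Editor's note: a short usage sketch may help tie the mode-of-operation classes together. It is not part of the original module; it assumes the AES, Counter and AESModeOfOperationCTR classes defined above (plus the module's byte/string helpers), a Python 2 interpreter as the use of xrange implies, and a made-up 16-byte key.

# Editor's sketch: CTR is a stream mode, so no padding is needed and
# decryption is the same keystream XOR as encryption.
key = b'0123456789abcdef'                       # hypothetical 16-byte key -> AES-128
ctr = AESModeOfOperationCTR(key, counter=Counter(initial_value=1))
ciphertext = ctr.encrypt(b'attack at dawn')     # arbitrary-length input

# A fresh instance whose counter starts at the same value regenerates the
# same keystream, so decrypt() recovers the plaintext.
ctr2 = AESModeOfOperationCTR(key, counter=Counter(initial_value=1))
assert ctr2.decrypt(ciphertext) == b'attack at dawn'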
frappe/shopping_cart
refs/heads/develop
shopping_cart/templates/utils.py
8
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import json

import frappe
from frappe import _
from frappe.utils import cint, formatdate


def get_transaction_list(doctype, start, additional_fields=None):
    # find customer id
    customer = frappe.db.get_value("Contact", {"email_id": frappe.session.user},
        "customer")

    if customer:
        if additional_fields:
            additional_fields = ", " + ", ".join(("`%s`" % f for f in additional_fields))
        else:
            additional_fields = ""

        transactions = frappe.db.sql("""select name, creation, currency, grand_total_export
            %s
            from `tab%s` where customer=%s and docstatus=1
            order by creation desc
            limit %s, 20""" % (additional_fields, doctype, "%s", "%s"),
            (customer, cint(start)), as_dict=True)

        for doc in transactions:
            items = frappe.db.sql_list("""select item_name from `tab%s Item`
                where parent=%s limit 6""" % (doctype, "%s"), doc.name)
            doc.items = ", ".join(items[:5]) + ("..." if (len(items) > 5) else "")
            doc.creation = formatdate(doc.creation)

        return transactions
    else:
        return []

def get_currency_context():
    return {
        "global_number_format": frappe.db.get_default("number_format") or "#,###.##",
        "currency": frappe.db.get_default("currency"),
        "currency_symbols": json.dumps(dict(frappe.db.sql("""select name, symbol
            from tabCurrency where ifnull(enabled,0)=1""")))
    }

def get_transaction_context(doctype, name):
    customer = frappe.db.get_value("Contact", {"email_id": frappe.session.user},
        "customer")

    doc = frappe.get_doc(doctype, name)
    if doc.customer != customer:
        return {"doc": frappe._dict({"name": _("Not Allowed")})}
    else:
        return {"doc": doc}
natefoo/pulsar
refs/heads/master
test/persistence_test.py
2
from contextlib import contextmanager from os.path import exists, join import time from pulsar.managers.queued import QueueManager from pulsar.managers.stateful import StatefulManagerProxy from pulsar.tools.authorization import get_authorizer from .test_utils import ( temp_directory, TestDependencyManager ) from galaxy.job_metrics import NULL_JOB_INSTRUMENTER from galaxy.util.bunch import Bunch TEST_JOB_ID = "4" TEST_STAGED_FILE = "cow" TEST_COMMAND_TOUCH_FILE = "ran" def test_launched_job_recovery(): """Tests persistence and recovery of launched managers jobs.""" with _app() as app: staging_directory = app.staging_directory queue1 = StatefulManagerProxy(QueueManager('test', app, num_concurrent_jobs=0)) job_id = queue1.setup_job(TEST_JOB_ID, 'tool1', '1.0.0') touch_file = join(staging_directory, TEST_COMMAND_TOUCH_FILE) queue1.preprocess_and_launch(job_id, {"command_line": 'touch %s' % touch_file}) time.sleep(.4) assert not exists(touch_file) queue1.shutdown() _setup_manager_that_executes(app) assert exists(touch_file) def test_preprocessing_job_recovery(): """Tests persistence and recovery of preprocessing managers jobs (clean).""" with _app() as app: _setup_job_with_unexecuted_preprocessing_directive(app) staging_directory = app.staging_directory staged_file = join(staging_directory, TEST_JOB_ID, "inputs", TEST_STAGED_FILE) touch_file = join(staging_directory, TEST_COMMAND_TOUCH_FILE) # File shouldn't have been staged because we hacked stateful proxy manager to not # run preprocess. assert not exists(staged_file) _setup_manager_that_preprocesses(app) assert exists(staged_file) assert not exists(touch_file) _setup_manager_that_executes(app) assert exists(touch_file) def test_preprocessing_job_recovery_dirty(): """Tests persistence and recovery of preprocessing managers jobs (dirty).""" # Same test as above, but simulating existing files from a previous partial # preprocess. with _app() as app: _setup_job_with_unexecuted_preprocessing_directive(app) staging_directory = app.staging_directory staged_file = join(staging_directory, TEST_JOB_ID, "inputs", TEST_STAGED_FILE) touch_file = join(staging_directory, TEST_COMMAND_TOUCH_FILE) # File shouldn't have been staged because we hacked stateful proxy manager to not # run preprocess. assert not exists(staged_file) # write out partial contents, make sure preprocess writes over this with the correct # contents. open(staged_file, "wb").write(b"co") _setup_manager_that_preprocesses(app) assert exists(staged_file) assert open(staged_file, "rb").read() == b"cow file" assert not exists(touch_file) _setup_manager_that_executes(app) assert exists(touch_file) def _setup_manager_that_preprocesses(app): # Setup a manager that will preprocess the job but won't execute it. # Now start a real stateful manager proxy and watch the file get staged. 
queue2 = StatefulManagerProxy(QueueManager('test', app, num_concurrent_jobs=0)) try: queue2.recover_active_jobs() time.sleep(1) finally: try: queue2.shutdown() except Exception: pass def _setup_job_with_unexecuted_preprocessing_directive(app): staging_directory = app.staging_directory queue1 = DoesntPreprocessStatefulManagerProxy(QueueManager('test', app, num_concurrent_jobs=0)) job_id = queue1.setup_job(TEST_JOB_ID, 'tool1', '1.0.0') action = {"name": TEST_STAGED_FILE, "type": "input", "action": {"action_type": "message", "contents": "cow file"}} remote_staging = {"setup": [action]} touch_file = join(staging_directory, TEST_COMMAND_TOUCH_FILE) queue1.preprocess_and_launch(job_id, {"command_line": "touch '%s'" % touch_file, "remote_staging": remote_staging}) queue1.shutdown() def _setup_manager_that_executes(app): queue2 = StatefulManagerProxy(QueueManager('test', app, num_concurrent_jobs=1)) try: queue2.recover_active_jobs() time.sleep(1) finally: try: queue2.shutdown() except Exception: pass @contextmanager def _app(): with temp_directory() as staging_directory: app = Bunch( staging_directory=staging_directory, persistence_directory=staging_directory, authorizer=get_authorizer(None), dependency_manager=TestDependencyManager(), job_metrics=Bunch(default_job_instrumenter=NULL_JOB_INSTRUMENTER), object_store=None, ) yield app class DoesntPreprocessStatefulManagerProxy(StatefulManagerProxy): def _launch_prepreprocessing_thread(self, job_id, launch_config): pass
repotvsupertuga/tvsupertuga.repository
refs/heads/master
plugin.video.plexus-streams/resources/core/parsers/arenavision-top/cleaner.py
16
# -*- coding: utf-8 -*-
"""
    This plugin is 3rd party and not part of plexus-streams addon
    Arenavision.in
"""

def clean(text):
    text = text.replace(u'\xda', 'U').replace(u'\xc9', 'E').replace(u'\xd3', 'O').replace(u'\xd1', 'N').replace(u'\xcd', 'I').replace(u'\xc1', 'A')
    return text
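Editor's note: a one-line example (not part of the plugin, sample string invented) shows what clean() does to the accented uppercase characters it targets.

print(clean(u'F\xdaTBOL ESPA\xd1A'))   # -> FUTBOL ESPANA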
dssg/wikienergy
refs/heads/master
disaggregator/build/pandas/pandas/stats/interface.py
14
from pandas.core.api import Series, DataFrame, Panel, MultiIndex from pandas.stats.ols import OLS, MovingOLS from pandas.stats.plm import PanelOLS, MovingPanelOLS, NonPooledPanelOLS import pandas.stats.common as common def ols(**kwargs): """Returns the appropriate OLS object depending on whether you need simple or panel OLS, and a full-sample or rolling/expanding OLS. Will be a normal linear regression or a (pooled) panel regression depending on the type of the inputs: y : Series, x : DataFrame -> OLS y : Series, x : dict of DataFrame -> OLS y : DataFrame, x : DataFrame -> PanelOLS y : DataFrame, x : dict of DataFrame/Panel -> PanelOLS y : Series with MultiIndex, x : Panel/DataFrame + MultiIndex -> PanelOLS Parameters ---------- y: Series or DataFrame See above for types x: Series, DataFrame, dict of Series, dict of DataFrame, Panel weights : Series or ndarray The weights are presumed to be (proportional to) the inverse of the variance of the observations. That is, if the variables are to be transformed by 1/sqrt(W) you must supply weights = 1/W intercept: bool True if you want an intercept. Defaults to True. nw_lags: None or int Number of Newey-West lags. Defaults to None. nw_overlap: bool Whether there are overlaps in the NW lags. Defaults to False. window_type: {'full sample', 'rolling', 'expanding'} 'full sample' by default window: int size of window (for rolling/expanding OLS). If window passed and no explicit window_type, 'rolling" will be used as the window_type Panel OLS options: pool: bool Whether to run pooled panel regression. Defaults to true. entity_effects: bool Whether to account for entity fixed effects. Defaults to false. time_effects: bool Whether to account for time fixed effects. Defaults to false. x_effects: list List of x's to account for fixed effects. Defaults to none. dropped_dummies: dict Key is the name of the variable for the fixed effect. Value is the value of that variable for which we drop the dummy. For entity fixed effects, key equals 'entity'. By default, the first dummy is dropped if no dummy is specified. cluster: {'time', 'entity'} cluster variances Examples -------- # Run simple OLS. result = ols(y=y, x=x) # Run rolling simple OLS with window of size 10. result = ols(y=y, x=x, window_type='rolling', window=10) print(result.beta) result = ols(y=y, x=x, nw_lags=1) # Set up LHS and RHS for data across all items y = A x = {'B' : B, 'C' : C} # Run panel OLS. result = ols(y=y, x=x) # Run expanding panel OLS with window 10 and entity clustering. result = ols(y=y, x=x, cluster='entity', window_type='expanding', window=10) Returns ------- The appropriate OLS object, which allows you to obtain betas and various statistics, such as std err, t-stat, etc. 
""" if (kwargs.get('cluster') is not None and kwargs.get('nw_lags') is not None): raise ValueError( 'Pandas OLS does not work with Newey-West correction ' 'and clustering.') pool = kwargs.get('pool') if 'pool' in kwargs: del kwargs['pool'] window_type = kwargs.get('window_type') window = kwargs.get('window') if window_type is None: if window is None: window_type = 'full_sample' else: window_type = 'rolling' else: window_type = common._get_window_type(window_type) if window_type != 'full_sample': kwargs['window_type'] = common._get_window_type(window_type) y = kwargs.get('y') x = kwargs.get('x') panel = False if isinstance(y, DataFrame) or (isinstance(y, Series) and isinstance(y.index, MultiIndex)): panel = True if isinstance(x, Panel): panel = True if window_type == 'full_sample': for rolling_field in ('window_type', 'window', 'min_periods'): if rolling_field in kwargs: del kwargs[rolling_field] if panel: if pool is False: klass = NonPooledPanelOLS else: klass = PanelOLS else: klass = OLS else: if panel: if pool is False: klass = NonPooledPanelOLS else: klass = MovingPanelOLS else: klass = MovingOLS return klass(**kwargs)
rvalyi/OpenUpgrade
refs/heads/master
addons/portal_project/project.py
103
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-TODAY OpenERP S.A (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv class portal_project(osv.Model): """ Update of mail_mail class, to add the signin URL to notifications. """ _inherit = 'project.project' def _get_visibility_selection(self, cr, uid, context=None): """ Override to add portal option. """ selection = super(portal_project, self)._get_visibility_selection(cr, uid, context=context) idx = [item[0] for item in selection].index('public') selection.insert((idx + 1), ('portal', 'Customer related project: visible through portal')) return selection # return [('public', 'All Users'), # ('portal', 'Portal Users and Employees'), # ('employees', 'Employees Only'), # ('followers', 'Followers Only')]
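Editor's note: the override above works by inserting the new option directly after 'public' in the selection list. A standalone sketch of that pattern (with a made-up selection list, outside of OpenERP) shows the effect:

selection = [('public', 'All Users'), ('employees', 'Employees Only'), ('followers', 'Followers Only')]
idx = [item[0] for item in selection].index('public')
selection.insert(idx + 1, ('portal', 'Customer related project: visible through portal'))
print(selection)   # 'portal' now sits immediately after 'public'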
iffy/AutobahnPython
refs/heads/master
autobahn/wamp/test/test_uri_pattern.py
2
############################################################################### # # The MIT License (MIT) # # Copyright (c) Tavendo GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### from __future__ import absolute_import import sys import inspect from autobahn import wamp from autobahn.wamp.uri import Pattern if sys.version_info < (2, 7): # noinspection PyUnresolvedReferences import unittest2 as unittest else: # from twisted.trial import unittest import unittest class TestUris(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_invalid_uris(self): for u in [u"", u"com.myapp.<product:foo>.update", u"com.myapp.<123:int>.update", u"com.myapp.<:product>.update", u"com.myapp.<product:>.update", u"com.myapp.<int:>.update", ]: self.assertRaises(Exception, Pattern, u, Pattern.URI_TARGET_ENDPOINT) def test_valid_uris(self): for u in [u"com.myapp.proc1", u"123", u"com.myapp.<product:int>.update", ]: p = Pattern(u, Pattern.URI_TARGET_ENDPOINT) self.assertIsInstance(p, Pattern) def test_parse_uris(self): tests = [ (u"com.myapp.<product:int>.update", [ (u"com.myapp.0.update", {u'product': 0}), (u"com.myapp.123456.update", {u'product': 123456}), (u"com.myapp.aaa.update", None), (u"com.myapp..update", None), (u"com.myapp.0.delete", None), ] ), (u"com.myapp.<product:string>.update", [ (u"com.myapp.box.update", {u'product': u'box'}), (u"com.myapp.123456.update", {u'product': u'123456'}), (u"com.myapp..update", None), ] ) ] for test in tests: pat = Pattern(test[0], Pattern.URI_TARGET_ENDPOINT) for ptest in test[1]: uri = ptest[0] kwargs_should = ptest[1] if kwargs_should is not None: args_is, kwargs_is = pat.match(uri) self.assertEqual(kwargs_is, kwargs_should) else: self.assertRaises(Exception, pat.match, uri) class TestDecorators(unittest.TestCase): def test_decorate_endpoint(self): @wamp.register(u"com.calculator.square") def square(_): pass self.assertTrue(hasattr(square, '_wampuris')) self.assertTrue(type(square._wampuris) == list) self.assertEqual(len(square._wampuris), 1) self.assertIsInstance(square._wampuris[0], Pattern) self.assertTrue(square._wampuris[0].is_endpoint()) self.assertFalse(square._wampuris[0].is_handler()) self.assertFalse(square._wampuris[0].is_exception()) self.assertEqual(square._wampuris[0].uri(), u"com.calculator.square") self.assertEqual(square._wampuris[0]._type, Pattern.URI_TYPE_EXACT) # noinspection PyUnusedLocal @wamp.register(u"com.myapp.product.<product:int>.update") def 
update_product(product=None, label=None): pass self.assertTrue(hasattr(update_product, '_wampuris')) self.assertTrue(type(update_product._wampuris) == list) self.assertEqual(len(update_product._wampuris), 1) self.assertIsInstance(update_product._wampuris[0], Pattern) self.assertTrue(update_product._wampuris[0].is_endpoint()) self.assertFalse(update_product._wampuris[0].is_handler()) self.assertFalse(update_product._wampuris[0].is_exception()) self.assertEqual(update_product._wampuris[0].uri(), u"com.myapp.product.<product:int>.update") self.assertEqual(update_product._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD) # noinspection PyUnusedLocal @wamp.register(u"com.myapp.<category:string>.<cid:int>.update") def update(category=None, cid=None): pass self.assertTrue(hasattr(update, '_wampuris')) self.assertTrue(type(update._wampuris) == list) self.assertEqual(len(update._wampuris), 1) self.assertIsInstance(update._wampuris[0], Pattern) self.assertTrue(update._wampuris[0].is_endpoint()) self.assertFalse(update._wampuris[0].is_handler()) self.assertFalse(update._wampuris[0].is_exception()) self.assertEqual(update._wampuris[0].uri(), u"com.myapp.<category:string>.<cid:int>.update") self.assertEqual(update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD) def test_decorate_handler(self): @wamp.subscribe(u"com.myapp.on_shutdown") def on_shutdown(): pass self.assertTrue(hasattr(on_shutdown, '_wampuris')) self.assertTrue(type(on_shutdown._wampuris) == list) self.assertEqual(len(on_shutdown._wampuris), 1) self.assertIsInstance(on_shutdown._wampuris[0], Pattern) self.assertFalse(on_shutdown._wampuris[0].is_endpoint()) self.assertTrue(on_shutdown._wampuris[0].is_handler()) self.assertFalse(on_shutdown._wampuris[0].is_exception()) self.assertEqual(on_shutdown._wampuris[0].uri(), u"com.myapp.on_shutdown") self.assertEqual(on_shutdown._wampuris[0]._type, Pattern.URI_TYPE_EXACT) # noinspection PyUnusedLocal @wamp.subscribe(u"com.myapp.product.<product:int>.on_update") def on_product_update(product=None, label=None): pass self.assertTrue(hasattr(on_product_update, '_wampuris')) self.assertTrue(type(on_product_update._wampuris) == list) self.assertEqual(len(on_product_update._wampuris), 1) self.assertIsInstance(on_product_update._wampuris[0], Pattern) self.assertFalse(on_product_update._wampuris[0].is_endpoint()) self.assertTrue(on_product_update._wampuris[0].is_handler()) self.assertFalse(on_product_update._wampuris[0].is_exception()) self.assertEqual(on_product_update._wampuris[0].uri(), u"com.myapp.product.<product:int>.on_update") self.assertEqual(on_product_update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD) @wamp.subscribe(u"com.myapp.<category:string>.<cid:int>.on_update") def on_update(category=None, cid=None, label=None): pass self.assertTrue(hasattr(on_update, '_wampuris')) self.assertTrue(type(on_update._wampuris) == list) self.assertEqual(len(on_update._wampuris), 1) self.assertIsInstance(on_update._wampuris[0], Pattern) self.assertFalse(on_update._wampuris[0].is_endpoint()) self.assertTrue(on_update._wampuris[0].is_handler()) self.assertFalse(on_update._wampuris[0].is_exception()) self.assertEqual(on_update._wampuris[0].uri(), u"com.myapp.<category:string>.<cid:int>.on_update") self.assertEqual(on_update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD) def test_decorate_exception(self): @wamp.error(u"com.myapp.error") class AppError(Exception): pass self.assertTrue(hasattr(AppError, '_wampuris')) self.assertTrue(type(AppError._wampuris) == list) self.assertEqual(len(AppError._wampuris), 1) 
self.assertIsInstance(AppError._wampuris[0], Pattern) self.assertFalse(AppError._wampuris[0].is_endpoint()) self.assertFalse(AppError._wampuris[0].is_handler()) self.assertTrue(AppError._wampuris[0].is_exception()) self.assertEqual(AppError._wampuris[0].uri(), u"com.myapp.error") self.assertEqual(AppError._wampuris[0]._type, Pattern.URI_TYPE_EXACT) @wamp.error(u"com.myapp.product.<product:int>.product_inactive") class ProductInactiveError(Exception): pass self.assertTrue(hasattr(ProductInactiveError, '_wampuris')) self.assertTrue(type(ProductInactiveError._wampuris) == list) self.assertEqual(len(ProductInactiveError._wampuris), 1) self.assertIsInstance(ProductInactiveError._wampuris[0], Pattern) self.assertFalse(ProductInactiveError._wampuris[0].is_endpoint()) self.assertFalse(ProductInactiveError._wampuris[0].is_handler()) self.assertTrue(ProductInactiveError._wampuris[0].is_exception()) self.assertEqual(ProductInactiveError._wampuris[0].uri(), u"com.myapp.product.<product:int>.product_inactive") self.assertEqual(ProductInactiveError._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD) @wamp.error(u"com.myapp.<category:string>.<product:int>.inactive") class ObjectInactiveError(Exception): pass self.assertTrue(hasattr(ObjectInactiveError, '_wampuris')) self.assertTrue(type(ObjectInactiveError._wampuris) == list) self.assertEqual(len(ObjectInactiveError._wampuris), 1) self.assertIsInstance(ObjectInactiveError._wampuris[0], Pattern) self.assertFalse(ObjectInactiveError._wampuris[0].is_endpoint()) self.assertFalse(ObjectInactiveError._wampuris[0].is_handler()) self.assertTrue(ObjectInactiveError._wampuris[0].is_exception()) self.assertEqual(ObjectInactiveError._wampuris[0].uri(), u"com.myapp.<category:string>.<product:int>.inactive") self.assertEqual(ObjectInactiveError._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD) def test_match_decorated_endpoint(self): @wamp.register(u"com.calculator.square") def square(x): return x args, kwargs = square._wampuris[0].match(u"com.calculator.square") self.assertEqual(square(666, **kwargs), 666) @wamp.register(u"com.myapp.product.<product:int>.update") def update_product(product=None, label=None): return product, label args, kwargs = update_product._wampuris[0].match(u"com.myapp.product.123456.update") kwargs['label'] = "foobar" self.assertEqual(update_product(**kwargs), (123456, "foobar")) @wamp.register(u"com.myapp.<category:string>.<cid:int>.update") def update(category=None, cid=None, label=None): return category, cid, label args, kwargs = update._wampuris[0].match(u"com.myapp.product.123456.update") kwargs['label'] = "foobar" self.assertEqual(update(**kwargs), ("product", 123456, "foobar")) def test_match_decorated_handler(self): @wamp.subscribe(u"com.myapp.on_shutdown") def on_shutdown(): pass args, kwargs = on_shutdown._wampuris[0].match(u"com.myapp.on_shutdown") self.assertEqual(on_shutdown(**kwargs), None) @wamp.subscribe(u"com.myapp.product.<product:int>.on_update") def on_product_update(product=None, label=None): return product, label args, kwargs = on_product_update._wampuris[0].match(u"com.myapp.product.123456.on_update") kwargs['label'] = "foobar" self.assertEqual(on_product_update(**kwargs), (123456, "foobar")) @wamp.subscribe(u"com.myapp.<category:string>.<cid:int>.on_update") def on_update(category=None, cid=None, label=None): return category, cid, label args, kwargs = on_update._wampuris[0].match(u"com.myapp.product.123456.on_update") kwargs['label'] = "foobar" self.assertEqual(on_update(**kwargs), ("product", 123456, "foobar")) def 
test_match_decorated_exception(self): @wamp.error(u"com.myapp.error") class AppError(Exception): def __init__(self, msg): Exception.__init__(self, msg) def __eq__(self, other): return self.__class__ == other.__class__ and \ self.args == other.args args, kwargs = AppError._wampuris[0].match(u"com.myapp.error") # noinspection PyArgumentList self.assertEqual(AppError(u"fuck", **kwargs), AppError(u"fuck")) @wamp.error(u"com.myapp.product.<product:int>.product_inactive") class ProductInactiveError(Exception): def __init__(self, msg, product=None): Exception.__init__(self, msg) self.product = product def __eq__(self, other): return self.__class__ == other.__class__ and \ self.args == other.args and \ self.product == other.product args, kwargs = ProductInactiveError._wampuris[0].match(u"com.myapp.product.123456.product_inactive") self.assertEqual(ProductInactiveError("fuck", **kwargs), ProductInactiveError("fuck", 123456)) @wamp.error(u"com.myapp.<category:string>.<product:int>.inactive") class ObjectInactiveError(Exception): def __init__(self, msg, category=None, product=None): Exception.__init__(self, msg) self.category = category self.product = product def __eq__(self, other): return self.__class__ == other.__class__ and \ self.args == other.args and \ self.category == other.category and \ self.product == other.product args, kwargs = ObjectInactiveError._wampuris[0].match(u"com.myapp.product.123456.inactive") self.assertEqual(ObjectInactiveError("fuck", **kwargs), ObjectInactiveError("fuck", "product", 123456)) class KwException(Exception): def __init__(self, *args, **kwargs): Exception.__init__(self, *args) self.kwargs = kwargs # what if the WAMP error message received # contains args/kwargs that cannot be # consumed by the constructor of the exception # class defined for the WAMP error URI? # 1. we can bail out (but we are already signaling an error) # 2. we can require a generic constructor # 3. we can map only unconsumed args/kwargs to generic attributes # 4. we can silently drop unconsumed args/kwargs def getargs(fun): try: argspec = inspect.getargspec(fun) except: if fun == Exception.__init__: # `inspect.getargspec(Exception.__init__)` does work on PyPy, but not # on CPython, since `Exception.__init__` is C code in CPython that # cannot be reflected upon. argspec = inspect.ArgSpec(args=['self'], varargs='args', keywords=None, defaults=None) else: raise Exception("could not inspect function {0}".format(fun)) args = argspec.args[:-len(argspec.defaults)] kwargs = argspec.args[-len(argspec.defaults):] return args, kwargs, argspec.varargs, argspec.keywords class MockSession(object): def __init__(self): self._ecls_to_uri_pat = {} self._uri_to_ecls = {} def define(self, exception, error=None): if error is None: assert(hasattr(exception, '_wampuris')) self._ecls_to_uri_pat[exception] = exception._wampuris self._uri_to_ecls[exception._wampuris[0].uri()] = exception else: assert(not hasattr(exception, '_wampuris')) self._ecls_to_uri_pat[exception] = [Pattern(error, Pattern.URI_TARGET_HANDLER)] self._uri_to_ecls[error] = exception def map_error(self, error, args=None, kwargs=None): # FIXME: # 1. map to ecls based on error URI wildcard/prefix # 2. extract additional args/kwargs from error URI if error in self._uri_to_ecls: ecls = self._uri_to_ecls[error] try: # the following might fail, eg. 
TypeError when # signature of exception constructor is incompatible # with args/kwargs or when the exception constructor raises if kwargs: if args: exc = ecls(*args, **kwargs) else: exc = ecls(**kwargs) else: if args: exc = ecls(*args) else: exc = ecls() except Exception: # FIXME: log e exc = KwException(error, *args, **kwargs) else: # this never fails args = args or [] kwargs = kwargs or {} exc = KwException(error, *args, **kwargs) return exc class TestDecoratorsAdvanced(unittest.TestCase): def test_decorate_exception_non_exception(self): def test(): # noinspection PyUnusedLocal @wamp.error(u"com.test.error") class Foo(object): pass self.assertRaises(Exception, test) def test_decorate_endpoint_multiple(self): # noinspection PyUnusedLocal @wamp.register(u"com.oldapp.oldproc") @wamp.register(u"com.calculator.square") def square(x): pass self.assertTrue(hasattr(square, '_wampuris')) self.assertTrue(type(square._wampuris) == list) self.assertEqual(len(square._wampuris), 2) for i in range(2): self.assertIsInstance(square._wampuris[i], Pattern) self.assertTrue(square._wampuris[i].is_endpoint()) self.assertFalse(square._wampuris[i].is_handler()) self.assertFalse(square._wampuris[i].is_exception()) self.assertEqual(square._wampuris[i]._type, Pattern.URI_TYPE_EXACT) self.assertEqual(square._wampuris[0].uri(), u"com.calculator.square") self.assertEqual(square._wampuris[1].uri(), u"com.oldapp.oldproc") def test_marshal_decorated_exception(self): @wamp.error(u"com.myapp.error") class AppError(Exception): pass try: raise AppError("fuck") except Exception as e: self.assertEqual(e._wampuris[0].uri(), u"com.myapp.error") @wamp.error(u"com.myapp.product.<product:int>.product_inactive") class ProductInactiveError(Exception): def __init__(self, msg, product=None): Exception.__init__(self, msg) self.product = product try: raise ProductInactiveError("fuck", 123456) except Exception as e: self.assertEqual(e._wampuris[0].uri(), u"com.myapp.product.<product:int>.product_inactive") session = MockSession() session.define(AppError) def test_define_exception_undecorated(self): session = MockSession() class AppError(Exception): pass # defining an undecorated exception requires # an URI to be provided self.assertRaises(Exception, session.define, AppError) session.define(AppError, u"com.myapp.error") exc = session.map_error(u"com.myapp.error") self.assertIsInstance(exc, AppError) def test_define_exception_decorated(self): session = MockSession() @wamp.error(u"com.myapp.error") class AppError(Exception): pass # when defining a decorated exception # an URI must not be provided self.assertRaises(Exception, session.define, AppError, u"com.myapp.error") session.define(AppError) exc = session.map_error(u"com.myapp.error") self.assertIsInstance(exc, AppError) def test_map_exception_undefined(self): session = MockSession() exc = session.map_error(u"com.myapp.error") self.assertIsInstance(exc, Exception) def test_map_exception_args(self): session = MockSession() @wamp.error(u"com.myapp.error") class AppError(Exception): pass @wamp.error(u"com.myapp.error.product_inactive") class ProductInactiveError(Exception): def __init__(self, product=None): self.product = product # define exceptions in mock session session.define(AppError) session.define(ProductInactiveError) for test in [ # (u"com.myapp.foo.error", [], {}, KwException), (u"com.myapp.error", [], {}, AppError), (u"com.myapp.error", ["you are doing it wrong"], {}, AppError), (u"com.myapp.error", ["you are doing it wrong", 1, 2, 3], {}, AppError), 
(u"com.myapp.error.product_inactive", [], {}, ProductInactiveError), (u"com.myapp.error.product_inactive", [], {"product": 123456}, ProductInactiveError), ]: error, args, kwargs, ecls = test exc = session.map_error(error, args, kwargs) self.assertIsInstance(exc, ecls) self.assertEqual(list(exc.args), args) if __name__ == '__main__': unittest.main()
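Editor's note: as a compact recap of what these tests exercise, the Pattern class can also be used directly to pull typed arguments out of a URI. The sketch below simply mirrors the URIs used in test_parse_uris above.

from autobahn.wamp.uri import Pattern

p = Pattern(u"com.myapp.product.<product:int>.update", Pattern.URI_TARGET_ENDPOINT)
args, kwargs = p.match(u"com.myapp.product.123456.update")
print(kwargs)   # {u'product': 123456}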
OCA/server-tools
refs/heads/12.0
attachment_synchronize/models/__init__.py
2
from . import attachment_queue, attachment_synchronize_task, storage_backend
dreamsxin/kbengine
refs/heads/master
kbe/src/lib/python/Lib/test/test_uu.py
107
""" Tests for uu module. Nick Mathewson """ import unittest from test import support import sys, os import uu from io import BytesIO import io plaintext = b"The smooth-scaled python crept over the sleeping dog\n" encodedtext = b"""\ M5&AE('-M;V]T:\"US8V%L960@<'ET:&]N(&-R97!T(&]V97(@=&AE('-L965P (:6YG(&1O9PH """ # Stolen from io.py class FakeIO(io.TextIOWrapper): """Text I/O implementation using an in-memory buffer. Can be a used as a drop-in replacement for sys.stdin and sys.stdout. """ # XXX This is really slow, but fully functional def __init__(self, initial_value="", encoding="utf-8", errors="strict", newline="\n"): super(FakeIO, self).__init__(io.BytesIO(), encoding=encoding, errors=errors, newline=newline) self._encoding = encoding self._errors = errors if initial_value: if not isinstance(initial_value, str): initial_value = str(initial_value) self.write(initial_value) self.seek(0) def getvalue(self): self.flush() return self.buffer.getvalue().decode(self._encoding, self._errors) def encodedtextwrapped(mode, filename): return (bytes("begin %03o %s\n" % (mode, filename), "ascii") + encodedtext + b"\n \nend\n") class UUTest(unittest.TestCase): def test_encode(self): inp = io.BytesIO(plaintext) out = io.BytesIO() uu.encode(inp, out, "t1") self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1")) inp = io.BytesIO(plaintext) out = io.BytesIO() uu.encode(inp, out, "t1", 0o644) self.assertEqual(out.getvalue(), encodedtextwrapped(0o644, "t1")) def test_decode(self): inp = io.BytesIO(encodedtextwrapped(0o666, "t1")) out = io.BytesIO() uu.decode(inp, out) self.assertEqual(out.getvalue(), plaintext) inp = io.BytesIO( b"UUencoded files may contain many lines,\n" + b"even some that have 'begin' in them.\n" + encodedtextwrapped(0o666, "t1") ) out = io.BytesIO() uu.decode(inp, out) self.assertEqual(out.getvalue(), plaintext) def test_truncatedinput(self): inp = io.BytesIO(b"begin 644 t1\n" + encodedtext) out = io.BytesIO() try: uu.decode(inp, out) self.fail("No exception raised") except uu.Error as e: self.assertEqual(str(e), "Truncated input file") def test_missingbegin(self): inp = io.BytesIO(b"") out = io.BytesIO() try: uu.decode(inp, out) self.fail("No exception raised") except uu.Error as e: self.assertEqual(str(e), "No valid begin line found in input file") class UUStdIOTest(unittest.TestCase): def setUp(self): self.stdin = sys.stdin self.stdout = sys.stdout def tearDown(self): sys.stdin = self.stdin sys.stdout = self.stdout def test_encode(self): sys.stdin = FakeIO(plaintext.decode("ascii")) sys.stdout = FakeIO() uu.encode("-", "-", "t1", 0o666) self.assertEqual(sys.stdout.getvalue(), encodedtextwrapped(0o666, "t1").decode("ascii")) def test_decode(self): sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii")) sys.stdout = FakeIO() uu.decode("-", "-") stdout = sys.stdout sys.stdout = self.stdout sys.stdin = self.stdin self.assertEqual(stdout.getvalue(), plaintext.decode("ascii")) class UUFileTest(unittest.TestCase): def _kill(self, f): # close and remove file if f is None: return try: f.close() except (SystemExit, KeyboardInterrupt): raise except: pass try: os.unlink(f.name) except (SystemExit, KeyboardInterrupt): raise except: pass def setUp(self): self.tmpin = support.TESTFN + "i" self.tmpout = support.TESTFN + "o" def tearDown(self): del self.tmpin del self.tmpout def test_encode(self): fin = fout = None try: support.unlink(self.tmpin) fin = open(self.tmpin, 'wb') fin.write(plaintext) fin.close() fin = open(self.tmpin, 'rb') fout = open(self.tmpout, 'wb') uu.encode(fin, 
fout, self.tmpin, mode=0o644) fin.close() fout.close() fout = open(self.tmpout, 'rb') s = fout.read() fout.close() self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin)) # in_file and out_file as filenames uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644) fout = open(self.tmpout, 'rb') s = fout.read() fout.close() self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin)) finally: self._kill(fin) self._kill(fout) def test_decode(self): f = None try: support.unlink(self.tmpin) f = open(self.tmpin, 'wb') f.write(encodedtextwrapped(0o644, self.tmpout)) f.close() f = open(self.tmpin, 'rb') uu.decode(f) f.close() f = open(self.tmpout, 'rb') s = f.read() f.close() self.assertEqual(s, plaintext) # XXX is there an xp way to verify the mode? finally: self._kill(f) def test_decode_filename(self): f = None try: support.unlink(self.tmpin) f = open(self.tmpin, 'wb') f.write(encodedtextwrapped(0o644, self.tmpout)) f.close() uu.decode(self.tmpin) f = open(self.tmpout, 'rb') s = f.read() f.close() self.assertEqual(s, plaintext) finally: self._kill(f) def test_decodetwice(self): # Verify that decode() will refuse to overwrite an existing file f = None try: f = io.BytesIO(encodedtextwrapped(0o644, self.tmpout)) f = open(self.tmpin, 'rb') uu.decode(f) f.close() f = open(self.tmpin, 'rb') self.assertRaises(uu.Error, uu.decode, f) f.close() finally: self._kill(f) def test_main(): support.run_unittest(UUTest, UUStdIOTest, UUFileTest, ) if __name__=="__main__": test_main()
AlanJAS/iknowUruguay
refs/heads/master
recursos/0uruguay/datos/levels.py
1
# -*- coding: utf-8 -*- from gettext import gettext as _ LEVEL1 = [ 7, _('Departments'), ['lineasDepto'], [], [ (_('Artigas'), _("It's easy")), (_('Salto'), _("It's easy")), (_('Paysandú'), _("It's easy")), (_('Río Negro'), _("It's easy")), (_('Rivera'), _("It's easy")), (_('Tacuarembó'), _("It's easy")), (_('Soriano'), _("It's easy")), (_('Colonia'), _("It's easy")), (_('Florida'), _("It's easy")), (_('Flores'), _("It's easy")), (_('San José'), _("It's easy")), (_('Durazno'), _("It's easy")), (_('Treinta y Tres'), _("It's easy")), (_('Cerro Largo'), _("It's easy")), (_('Rocha'), _("It's easy")), (_('Lavalleja'), _("It's easy")), (_('Maldonado'), _("It's easy")), (_('Canelones'), _("It's easy")), (_('Montevideo'), _("It's easy")) ] ] LEVEL2 = [ 2, _('Departmental capitals'), ['lineasDepto', 'capitales'], [], [ (_('Artigas'), _("It's easy")), (_('Salto'), _("It's easy")), (_('Paysandú'), _("It's easy")), (_('Fray Bentos'), _("It's easy")), (_('Rivera'), _("It's easy")), (_('Tacuarembó'), _("It's easy")), (_('Mercedes'), _("It's easy")), (_('Colonia del Sacramento'), _("It's easy")), (_('Florida'), _("It's easy")), (_('Trinidad'), _("It's easy")), (_('San José de Mayo'), _("It's easy")), (_('Durazno'), _("It's easy")), (_('Treinta y Tres'), _("It's easy")), (_('Melo'), _("It's easy")), (_('Rocha'), _("It's easy")), (_('Minas'), _("It's easy")), (_('Maldonado'), _("It's easy")), (_('Canelones'), _("It's easy")), (_('Montevideo'), _("It's easy")) ] ] LEVEL3 = [ 2, _('Cities'), ['lineasDepto', 'capitales', 'ciudades'], [], [ (_('Aiguá'), _("It's in %s") % _("Maldonado")), (_('Ansina'), _("It's in %s") % _("Tacuarembó")), (_('Artigas'), _("It's easy")), (_('Atlántida'), _("It's in %s") % _("Canelones")), (_('Baltasar Brum'), _("It's in %s") % _("Artigas")), (_('Bella Unión'), _('Is north')), (_('Belén'), _("It's in %s") % _("Salto")), (_('Canelones'), _("It's easy")), (_('Cardona'), _("It's in %s") % _("Soriano")), (_('Carmelo'), _("It's in %s") % _("Colonia")), (_('Castillos'), _("It's in %s") % _("Rocha")), (_('Casupá'), _("It's in %s") % _("Florida")), (_('Cebollatí'), _("It's in %s") % _("Rocha")), (_('Cerro Chato'), _("It's in %s") % _("Treinta y Tres")), (_('Chuy'), _("It's near Brazil")), (_('Colonia del Sacramento'), _('Try again')), (_('Constitución'), _("It's in %s") % _("Salto")), (_('Dolores'), _("It's in %s") % _("Soriano")), (_('Durazno'), _("It's in %s") % _("the center")), (_('Florida'), _("It's easy")), (_('Fraile Muerto'), _("It's in %s") % _("Cerro Largo")), (_('Fray Bentos'), _("It's in %s") % _("Rio Negro")), (_('Guichón'), _("It's in %s") % _("Paysandú")), (_('José Batlle y Ordóñez'), _("It's in %s") % _("Lavalleja")), (_('José Pedro Varela'), _("It's in %s") % _("Lavalleja")), (_('José Enrique Rodó'), _("It's in %s") % _("Soriano")), (_('Juan Lacaze'), _("It's in %s") % _("Colonia")), (_('La Coronilla'), _("It's in %s") % _("Rocha")), (_('La Paloma'), _("It's in %s") % _("Rocha")), (_('Las Piedras'), _("It's in %s") % _("Canelones")), (_('Lascano'), _("It's in %s") % _("Rocha")), (_('Libertad'), _("It's in %s") % _("San José")), (_('Maldonado'), _("It's easy")), (_('Melo'), _("It's the capital of\nthe department")), (_('Mercedes'), _("It's the capital of\nthe department")), (_('Minas'), _("It's the capital of\nthe department")), (_('Minas de Corrales'), _("It's in %s") % _("Rivera")), (_('Montevideo'), _("It's easy")), (_('Nueva Helvecia'), _("It's in %s") % _("Colonia")), (_('Nueva Palmira'), _("It's in %s") % _("Colonia")), (_('Nuevo Berlín'), _("It's in %s") % _("Rio 
Negro")), (_('Ombúes de Lavalle'), _("It's in %s") % _("Colonia")), (_('Palmitas'), _("It's in %s") % _("Soriano")), (_('Pan de Azúcar'), _("It's in %s") % _("Maldonado")), (_('Pando'), _("It's in %s") % _("Canelones")), (_('Paso de los Toros'), _("It's in %s") % _("Tacuarembó")), (_('Paysandú'), _("It's easy")), (_('Piriápolis'), _("It's in %s") % _("Maldonado")), (_('Pueblo del Carmen'), _("It's in %s") % _("Durazno")), (_('Punta del Este'), _("It's in %s") % _("Maldonado")), (_('Quebracho'), _("It's in %s") % _("Paysandú")), (_('Rivera'), _("It's near Brazil")), (_('Rocha'), _("It's the capital of\nthe department")), (_('Rosario'), _("It's in %s") % _("Colonia")), (_('Río Branco'), _("It's in %s") % _("Cerro Largo")), (_('Salto'), _("It's easy")), (_('San Carlos'), _("It's in %s") % _("Maldonado")), (_('San Gregorio de Polanco'), _("It's in %s") % _("Tacuarembó")), (_('San José de Mayo'), _("It's the capital of\nthe department")), (_('San Ramón'), _("It's in %s") % _("Canelones")), (_('Santa Clara de Olimar'), _("It's in %s") % _("Treinta y Tres")), (_('Santa Lucía'), _("It's in %s") % _("Canelones")), (_('Sarandí del Yi'), _("It's in %s") % _("Durazno")), (_('Sarandí Grande'), _("It's in %s") % _("Florida")), (_('Tacuarembó'), _("It's easy")), (_('Tala'), _("It's in %s") % _("Canelones")), (_('Tarariras'), _("It's in %s") % _("Colonia")), (_('Tomás Gomensoro'), _("It's in %s") % _("Artigas")), (_('Tranqueras'), _("It's in %s") % _("Rivera")), (_('Treinta y Tres'), _("It's the capital of\nthe department")), (_('Trinidad'), _("It's the capital of\nthe department")), (_('Vergara'), _("It's in %s") % _("Treinta y Tres")), (_('Vichadero'), _("It's in %s") % _("Rivera")), (_('Young'), _("It's in %s") % _("Rio Negro")) ] ] LEVEL4 = [ 15, _('Waterways'), ['lineasDepto', 'rios'], [], [ (_('Río Arapey Chico'), _('Is north')), (_('Río Arapey Grande'), _('Flows into the %s') % _('Uruguay')), (_('Río Cebollatí'), _('Flows into the %s') % _('Laguna Merín')), (_('Río Cuareim'), _('Is north')), (_('Río Daymán'), _('Flows into the %s') % _('Uruguay')), (_('Río Negro'), _('Has three dams')), (_('Río Olimar Chico'), _("It's in %s") % _("Treinta y Tres")), (_('Río Olimar Grande'), _("It's in %s") % _("Treinta y Tres")), (_('Río Queguay Chico'), _("It's in %s") % _("Paysandú")), (_('Río Queguay'), _('Flows into the %s') % _('Uruguay')), (_('Río Rosario'), _("It's in %s") % _("Colonia")), (_('Río San José'), _("It's in %s") % _("San José")), (_('Río San Juan'), _("It's in %s") % _("Colonia")), (_('Río San Salvador'), _("It's in %s") % _("Soriano")), (_('Río Santa Lucía'), _("It's easy")), (_('Río Santa Lucía Chico'), _("It's in %s") % _("Canelones")), (_('Río Tacuarembó'), _("It's in %s") % _("Tacuarembó")), (_('Río Tacuarí'), _('Flows into the %s') % _('Laguna Merín')), (_('Río Uruguay'), _("It's easy")), (_('Río Yaguarón'), _("It's near Brazil")), (_('Río Yi'), _("It's in %s") % _("Durazno")), (_('Río de la Plata'), _("It's easy")), (_('Atlantic Ocean'), _("It's easy")), (_('Laguna Merín'), _("It's in the east")), (_('Laguna Negra'), _("It's in %s") % _("Rocha")), (_('Laguna de Castillos'), _("It's in %s") % _("Rocha")), (_('Laguna de Rocha'), _("It's in %s") % _("Rocha")), (_('Laguna del Sauce'), _("It's in %s") % _("Maldonado")) ] ] LEVEL5 = [ 1, _('Elevations'), ['cuchillas', 'cerros'], [], [ (_('the %s') % _('Cuchilla de Haedo'), 4, _('Cuchilla de Haedo'), _("It's in the north")), (_('the %s') % _('Cuchilla Negra'), 4, _('Cuchilla Negra'), _("It's on the border\nwith %s") % _('Brasil')), (_('the %s') 
% _('Cuchilla de Santa Ana'), 4, _('Cuchilla de Santa Ana'), _("It's in %s") % _('Rivera')), (_('the %s') % _('Cuchilla Grande'), 4, _('Cuchilla Grande'), _("It's in the south")), (_('the %s') % _('Cuchilla Grande del Durazno'), 4, _('Cuchilla Grande del Durazno'), _("It's in %s") % _('Durazno')), (_('the %s') % _('Cuchilla Grande Inferior'), 4, _('Cuchilla Grande Inferior'), _('Try again')), (_('the %s') % _('Cerro de Montevideo'), 5, _('Cerro de Montevideo'), _("It's in %s") % _('Montevideo')), (_('the %s') % _('Cerro Pan de Azúcar'), 5, _('Cerro Pan de Azúcar'), _("It's in %s") % _('Maldonado')), (_('the %s') % _('Cerro Verdún'), 5, _('Cerro Verdún'), _("It's in %s") % _('Lavalleja')), (_('the %s') % _('Cerro Arequita'), 5, _('Cerro Arequita'), _("It's in %s") % _('Lavalleja')), (_('the %s') % _('Cerro Catedral'), 5, _('Cerro Catedral'), _("It's in %s") % _('Maldonado')), (_('the %s') % _('Cerro de las Animas'), 5, _('Cerro de las Animas'), _("It's in %s") % _('Maldonado')), (_('the %s') % _('Cerros de Batoví'), 5, _('Cerros de Batoví'), _("It's in the north")), (_('the %s') % _('Cerro Bonito'), 5, _('Cerro Bonito'), _("It's in %s") % _('Cuchilla de Santa Ana')), (_('the %s') % _('Cerro del Medio'), 5, _('Cerro del Medio'), _("It's in %s") % _('Tacuarembó')), (_('the %s') % _('Cerro Chato'), 5, _('Cerro Chato'), _("It's in %s") % _('Durazno')) ] ] LEVEL6 = [ 5, _('Routes'), ['rutas', 'capitales'], ['capitales'], [ (_('Route %s') % 1, _('Try again')), (_('Route %s') % 2, _('Passes through %s') % _('Mercedes')), (_('Route %s') % 3, _('Try again')), (_('Route %s') % 4, _("It's in the north")), (_('Route %s') % 5, _('Try again')), (_('Route %s') % 6, _("Ends in %s") % _('Vichadero')), (_('Route %s') % 7, _('Passes through %s') % _('Melo')), (_('Route %s') % 8, _('Passes through %s') % _('Minas')), (_('Route %s') % 9, _('Passes through %s') % _('Rocha')), (_('Route %s') % 21, _('Try again')), (_('Route %s') % 11, _('Passes through %s') % _('San José') + _('and') + _('Canelones')), (_('Route %s') % 14, _('Try again')), (_('Route %s') % 26, _('Passes through %s') % _('Tacuarembó')), (_('Route %s') % 'Interbalnearia', _('Passes through %s') % _('Punta del Este')) ] ] LEVELS = [LEVEL1, LEVEL2, LEVEL3, LEVEL4, LEVEL5, LEVEL6]
eduNEXT/edunext-platform
refs/heads/master
openedx/core/djangoapps/util/management/commands/reset_db.py
4
# -*- coding: utf-8 -*-
"""
reset_db
========

Django command to drop and recreate a database.  Useful when running
tests against a database which may previously have had different
migrations applied to it.

This handles the one specific use case of the "reset_db" command from
django-extensions that we were actually using.

originally from http://www.djangosnippets.org/snippets/828/ by dnordberg
"""

import logging

import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from six.moves import configparser


class Command(BaseCommand):
    help = "Resets the database for this project."

    def add_arguments(self, parser):
        parser.add_argument(
            '-R', '--router', action='store',
            dest='router', default='default',
            help='Use this router-database other than defined in settings.py')

    def handle(self, *args, **options):
        """
        Resets the database for this project.

        Note: Transaction wrappers are in reverse as a work around for
        autocommit, anybody know how to do this the right way?
        """
        router = options.get('router')
        dbinfo = settings.DATABASES.get(router)
        if dbinfo is None:
            raise CommandError(u"Unknown database router %s" % router)

        engine = dbinfo.get('ENGINE').split('.')[-1]

        user = password = database_name = database_host = database_port = ''
        if engine == 'mysql':
            (user, password, database_name, database_host,
             database_port) = parse_mysql_cnf(dbinfo)

        user = dbinfo.get('USER') or user
        password = dbinfo.get('PASSWORD') or password
        owner = user

        database_name = dbinfo.get('NAME') or database_name
        if database_name == '':
            raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")

        database_host = dbinfo.get('HOST') or database_host
        database_port = dbinfo.get('PORT') or database_port

        verbosity = int(options.get('verbosity', 1))

        if engine in ('sqlite3', 'spatialite'):
            import os
            try:
                logging.info(u"Unlinking %s database", engine)
                os.unlink(database_name)
            except OSError:
                pass

        elif engine in ('mysql',):
            import MySQLdb as Database
            kwargs = {
                'user': user,
                'passwd': password,
            }
            if database_host.startswith('/'):
                kwargs['unix_socket'] = database_host
            else:
                kwargs['host'] = database_host

            if database_port:
                kwargs['port'] = int(database_port)

            connection = Database.connect(**kwargs)
            drop_query = u'DROP DATABASE IF EXISTS `%s`' % database_name
            utf8_support = 'CHARACTER SET utf8'
            create_query = u'CREATE DATABASE `%s` %s' % (database_name, utf8_support)
            logging.info('Executing... "' + drop_query + '"')
            connection.query(drop_query)
            logging.info('Executing... "' + create_query + '"')
            connection.query(create_query)

        elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
            if engine == 'postgresql' and django.VERSION < (1, 9):
                import psycopg as Database  # NOQA
            elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
                import psycopg2 as Database  # NOQA

            conn_params = {'database': 'template1'}
            if user:
                conn_params['user'] = user
            if password:
                conn_params['password'] = password
            if database_host:
                conn_params['host'] = database_host
            if database_port:
                conn_params['port'] = database_port

            connection = Database.connect(**conn_params)
            connection.set_isolation_level(0)  # autocommit false
            cursor = connection.cursor()
            drop_query = u"DROP DATABASE \"%s\";" % database_name
            logging.info('Executing... "' + drop_query + '"')

            try:
                cursor.execute(drop_query)
            except Database.ProgrammingError as e:
                logging.exception(u"Error: %s", e)

            create_query = u"CREATE DATABASE \"%s\"" % database_name
            if owner:
                create_query += u" WITH OWNER = \"%s\" " % owner
            create_query += u" ENCODING = 'UTF8'"

            if engine == 'postgis' and django.VERSION < (1, 9):
                # For PostGIS 1.5, fetch template name if it exists
                from django.contrib.gis.db.backends.postgis.base import DatabaseWrapper
                postgis_template = DatabaseWrapper(dbinfo).template_postgis
                if postgis_template is not None:
                    create_query += u' TEMPLATE = %s' % postgis_template

            if settings.DEFAULT_TABLESPACE:
                create_query += u' TABLESPACE = %s;' % settings.DEFAULT_TABLESPACE
            else:
                create_query += u';'

            logging.info('Executing... "' + create_query + '"')
            cursor.execute(create_query)

        else:
            raise CommandError(u"Unknown database engine %s" % engine)

        if verbosity >= 2:
            print("Reset successful.")


def parse_mysql_cnf(dbinfo):
    """
    Attempt to parse mysql database config file for connection settings.
    Ideally we would hook into django's code to do this, but read_default_file
    is handled by the mysql C libs so we have to emulate the behaviour

    Settings that are missing will return ''

    returns (user, password, database_name, database_host, database_port)
    """
    read_default_file = dbinfo.get('OPTIONS', {}).get('read_default_file')
    if read_default_file:
        config = configparser.RawConfigParser({
            'user': '',
            'password': '',
            'database': '',
            'host': '',
            'port': '',
            'socket': '',
        })
        import os
        config.read(os.path.expanduser(read_default_file))
        try:
            user = config.get('client', 'user')
            password = config.get('client', 'password')
            database_name = config.get('client', 'database')
            database_host = config.get('client', 'host')
            database_port = config.get('client', 'port')
            socket = config.get('client', 'socket')

            if database_host == 'localhost' and socket:
                # mysql actually uses a socket if host is localhost
                database_host = socket

            return user, password, database_name, database_host, database_port
        except configparser.NoSectionError:
            pass

    return '', '', '', '', ''
AmrThabet/CouchPotatoServer
refs/heads/master
libs/suds/sax/text.py
203
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )

"""
Contains XML text classes.
"""

from suds import *
from suds.sax import *


class Text(unicode):
    """
    An XML text object used to represent text content.
    @ivar lang: The (optional) language flag.
    @type lang: bool
    @ivar escaped: The (optional) XML special character escaped flag.
    @type escaped: bool
    """
    __slots__ = ('lang', 'escaped',)

    @classmethod
    def __valid(cls, *args):
        return ( len(args) and args[0] is not None )

    def __new__(cls, *args, **kwargs):
        if cls.__valid(*args):
            lang = kwargs.pop('lang', None)
            escaped = kwargs.pop('escaped', False)
            result = super(Text, cls).__new__(cls, *args, **kwargs)
            result.lang = lang
            result.escaped = escaped
        else:
            result = None
        return result

    def escape(self):
        """
        Encode (escape) special XML characters.
        @return: The text with XML special characters escaped.
        @rtype: L{Text}
        """
        if not self.escaped:
            post = sax.encoder.encode(self)
            escaped = ( post != self )
            return Text(post, lang=self.lang, escaped=escaped)
        return self

    def unescape(self):
        """
        Decode (unescape) special XML characters.
        @return: The text with escaped XML special characters decoded.
        @rtype: L{Text}
        """
        if self.escaped:
            post = sax.encoder.decode(self)
            return Text(post, lang=self.lang)
        return self

    def trim(self):
        post = self.strip()
        return Text(post, lang=self.lang, escaped=self.escaped)

    def __add__(self, other):
        joined = u''.join((self, other))
        result = Text(joined, lang=self.lang, escaped=self.escaped)
        if isinstance(other, Text):
            result.escaped = ( self.escaped or other.escaped )
        return result

    def __repr__(self):
        s = [self]
        if self.lang is not None:
            s.append(' [%s]' % self.lang)
        if self.escaped:
            s.append(' <escaped>')
        return ''.join(s)

    def __getstate__(self):
        state = {}
        for k in self.__slots__:
            state[k] = getattr(self, k)
        return state

    def __setstate__(self, state):
        for k in self.__slots__:
            setattr(self, k, state[k])


class Raw(Text):
    """
    Raw text which is not XML escaped.
    This may include I{string} XML.
    """
    def escape(self):
        return self

    def unescape(self):
        return self

    def __add__(self, other):
        joined = u''.join((self, other))
        return Raw(joined, lang=self.lang)
DanielPWang/Replicating-DeepMind
refs/heads/master
sandbox/example3.py
7
""" First test using convolution """ import theano import theano.tensor as T import numpy as np from theano.tensor.nnet import conv # import theano.printing as tprint import chessboard def shared_dataset(data_xy): """ Transform data into theano.shared. This is important for parallelising computations later """ data_x, data_y = data_xy shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX)) shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX)) return shared_x, shared_y class HiddenLayer: """ Implements hidden layer of """ def __init__(self, input, n_in, n_nodes): self.input = input W_bound = np.sqrt(6. /(n_in+n_nodes)) #: Weight matrix (n_in x n_nodes) W_values = np.asarray(np.random.uniform(high=W_bound, low=-W_bound, size=(n_in, n_nodes)), dtype=theano.config.floatX) self.W = theano.shared(value=W_values, name='W', borrow=True) #: Bias term b_values = np.zeros((n_nodes,), dtype=theano.config.floatX) self.b = theano.shared(value=b_values, name='b', borrow=True) #: Output is just the weighted sum of activations self.output = T.dot(input, self.W) + self.b #all the variables that can change during learning self.params = [self.W, self.b] class OutputLayer: """ Implement last layer of the network. Output values of this layer are the results of the computation. """ def __init__(self, input_from_previous_layer, n_in, n_nodes): self.input=input_from_previous_layer #: Weight matrix (n_in x n_nodes) W_bound = -np.sqrt(6. / (n_in + n_nodes)) W_values = np.asarray(np.random.uniform(high=W_bound, low=-W_bound, size=(n_in, n_nodes)), dtype=theano.config.floatX) self.W = theano.shared(value=W_values, name='W', borrow=True) #: Bias term b_values = np.zeros((n_nodes,), dtype=theano.config.floatX) self.b = theano.shared(value=b_values, name='b', borrow=True) #output using linear rectifier self.threshold = 0 lin_output = T.dot(input_from_previous_layer, self.W) + self.b above_threshold = lin_output > self.threshold self.output = above_threshold * (lin_output - self.threshold) #all the variables that can change during learning self.params = [self.W, self.b] def errors(self, y): """ return the error made in predicting the output value :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label """ # check if y has same dimension of output if y.ndim != self.output.ndim: raise TypeError('y should have the same shape as self.output', ('y', y.type, 'output', self.output.type)) return np.abs(T.mean(self.output-y)) class ConvolutionalLayer(object): """Pool Layer of a convolutional network """ def __init__(self, input_images, filter_shape, image_shape, stride=4): """ Allocate a LeNetConvPoolLayer with shared variable internal parameters. 
:type input: theano.tensor.dtensor4 :param input: symbolic image tensor, of shape image_shape :type filter_shape: tuple or list of length 4 :param filter_shape: (number of filters, num input feature maps, filter height,filter width) :type image_shape: tuple or list of length 4 :param image_shape: (batch size, num input feature maps, image height, image width) :type poolsize: tuple or list of length 2 :param poolsize: the downsampling (pooling) factor (#rows,#cols) """ assert image_shape[1] == filter_shape[1] self.input = input # there are "num input feature maps * filter height * filter width" # inputs to each hidden unit self.fan_in = np.prod(filter_shape[1:]) # number of nodes in our layer is nr_of_filters*( (image_size-filter_size)/stride))**2 feature_map_size=1+(image_shape[2]-filter_shape[2])/stride self.fan_out = (filter_shape[0] * feature_map_size * feature_map_size) #we need to define the interval at which we initialize the weights. We use formula from example W_bound = np.sqrt(6. / (self.fan_in + self.fan_out)) # initialize weights with random weights self.W = theano.shared(np.asarray(np.random.uniform(high=W_bound, low=-W_bound, size=filter_shape), dtype=theano.config.floatX), borrow=True) # the bias is a 1D tensor -- one bias per output feature map b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX) self.b = theano.shared(value=b_values, borrow=True) # convolve input feature maps with filters convolution_output = conv.conv2d(input=input_images, filters=self.W, filter_shape=filter_shape, image_shape=image_shape, subsample=(stride , stride)) # add the bias term. Since the bias is a vector (1D array), we first # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will # thus be broadcasted across mini-batches and feature map # width & height self.threshold = 0 activation = convolution_output + self.b.dimshuffle('x', 0, 'x', 'x') #above_threshold = activation > self.threshold #self.output = above_threshold * (activation - self.threshold) self.output=activation # store parameters of this layer self.params = [self.W, self.b] class MLP: """ Class which implements the classification algorithm (neural network in our case) """ def __init__(self, input, input_shape, filter_shapes, strides, n_hidden, n_out): #: Convolutional layer self.conv_layer = ConvolutionalLayer(input, filter_shapes[0], input_shape, strides[0]) flattened_input=self.conv_layer.output.flatten(2) #: Hidden layer implements summation self.hidden_layer = HiddenLayer(flattened_input, self.conv_layer.fan_out, n_hidden) #: Output layer implements summations and rectifier non-linearity self.output_layer = OutputLayer(self.hidden_layer.output, n_hidden, n_out) # L1 norm ; one regularization option is to enforce L1 norm to # be small self.L1 = abs(self.hidden_layer.W).sum() \ + abs(self.output_layer.W).sum() \ + abs(self.conv_layer.W).sum() # square of L2 norm ; one regularization option is to enforce # square of L2 norm to be small self.L2_sqr = (self.hidden_layer.W ** 2).sum() \ + (self.output_layer.W ** 2).sum() \ + (self.conv_layer.W ** 2).sum() self.params = self.hidden_layer.params + self.output_layer.params + self.conv_layer.params def main(): #: Define data sets train_set = (np.array([chessboard.make_chessboard(8), chessboard.make_chessboard(2)]), np.array([[[10.0]], [[20.0]]])) # Transform them to theano.shared train_set_x, train_set_y = shared_dataset(train_set) #test_set_x, test_set_y = shared_dataset(test_set) # This is how you can print weird theano stuff print train_set_x.eval() print 
train_set_y.eval() # Define some structures to store training data and labels x = T.matrix('x') y = T.matrix('y') index = T.lscalar() input_tensor=x.reshape((1,1,84,84)) # Define the classification algorithm classifier = MLP(input=input_tensor, input_shape=[1, 1, 84, 84], filter_shapes=[[16, 1, 8, 8]], strides=[4], n_hidden=40, n_out=1) #define the cost function using l1 and l2 regularization terms: cost = classifier.output_layer.errors(y) \ + 0.0 * classifier.L1 \ + 0.0 * classifier.L2_sqr print type(cost) # Calculate the derivatives by each existing parameter grads = T.grad(cost, classifier.params) # Define how much we need to change the parameter values learning_rate = 0.0001 updates = [] for param_i, gparam_i in zip(classifier.params, grads): updates.append((param_i, param_i - learning_rate * gparam_i)) #print updates # Train model is a theano.function type object that performs updates on parameter values train_model = theano.function(inputs=[index], outputs=cost, updates=updates, givens={ x: train_set_x[index], y: train_set_y[index]}) # We construct an object of type theano.function, which we call test_model test_model = theano.function( inputs=[index], outputs=[classifier.conv_layer.W, classifier.hidden_layer.b, classifier.output_layer.W, classifier.output_layer.b, classifier.output_layer.output, cost], givens={ x: train_set_x[index], y: train_set_y[index] }) n_train_points = train_set_x.get_value(borrow=True).shape[0] print "nr of training points is ", n_train_points for i in range(n_train_points): result = test_model(i) print "we calculated something: ", result #lets train some iterations: for i in range(10000): cost = train_model(0) cost = train_model(1) result = test_model(0) print "test0: ", result[-3:] result = test_model(1) print "test1: ", result[-3:] #for i in range(n_train_points): # result = test_model(i) # print "we calculated something: ", result if __name__ == '__main__': main()
JTarball/docker-django-polymer
refs/heads/master
docker/app/app/backend/apps/search/redis/redis-py-master/docs/conf.py
48
# -*- coding: utf-8 -*- # # redis-py documentation build configuration file, created by # sphinx-quickstart on Fri Feb 8 00:47:08 2013. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'redis-py' copyright = u'2013, Andy McCurdy, Mahdi Yusuf' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2.7.2' # The full version, including alpha/beta/rc tags. release = '2.7.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. 
Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'redis-pydoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'redis-py.tex', u'redis-py Documentation', u'Andy McCurdy, Mahdi Yusuf', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'redis-py', u'redis-py Documentation', [u'Andy McCurdy, Mahdi Yusuf'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'redis-py', u'redis-py Documentation', u'Andy McCurdy, Mahdi Yusuf', 'redis-py', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
ahb0327/intellij-community
refs/heads/master
python/lib/Lib/encodings/hex_codec.py
528
""" Python 'hex_codec' Codec - 2-digit hex content transfer encoding Unlike most of the other codecs which target Unicode, this codec will return Python string objects for both encode and decode. Written by Marc-Andre Lemburg ([email protected]). """ import codecs, binascii ### Codec APIs def hex_encode(input,errors='strict'): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. """ assert errors == 'strict' output = binascii.b2a_hex(input) return (output, len(input)) def hex_decode(input,errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. """ assert errors == 'strict' output = binascii.a2b_hex(input) return (output, len(input)) class Codec(codecs.Codec): def encode(self, input,errors='strict'): return hex_encode(input,errors) def decode(self, input,errors='strict'): return hex_decode(input,errors) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): assert self.errors == 'strict' return binascii.b2a_hex(input) class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): assert self.errors == 'strict' return binascii.a2b_hex(input) class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='hex', encode=hex_encode, decode=hex_decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, )
hmoco/osf.io
refs/heads/develop
api_tests/nodes/views/test_node_alternative_citations.py
2
from nose.tools import * # flake8: noqa from website.util import permissions from api.base.settings.defaults import API_BASE from api.citations import utils as citation_utils from tests.base import ApiTestCase from osf_tests.factories import ( ProjectFactory, RegistrationFactory, AuthUserFactory, AlternativeCitationFactory ) def payload(name=None, text=None, _id=None): data = {'data': { 'type': 'citations', 'attributes': {} } } if name is not None: data['data']['attributes']['name'] = name if text is not None: data['data']['attributes']['text'] = text if _id is not None: data['data']['id'] = _id return data def set_up_citation_and_project(admin, public=True, registration=False, contrib=None, citation2=False, for_delete=False, bad=False): project = ProjectFactory(creator=admin, is_public=public) citation = AlternativeCitationFactory(name='name', text='text') project.alternative_citations.add(citation) if contrib: project.add_contributor(contrib, permissions=[permissions.READ, permissions.WRITE], visible=True) if citation2: citation2 = AlternativeCitationFactory(name='name2', text='text2') project.alternative_citations.add(citation2) project.save() slug = 1 if bad else citation._id if registration: project = RegistrationFactory(project=project, is_public=public) citation_url = '/{}registrations/{}/citations/{}/'.format(API_BASE, project._id, slug) else: citation_url = '/{}nodes/{}/citations/{}/'.format(API_BASE, project._id, slug) if for_delete: return project, citation_url return citation, citation_url class TestUpdateAlternativeCitations(ApiTestCase): def request(self, is_admin=False, is_contrib=True, logged_out=False, errors=False, patch=False, **kwargs): name = kwargs.pop('name', None) text = kwargs.pop('text', None) admin = AuthUserFactory() if is_admin: user = admin elif not logged_out: user = AuthUserFactory() kwargs['contrib'] = user if is_contrib else None citation, citation_url = set_up_citation_and_project(admin, **kwargs) data = payload(name=name, text=text, _id=citation._id) if patch: if not logged_out: res = self.app.patch_json_api(citation_url, data, auth=user.auth, expect_errors=errors) else: res = self.app.patch_json_api(citation_url, data, expect_errors=errors) else: if not logged_out: res = self.app.put_json_api(citation_url, data, auth=user.auth, expect_errors=errors) else: res = self.app.put_json_api(citation_url, data, expect_errors=errors) return res, citation def test_update_citation_name_admin_public(self): res, citation = self.request(name="Test", text="text", is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], 'Test') citation.reload() assert_equal(citation.name, "Test") def test_update_citation_name_admin_private(self): res, citation = self.request(name="Test", text="text", public=False, is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], 'Test') citation.reload() assert_equal(citation.name, "Test") def test_update_citation_name_non_admin_public(self): res, citation = self.request(name="Test", text="text", errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_name_non_admin_private(self): res, citation = self.request(name="Test", text="text", public=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) 
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_name_non_contrib_public(self): res, citation = self.request(name="Test", text="text", is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_name_non_contrib_private(self): res, citation = self.request(name="Test", text="text", public=False, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_name_logged_out_public(self): res, citation = self.request(name="Test", text="text", logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_name_logged_out_private(self): res, citation = self.request(name="Test", text="text", public=False, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_text_admin_public(self): res, citation = self.request(name="name", text="Test", is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['text'], 'Test') citation.reload() assert_equal(citation.text, "Test") def test_update_citation_text_admin_private(self): res, citation = self.request(name="name", text="Test", public=False, is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['text'], 'Test') citation.reload() assert_equal(citation.text, "Test") def test_update_citation_text_non_admin_public(self): res, citation = self.request(name="name", text="Test", errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_text_non_admin_private(self): res, citation = self.request(name="name", text="Test", public=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_text_non_contrib_public(self): res, citation = self.request(name="name", text="Test", is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_text_non_contrib_private(self): res, citation = self.request(name="name", text="Test", public=False, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) 
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_text_logged_out_public(self): res, citation = self.request(name="name", text="Test", logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_text_logged_out_private(self): res, citation = self.request(name="name", text="Test", public=False, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_admin_public(self): res, citation = self.request(name="Test", text="Test", is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], res.json['data']['attributes']['text'], 'Test') citation.reload() assert_equal(citation.name, citation.text, "Test") def test_update_citation_admin_private(self): res, citation = self.request(name="Test", text="Test", public=False, is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], res.json['data']['attributes']['text'], 'Test') citation.reload() assert_equal(citation.name, citation.text, "Test") def test_update_citation_non_admin_public(self): res, citation = self.request(name="Test", text="Test", errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_non_admin_private(self): res, citation = self.request(name="Test", text="Test", public=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_non_contrib_public(self): res, citation = self.request(name="Test", text="Test", is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_non_contrib_private(self): res, citation = self.request(name="Test", text="Test", public=False, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_logged_out_public(self): res, citation = self.request(name="Test", text="Test", logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.name, "name") assert_equal(citation.text, "text") 
def test_update_citation_logged_out_private(self): res, citation = self.request(name="Test", text="Test", public=False, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_repeat_name_admin_public(self): res, citation = self.request(name="name2", text="text", is_admin=True, citation2=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], "There is already a citation named 'name2'") citation.reload() assert_equal(citation.name, "name") def test_update_citation_repeat_name_admin_private(self): res, citation = self.request(name="name2", text="text", public=False, is_admin=True, citation2=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], "There is already a citation named 'name2'") citation.reload() assert_equal(citation.name, "name") def test_update_citation_repeat_name_non_admin_public(self): res, citation = self.request(name="name2", text="text", citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_repeat_name_non_admin_private(self): res, citation = self.request(name="name2", text="text", public=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_repeat_name_non_contrib_public(self): res, citation = self.request(name="name2", text="text", is_contrib=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_repeat_name_non_contrib_private(self): res, citation = self.request(name="name2", text="text", public=False, is_contrib=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_repeat_name_logged_out_public(self): res, citation = self.request(name="name2", text="text", logged_out=True, citation2=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.name, "name") def test_update_citation_repeat_name_logged_out_private(self): res, citation = self.request(name="name2", text="text", public=False, logged_out=True, citation2=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.name, "name") def 
test_update_citation_repeat_text_admin_public(self): res, citation = self.request(name="name", text="text2", is_admin=True, citation2=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], "Citation matches 'name2'") citation.reload() assert_equal(citation.text, "text") def test_update_citation_repeat_text_admin_private(self): res, citation = self.request(name="name", text="text2", public=False, is_admin=True, citation2=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], "Citation matches 'name2'") citation.reload() assert_equal(citation.text, "text") def test_update_citation_repeat_text_non_admin_public(self): res, citation = self.request(name="name", text="text2", citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_repeat_text_non_admin_private(self): res, citation = self.request(name="name", text="text2", public=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_repeat_text_non_contrib_public(self): res, citation = self.request(name="name", text="text2", is_contrib=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_repeat_text_non_contrib_private(self): res, citation = self.request(name="name", text="text2", public=False, is_contrib=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_repeat_text_logged_out_public(self): res, citation = self.request(name="name", text="text2", logged_out=True, citation2=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_repeat_text_logged_out_private(self): res, citation = self.request(name="name", text="text2", public=False, logged_out=True, citation2=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") def test_update_citation_repeat_admin_public(self): res, citation = self.request(name="name2", text="text2", is_admin=True, citation2=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 2) errors = [error['detail'] for error in res.json['errors']] assert_in("There is already a citation named 'name2'", errors) assert_in("Citation matches 'name2'", errors) citation.reload() assert_equal(citation.text, "text") 
assert_equal(citation.name, "name") def test_update_citation_repeat_admin_private(self): res, citation = self.request(name="name2", text="text2", public=False, is_admin=True, citation2=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 2) errors = [error['detail'] for error in res.json['errors']] assert_in("There is already a citation named 'name2'", errors) assert_in("Citation matches 'name2'", errors) citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_repeat_non_admin_public(self): res, citation = self.request(name="name2", text="text2", citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_repeat_non_admin_private(self): res, citation = self.request(name="name2", text="text2", public=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_repeat_non_contrib_public(self): res, citation = self.request(name="name2", text="text2", is_contrib=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_repeat_non_contrib_private(self): res, citation = self.request(name="name2", text="text2", public=False, is_contrib=False, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_repeat_logged_out_public(self): res, citation = self.request(name="name2", text="text2", logged_out=True, citation2=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_repeat_logged_out_private(self): res, citation = self.request(name="name2", text="text2", public=False, logged_out=True, citation2=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_empty_admin_public(self): res, citation = self.request(is_admin=True, patch=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], 'name') assert_equal(res.json['data']['attributes']['text'], 'text') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_empty_admin_private(self): res, citation = self.request(public=False, 
is_admin=True, patch=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], 'name') assert_equal(res.json['data']['attributes']['text'], 'text') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_empty_non_admin_public(self): res, citation = self.request(patch=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_empty_non_admin_private(self): res, citation = self.request(public=False, patch=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_empty_non_contrib_public(self): res, citation = self.request(is_contrib=False, patch=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_empty_non_contrib_private(self): res, citation = self.request(public=False, is_contrib=False, patch=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_empty_logged_out_public(self): res, citation = self.request(logged_out=True, patch=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_empty_logged_out_private(self): res, citation = self.request(public=False, logged_out=True, patch=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_admin_public(self): res, citation = self.request(name="new name", patch=True, is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], 'new name') assert_equal(res.json['data']['attributes']['text'], 'text') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "new name") def test_update_citation_name_only_admin_private(self): res, citation = self.request(name="new name", public=False, patch=True, is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], 'new name') assert_equal(res.json['data']['attributes']['text'], 'text') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "new name") def test_update_citation_name_only_non_admin_public(self): res, citation = self.request(name="new name", patch=True, errors=True) 
assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_non_admin_private(self): res, citation = self.request(name="new name", public=False, patch=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_non_contrib_public(self): res, citation = self.request(name="new name", patch=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_non_contrib_private(self): res, citation = self.request(name="new name", public=False, patch=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_logged_out_public(self): res, citation = self.request(name="new name", patch=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_logged_out_private(self): res, citation = self.request(name="new name", public=False, patch=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_admin_public(self): res, citation = self.request(text="new text", patch=True, is_admin=True) assert_equal(res.status_code, 200) assert_equal(res.json['data']['attributes']['name'], 'name') assert_equal(res.json['data']['attributes']['text'], 'new text') citation.reload() assert_equal(citation.text, "new text") assert_equal(citation.name, "name") def test_update_citation_text_only_admin_private(self): res, citation = self.request(text="new text", public=False, patch=True, is_admin=True) assert_equal(res.status_code, 200) citation.reload() assert_equal(res.json['data']['attributes']['name'], 'name') assert_equal(res.json['data']['attributes']['text'], 'new text') assert_equal(citation.text, "new text") assert_equal(citation.name, "name") def test_update_citation_text_only_non_admin_public(self): res, citation = self.request(text="new text", patch=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def 
test_update_citation_text_only_non_admin_private(self): res, citation = self.request(text="new text", public=False, patch=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_non_contrib_public(self): res, citation = self.request(text="new text", patch=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_non_contrib_private(self): res, citation = self.request(text="new text", public=False, patch=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_logged_out_public(self): res, citation = self.request(text="new text", patch=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_logged_out_private(self): res, citation = self.request(text="new text", public=False, patch=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_repeat_admin_public(self): res, citation = self.request(name="name2", patch=True, citation2=True, is_admin=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], "There is already a citation named 'name2'") citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_repeat_admin_private(self): res, citation = self.request(name="name2", public=False, patch=True, citation2=True, is_admin=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], "There is already a citation named 'name2'") citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_repeat_non_admin_public(self): res, citation = self.request(name="name2", patch=True, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_repeat_non_admin_private(self): res, citation = self.request(name="name2", public=False, patch=True, citation2=True, errors=True) assert_equal(res.status_code, 403) 
assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_repeat_non_contrib_public(self): res, citation = self.request(name="name2", patch=True, citation2=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_repeat_non_contrib_private(self): res, citation = self.request(name="name2", public=False, patch=True, citation2=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_repeat_logged_out_public(self): res, citation = self.request(name="name2", patch=True, citation2=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_name_only_repeat_logged_out_private(self): res, citation = self.request(name="name2", public=False, patch=True, citation2=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_repeat_admin_public(self): res, citation = self.request(text="text2", patch=True, citation2=True, is_admin=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], "Citation matches 'name2'") citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_repeat_admin_private(self): res, citation = self.request(text="text2", public=False, patch=True, citation2=True, is_admin=True, errors=True) assert_equal(res.status_code, 400) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], "Citation matches 'name2'") citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_repeat_non_admin_public(self): res, citation = self.request(text="text2", patch=True, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_repeat_non_admin_private(self): res, citation = self.request(text="text2", public=False, patch=True, citation2=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this 
action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_repeat_non_contrib_public(self): res, citation = self.request(text="text2", patch=True, citation2=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_repeat_non_contrib_private(self): res, citation = self.request(text="text2", public=False, patch=True, citation2=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_repeat_logged_out_public(self): res, citation = self.request(text="text2", patch=True, citation2=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_text_only_repeat_logged_out_private(self): res, citation = self.request(text="text2", public=False, patch=True, citation2=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') citation.reload() assert_equal(citation.text, "text") assert_equal(citation.name, "name") def test_update_citation_admin_public_reg(self): res, citation = self.request(name="test", text="Citation", registration=True, is_admin=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_admin_private_reg(self): res, citation = self.request(name="test", text="Citation", public=False, registration=True, is_admin=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_non_admin_public_reg(self): res, citation = self.request(name="test", text="Citation", registration=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_non_admin_private_reg(self): res, citation = self.request(name="test", text="Citation", public=False, registration=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_non_contrib_public_reg(self): res, citation = 
self.request(name="test", text="Citation", registration=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_non_contrib_private_reg(self): res, citation = self.request(name="test", text="Citation", public=False, registration=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_logged_out_public_reg(self): res, citation = self.request(name="test", text="Citation", registration=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') assert_equal(citation.name, "name") assert_equal(citation.text, "text") def test_update_citation_logged_out_private_reg(self): res, citation = self.request(name="test", text="Citation", public=False, registration=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') assert_equal(citation.name, "name") assert_equal(citation.text, "text") class TestDeleteAlternativeCitations(ApiTestCase): def request(self, is_admin=False, is_contrib=True, logged_out=False, errors=False, **kwargs): admin = AuthUserFactory() if is_admin: user = admin elif not logged_out: user = AuthUserFactory() kwargs['contrib'] = user if is_contrib else None project, citation_url = set_up_citation_and_project(admin, for_delete=True, **kwargs) if not logged_out: res = self.app.delete_json_api(citation_url, auth=user.auth, expect_errors=errors) else: res = self.app.delete_json_api(citation_url, expect_errors=errors) return res, project def test_delete_citation_admin_public(self): res, project = self.request(is_admin=True) assert_equal(res.status_code, 204) project.reload() assert_equal(project.alternative_citations.count(), 0) def test_delete_citation_admin_private(self): res, project = self.request(public=False, is_admin=True) assert_equal(res.status_code, 204) project.reload() assert_equal(project.alternative_citations.count(), 0) def test_delete_citation_non_admin_public(self): res, project = self.request(errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_non_admin_private(self): res, project = self.request(public=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_non_contrib_public(self): res, project = self.request(is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') 
project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_non_contrib_private(self): res, project = self.request(public=False, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_logged_out_public(self): res, project = self.request(logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_logged_out_private(self): res, project = self.request(public=False, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_admin_not_found_public(self): res, project = self.request(is_admin=True, bad=True, errors=True) assert_equal(res.status_code, 404) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Not found.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_admin_not_found_private(self): res, project = self.request(public=False, is_admin=True, bad=True, errors=True) assert_equal(res.status_code, 404) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Not found.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_non_admin_not_found_public(self): res, project = self.request(bad=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_non_admin_not_found_private(self): res, project = self.request(public=False, bad=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_non_contrib_not_found_public(self): res, project = self.request(is_contrib=False, bad=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_non_contrib_not_found_private(self): res, project = self.request(public=False, is_contrib=False, bad=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_logged_out_not_found_public(self): res, project = self.request(logged_out=True, bad=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) 
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_logged_out_not_found_private(self): res, project = self.request(public=False, logged_out=True, bad=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') project.reload() assert_equal(project.alternative_citations.count(), 1) def test_delete_citation_admin_public_reg(self): res, registration = self.request(registration=True, is_admin=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(registration.alternative_citations.count(), 1) def test_delete_citation_admin_private_reg(self): res, registration = self.request(public=False, registration=True, is_admin=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(registration.alternative_citations.count(), 1) def test_delete_citation_non_admin_public_reg(self): res, registration = self.request(registration=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(registration.alternative_citations.count(), 1) def test_delete_citation_non_admin_private_reg(self): res, registration = self.request(public=False, registration=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(registration.alternative_citations.count(), 1) def test_delete_citation_non_contrib_public_reg(self): res, registration = self.request(registration=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(registration.alternative_citations.count(), 1) def test_delete_citation_non_contrib_private_reg(self): res, registration = self.request(public=False, registration=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') assert_equal(registration.alternative_citations.count(), 1) def test_delete_citation_logged_out_public_reg(self): res, registration = self.request(registration=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') assert_equal(registration.alternative_citations.count(), 1) def test_delete_citation_logged_out_private_reg(self): res, registration = self.request(public=False, registration=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') assert_equal(registration.alternative_citations.count(), 1) class 
TestGetAlternativeCitations(ApiTestCase): def request(self, is_admin=False, is_contrib=True, logged_out=False, errors=False, **kwargs): admin = AuthUserFactory() if is_admin: user = admin elif not logged_out: user = AuthUserFactory() kwargs['contrib'] = user if is_contrib else None citation, citation_url = set_up_citation_and_project(admin, **kwargs) if not logged_out: res = self.app.get(citation_url, auth=user.auth, expect_errors=errors) else: res = self.app.get(citation_url, expect_errors=errors) return res, citation def test_get_citation_admin_public(self): res, citation = self.request(is_admin=True) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_admin_private(self): res, citation = self.request(public=False, is_admin=True) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_non_admin_public(self): res, citation = self.request() assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_non_admin_private(self): res, citation = self.request(public=False) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_non_contrib_public(self): res, citation = self.request(is_contrib=False) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_non_contrib_private(self): res, citation = self.request(public=False, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') def test_get_citation_logged_out_public(self): res, citation = self.request(logged_out=True) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_logged_out_private(self): res, citation = self.request(public=False, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') def test_get_citation_admin_not_found_public(self): res, citation = self.request(is_admin=True, bad=True, errors=True) assert_equal(res.status_code, 404) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Not found.') def test_get_citation_admin_not_found_private(self): res, citation = self.request(public=False, is_admin=True, bad=True, errors=True) assert_equal(res.status_code, 404) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Not found.') def test_get_citation_non_admin_not_found_public(self): res, citation = self.request(bad=True, errors=True) assert_equal(res.status_code, 404) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Not found.') def test_get_citation_non_admin_not_found_private(self): res, citation = self.request(public=False, bad=True, errors=True) assert_equal(res.status_code, 404) 
assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Not found.') def test_get_citation_non_contrib_not_found_public(self): res, citation = self.request(is_contrib=False, bad=True, errors=True) assert_equal(res.status_code, 404) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Not found.') def test_get_citation_non_contrib_not_found_private(self): res, citation = self.request(public=False, is_contrib=False, bad=True, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') def test_get_citation_logged_out_not_found_public(self): res, citation = self.request(logged_out=True, bad=True, errors=True) assert_equal(res.status_code, 404) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Not found.') def test_get_citation_logged_out_not_found_private(self): res, citation = self.request(public=False, logged_out=True, bad=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') def test_get_citation_admin_public_reg(self): res, citation = self.request(registration=True, is_admin=True) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_admin_private_reg(self): res, citation = self.request(public=False, registration=True, is_admin=True) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_non_admin_public_reg(self): res, citation = self.request(registration=True) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_non_admin_private_reg(self): res, citation = self.request(public=False, registration=True) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_non_contrib_public_reg(self): res, citation = self.request(registration=True, is_contrib=False) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_non_contrib_private_reg(self): res, citation = self.request(public=False, registration=True, is_contrib=False, errors=True) assert_equal(res.status_code, 403) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.') def test_get_citation_logged_out_public_reg(self): res, citation = self.request(registration=True, logged_out=True) assert_equal(res.status_code, 200) attributes = res.json['data']['attributes'] assert_equal(attributes['name'], 'name') assert_equal(attributes['text'], 'text') def test_get_citation_logged_out_private_reg(self): res, citation = self.request(public=False, registration=True, logged_out=True, errors=True) assert_equal(res.status_code, 401) assert_equal(len(res.json['errors']), 1) assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.') class 
TestManualCitationCorrections(ApiTestCase):

    def setUp(self):
        super(TestManualCitationCorrections, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user, is_public=True, title="My Project")

    def test_apa_citation(self):
        citation = citation_utils.render_citation(self.project, 'apa')
        expected_citation = self.user.family_name + ', ' + self.user.given_name_initial + '. (' + \
                            self.project.date_created.strftime("%Y, %B %-d") + '). ' + self.project.title + \
                            '. Retrieved from ' + self.project.display_absolute_url
        assert_equal(citation, expected_citation)

    def test_mla_citation(self):
        csl = self.project.csl
        citation = citation_utils.render_citation(self.project, 'modern-language-association')
        expected_citation = csl['author'][0]['family'] + ', ' + csl['author'][0]['given'] + '. ' + u"\u201c" + csl['title'] + u"\u201d" + '. ' +\
                            csl['publisher'] + ', ' + (self.project.date_created.strftime("%-d %b. %Y. Web.") if self.project.date_created.month != 5 else self.project.date_created.strftime("%-d %b %Y. Web."))
        assert_equal(citation, expected_citation)

    def test_chicago_citation(self):
        csl = self.project.csl
        citation = citation_utils.render_citation(self.project, 'chicago-author-date')
        expected_citation = csl['author'][0]['family'] + ', ' + csl['author'][0]['given'] + '. ' + str(csl['issued']['date-parts'][0][0]) + '. ' + u"\u201c" + csl['title'] + u"\u201d" + '. ' + csl['publisher'] +'. ' + self.project.date_created.strftime("%B %-d") + '. ' + csl['URL'] + '.'
        assert_equal(citation, expected_citation)
Big-B702/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/encodings/mac_turkish.py
272
""" Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='mac-turkish', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> CONTROL CHARACTER '\x01' # 0x01 -> CONTROL CHARACTER '\x02' # 0x02 -> CONTROL CHARACTER '\x03' # 0x03 -> CONTROL CHARACTER '\x04' # 0x04 -> CONTROL CHARACTER '\x05' # 0x05 -> CONTROL CHARACTER '\x06' # 0x06 -> CONTROL CHARACTER '\x07' # 0x07 -> CONTROL CHARACTER '\x08' # 0x08 -> CONTROL CHARACTER '\t' # 0x09 -> CONTROL CHARACTER '\n' # 0x0A -> CONTROL CHARACTER '\x0b' # 0x0B -> CONTROL CHARACTER '\x0c' # 0x0C -> CONTROL CHARACTER '\r' # 0x0D -> CONTROL CHARACTER '\x0e' # 0x0E -> CONTROL CHARACTER '\x0f' # 0x0F -> CONTROL CHARACTER '\x10' # 0x10 -> CONTROL CHARACTER '\x11' # 0x11 -> CONTROL CHARACTER '\x12' # 0x12 -> CONTROL CHARACTER '\x13' # 0x13 -> CONTROL CHARACTER '\x14' # 0x14 -> CONTROL CHARACTER '\x15' # 0x15 -> CONTROL CHARACTER '\x16' # 0x16 -> CONTROL CHARACTER '\x17' # 0x17 -> CONTROL CHARACTER '\x18' # 0x18 -> CONTROL CHARACTER '\x19' # 0x19 -> CONTROL CHARACTER '\x1a' # 0x1A -> CONTROL CHARACTER '\x1b' # 0x1B -> CONTROL CHARACTER '\x1c' # 0x1C -> CONTROL CHARACTER '\x1d' # 0x1D -> CONTROL CHARACTER '\x1e' # 0x1E -> CONTROL CHARACTER '\x1f' # 0x1F -> CONTROL CHARACTER ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> CONTROL CHARACTER '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS '\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE '\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE '\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE '\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE '\xf4' 
# 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS '\u2020' # 0xA0 -> DAGGER '\xb0' # 0xA1 -> DEGREE SIGN '\xa2' # 0xA2 -> CENT SIGN '\xa3' # 0xA3 -> POUND SIGN '\xa7' # 0xA4 -> SECTION SIGN '\u2022' # 0xA5 -> BULLET '\xb6' # 0xA6 -> PILCROW SIGN '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S '\xae' # 0xA8 -> REGISTERED SIGN '\xa9' # 0xA9 -> COPYRIGHT SIGN '\u2122' # 0xAA -> TRADE MARK SIGN '\xb4' # 0xAB -> ACUTE ACCENT '\xa8' # 0xAC -> DIAERESIS '\u2260' # 0xAD -> NOT EQUAL TO '\xc6' # 0xAE -> LATIN CAPITAL LETTER AE '\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE '\u221e' # 0xB0 -> INFINITY '\xb1' # 0xB1 -> PLUS-MINUS SIGN '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO '\xa5' # 0xB4 -> YEN SIGN '\xb5' # 0xB5 -> MICRO SIGN '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL '\u2211' # 0xB7 -> N-ARY SUMMATION '\u220f' # 0xB8 -> N-ARY PRODUCT '\u03c0' # 0xB9 -> GREEK SMALL LETTER PI '\u222b' # 0xBA -> INTEGRAL '\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR '\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR '\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA '\xe6' # 0xBE -> LATIN SMALL LETTER AE '\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE '\xbf' # 0xC0 -> INVERTED QUESTION MARK '\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK '\xac' # 0xC2 -> NOT SIGN '\u221a' # 0xC3 -> SQUARE ROOT '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK '\u2248' # 0xC5 -> ALMOST EQUAL TO '\u2206' # 0xC6 -> INCREMENT '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS '\xa0' # 0xCA -> NO-BREAK SPACE '\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE '\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE '\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE '\u0153' # 0xCF -> LATIN SMALL LIGATURE OE '\u2013' # 0xD0 -> EN DASH '\u2014' # 0xD1 -> EM DASH '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK '\xf7' # 0xD6 -> DIVISION SIGN '\u25ca' # 0xD7 -> LOZENGE '\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS '\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS '\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE '\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE '\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE '\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I '\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA '\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA '\u2021' # 0xE0 -> DOUBLE DAGGER '\xb7' # 0xE1 -> MIDDLE DOT '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK '\u2030' # 0xE4 -> PER MILLE SIGN '\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE '\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH 
GRAVE '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\uf8ff' # 0xF0 -> Apple logo '\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE '\uf8a0' # 0xF5 -> undefined1 '\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT '\u02dc' # 0xF7 -> SMALL TILDE '\xaf' # 0xF8 -> MACRON '\u02d8' # 0xF9 -> BREVE '\u02d9' # 0xFA -> DOT ABOVE '\u02da' # 0xFB -> RING ABOVE '\xb8' # 0xFC -> CEDILLA '\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT '\u02db' # 0xFE -> OGONEK '\u02c7' # 0xFF -> CARON ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
SUNNYANDPJ/MongoAlchemy
refs/heads/master
mongoalchemy/query.py
2
# The MIT License
#
# Copyright (c) 2010 Jeffrey Jenkins
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from __future__ import print_function
from mongoalchemy.py3compat import *

from functools import wraps
from pymongo import ASCENDING, DESCENDING
from copy import copy, deepcopy

from mongoalchemy.exceptions import BadValueException, BadResultException
from mongoalchemy.query_expression import QueryExpression, BadQueryException, flatten, FreeFormDoc
from mongoalchemy.update_expression import UpdateExpression, FindAndModifyExpression
from mongoalchemy.util import resolve_name


class Query(object):
    ''' A query object has all of the methods necessary to programmatically
        generate a mongo query as well as methods to retrieve results of the
        query or do an update based on it.

        In general a query object should be created via ``Session.query``,
        not directly.
    '''
    def __init__(self, type, session, exclude_subclasses=False):
        ''' :param type: A subclass of class:`mongoalchemy.document.Document`
            :param db: The :class:`~mongoalchemy.session.Session` which this query is associated with.
            :param exclude_subclasses: If this is set to false (the default) and `type` \
                is polymorphic then subclasses are also retrieved.
        '''
        self.session = session
        self.type = type
        self.__query = type.base_query(exclude_subclasses)
        self._sort = []
        self._fields = None
        self.hints = []
        self._limit = None
        self._skip = None
        self._raw_output = False

    def __iter__(self):
        return self.__get_query_result()

    @property
    def query(self):
        """ The mongo query object which would be executed if this Query
            object were used """
        return flatten(self.__query)

    def __get_query_result(self):
        return self.session.execute_query(self, self.session)

    def raw_output(self):
        """ Turns on raw output, meaning that the MongoAlchemy ORM layer is
            skipped and the results from pymongo are returned.  Useful if you
            want to use the query functionality without getting python objects
            back """
        self._raw_output = True
        return self

    def _get_fields(self):
        return self._fields

    def _get_limit(self):
        return self._limit

    def _get_skip(self):
        return self._skip

    def limit(self, limit):
        ''' Sets the limit on the number of documents returned

            :param limit: the number of documents to return
        '''
        self._limit = limit
        return self

    def skip(self, skip):
        ''' Sets the number of documents to skip in the result

            :param skip: the number of documents to skip
        '''
        self._skip = skip
        return self

    def clone(self):
        ''' Creates a clone of the current query and all settings.  Further
            updates to the cloned object or the original object will not
            affect each other
        '''
        qclone = Query(self.type, self.session)
        qclone.__query = deepcopy(self.__query)
        qclone._sort = deepcopy(self._sort)
        qclone._fields = deepcopy(self._fields)
        qclone._hints = deepcopy(self.hints)
        qclone._limit = deepcopy(self._limit)
        qclone._skip = deepcopy(self._skip)
        qclone._raw_output = deepcopy(self._raw_output)
        return qclone

    def one(self):
        ''' Execute the query and return one result.  If more than one result
            is returned, raises a ``BadResultException``
        '''
        count = -1
        for count, result in enumerate(self):
            if count > 0:
                raise BadResultException('Too many results for .one()')
        if count == -1:
            raise BadResultException('Too few results for .one()')
        return result

    def first(self):
        ''' Execute the query and return the first result.  Unlike ``one``, if
            there are multiple documents it simply returns the first one.  If
            there are no documents, first returns ``None``
        '''
        for doc in iter(self):
            return doc
        return None

    def __getitem__(self, index):
        return self.__get_query_result().__getitem__(index)

    def hint_asc(self, qfield):
        ''' Applies a hint for the query that it should use a
            (``qfield``, ASCENDING) index when performing the query.

            :param qfield: the instance of :class:`mongoalchemy.QueryField` to use as the key.
        '''
        return self.__hint(qfield, ASCENDING)

    def hint_desc(self, qfield):
        ''' Applies a hint for the query that it should use a
            (``qfield``, DESCENDING) index when performing the query.

            :param qfield: the instance of :class:`mongoalchemy.QueryField` to use as the key.
        '''
        return self.__hint(qfield, DESCENDING)

    def __hint(self, qfield, direction):
        qfield = resolve_name(self.type, qfield)
        name = str(qfield)
        for n, _ in self.hints:
            if n == name:
                raise BadQueryException('Already gave hint for %s' % name)
        self.hints.append((name, direction))
        return self

    def explain(self):
        ''' Executes an explain operation on the database for the current
            query and returns the raw explain object returned.
        '''
        return self.__get_query_result().cursor.explain()

    def all(self):
        ''' Return all of the results of a query in a list'''
        return [obj for obj in iter(self)]

    def distinct(self, key):
        ''' Execute this query and return all of the unique values of ``key``.

            :param key: the instance of :class:`mongoalchemy.QueryField` to use as the distinct key.
        '''
        return self.__get_query_result().cursor.distinct(str(key))

    def filter(self, *query_expressions):
        ''' Apply the given query expressions to this query object

            **Example**: ``s.query(SomeObj).filter(SomeObj.age > 10, SomeObj.blood_type == 'O')``

            :param query_expressions: Instances of :class:`mongoalchemy.query_expression.QueryExpression`

            .. seealso:: :class:`~mongoalchemy.query_expression.QueryExpression` class
        '''
        for qe in query_expressions:
            if isinstance(qe, dict):
                self._apply_dict(qe)
            else:
                self._apply(qe)
        return self

    def filter_by(self, **filters):
        ''' Filter for the names in ``filters`` being equal to the associated
            values.  Cannot be used for sub-objects since keys must be strings'''
        for name, value in filters.items():
            self.filter(resolve_name(self.type, name) == value)
        return self

    def count(self, with_limit_and_skip=False):
        ''' Execute a count on the number of results this query would return.

            :param with_limit_and_skip: Include ``.limit()`` and ``.skip()`` arguments in the count?
        '''
        return self.__get_query_result().cursor.count(with_limit_and_skip=with_limit_and_skip)

    def fields(self, *fields):
        ''' Only return the specified fields from the object.
            Accessing a \
            field that was not specified in ``fields`` will result in a \
            :class:``mongoalchemy.document.FieldNotRetrieved`` exception being \
            raised

            :param fields: Instances of :class:``mongoalchemy.query.QueryField`` specifying \
                which fields to return
        '''
        if self._fields is None:
            self._fields = set()
        for f in fields:
            f = resolve_name(self.type, f)
            self._fields.add(f)
        self._fields.add(self.type.mongo_id)
        return self

    def _fields_expression(self):
        fields = {}
        for f in self._get_fields():
            fields[f.get_absolute_name()] = f.fields_expression
        return fields

    def _apply(self, qe):
        ''' Apply a raw mongo query to the current raw query object'''
        self._apply_dict(qe.obj)

    def _apply_dict(self, qe_dict):
        ''' Apply a query expression, updating the query object '''
        for k, v in qe_dict.items():
            k = resolve_name(self.type, k)
            if not k in self.__query:
                self.__query[k] = v
                continue
            if not isinstance(self.__query[k], dict) or not isinstance(v, dict):
                raise BadQueryException('Multiple assignments to a field must all be dicts.')
            self.__query[k].update(**v)

    def ascending(self, qfield):
        ''' Sort the result based on ``qfield`` in ascending order.  These
            calls can be chained to sort by multiple fields.

            :param qfield: Instance of :class:``mongoalchemy.query.QueryField`` \
                specifying which field to sort by.
        '''
        return self.__sort(qfield, ASCENDING)

    def descending(self, qfield):
        ''' Sort the result based on ``qfield`` in descending order.  These
            calls can be chained to sort by multiple fields.

            :param qfield: Instance of :class:``mongoalchemy.query.QueryField`` \
                specifying which field to sort by.
        '''
        return self.__sort(qfield, DESCENDING)

    def sort(self, *sort_tuples):
        ''' pymongo-style sorting.  Accepts a list of tuples.

            :param sort_tuples: varargs of sort tuples.
        '''
        query = self
        for name, direction in sort_tuples:
            field = resolve_name(self.type, name)
            if direction in (ASCENDING, 1):
                query = query.ascending(field)
            elif direction in (DESCENDING, -1):
                query = query.descending(field)
            else:
                raise BadQueryException('Bad sort direction: %s' % direction)
        return query

    def __sort(self, qfield, direction):
        qfield = resolve_name(self.type, qfield)
        name = str(qfield)
        for n, _ in self._sort:
            if n == name:
                raise BadQueryException('Already sorting by %s' % name)
        self._sort.append((name, direction))
        return self

    def not_(self, *query_expressions):
        ''' Add a $not expression to the query, negating the query
            expressions given.

            **Examples**: ``query.not_(SomeDocClass.age <= 18)`` becomes ``{'age' : { '$not' : { '$gt' : 18 } }}``

            :param query_expressions: Instances of :class:`mongoalchemy.query_expression.QueryExpression`
        '''
        for qe in query_expressions:
            self.filter(qe.not_())
        return self

    def or_(self, first_qe, *qes):
        ''' Add a $or expression to the query, matching documents that
            satisfy any of the query expressions given.
The ``| operator`` on query expressions does the same thing **Examples**: ``query.or_(SomeDocClass.age == 18, SomeDocClass.age == 17)`` becomes ``{'$or' : [{ 'age' : 18 }, { 'age' : 17 }]}`` :param query_expressions: Instances of :class:`mongoalchemy.query_expression.QueryExpression` ''' res = first_qe for qe in qes: res = (res | qe) self.filter(res) return self def in_(self, qfield, *values): ''' Check to see that the value of ``qfield`` is one of ``values`` :param qfield: Instances of :class:`mongoalchemy.query_expression.QueryExpression` :param values: Values should be python values which ``qfield`` \ understands ''' # TODO: make sure that this field represents a list qfield = resolve_name(self.type, qfield) self.filter(QueryExpression({ qfield : { '$in' : [qfield.wrap_value(value) for value in values]}})) return self def nin(self, qfield, *values): ''' Check to see that the value of ``qfield`` is not one of ``values`` :param qfield: Instances of :class:`mongoalchemy.query_expression.QueryExpression` :param values: Values should be python values which ``qfield`` \ understands ''' # TODO: make sure that this field represents a list qfield = resolve_name(self.type, qfield) self.filter(QueryExpression({ qfield : { '$nin' : [qfield.wrap_value(value) for value in values]}})) return self def find_and_modify(self, new=False, remove=False): ''' The mongo "find and modify" command. Behaves like an update expression in that "execute" must be called to do the update and return the results. :param new: Whether to return the new object or old (default: False) :param remove: Whether to remove the object before returning it ''' return FindAndModifyExpression(self, new=new, remove=remove) def set(self, *args, **kwargs): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.set`''' return UpdateExpression(self).set(*args, **kwargs) def unset(self, qfield): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.unset`''' return UpdateExpression(self).unset(qfield) def inc(self, *args, **kwargs): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.inc`''' return UpdateExpression(self).inc(*args, **kwargs) def append(self, qfield, value): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.append`''' return UpdateExpression(self).append(qfield, value) def extend(self, qfield, *value): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.extend`''' return UpdateExpression(self).extend(qfield, *value) def remove(self, qfield, value): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.remove`''' return UpdateExpression(self).remove(qfield, value) def remove_all(self, qfield, *value): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.remove_all`''' return UpdateExpression(self).remove_all(qfield, *value) def add_to_set(self, qfield, value): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.add_to_set`''' return UpdateExpression(self).add_to_set(qfield, value) def pop_first(self, qfield): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.pop_first`''' return UpdateExpression(self).pop_first(qfield) def pop_last(self, qfield): ''' Refer to: :func:`~mongoalchemy.update_expression.UpdateExpression.pop_last`''' return UpdateExpression(self).pop_last(qfield) class QueryResult(object): def __init__(self, session, cursor, type, raw_output=False, fields=None): self.cursor = cursor self.type = type self.fields = fields self.raw_output = raw_output self.session 
= session def next(self): return self._next_internal() __next__ = next def _next_internal(self): value = next(self.cursor) if not self.raw_output: db = self.cursor.collection.database conn = db.connection obj = self.session.cache_read(value['_id']) if obj: return obj value = self.session._unwrap(self.type, value, fields=self.fields) if not isinstance(value, dict): self.session.cache_write(value) return value def __getitem__(self, index): value = self.cursor.__getitem__(index) if not self.raw_output: db = self.cursor.collection.database conn = db.connection obj = self.session.cache_read(value['_id']) if obj: return obj value = self.session._unwrap(self.type, value) self.session.cache_write(value) # value = self.session.localize(session, value) return value def rewind(self): return self.cursor.rewind() def clone(self): return QueryResult(self.session, self.cursor.clone(), self.type, raw_output=self.raw_output, fields=self.fields) def __iter__(self): return self class RemoveQuery(object): def __init__(self, type, session): ''' Execute a remove query to remove the matched objects from the database :param type: A subclass of class:`mongoalchemy.document.Document` :param db: The :class:`~mongoalchemy.session.Session` which this query is associated with. ''' self.session = session self.type = type self.safe = None self.get_last_args = {} self.__query_obj = Query(type, session) @property def query(self): return self.__query_obj.query def set_safe(self, is_safe, **kwargs): ''' Set this remove to be safe. It will call getLastError after the remove to make sure it was successful. ``**kwargs`` are parameters to MongoDB's getLastError command (as in pymongo's remove). ''' self.safe = is_safe self.get_last_args.update(**kwargs) return self def execute(self): ''' Run the remove command on the session. Return the result of ``getLastError`` if ``safe`` is ``True``''' return self.session.execute_remove(self) def filter(self, *query_expressions): ''' Filter the remove expression with ``*query_expressions``, as in the ``Query`` filter method.''' self.__query_obj.filter(*query_expressions) return self def filter_by(self, **filters): ''' Filter for the names in ``filters`` being equal to the associated values. Cannot be used for sub-objects since keys must be strings''' self.__query_obj.filter_by(**filters) return self # def not_(self, *query_expressions): # self.__query_obj.not_(*query_expressions) # self.query = self.__query_obj.query # return self def or_(self, first_qe, *qes): ''' Works the same as the query expression method ``or_`` ''' self.__query_obj.or_(first_qe, *qes) return self def in_(self, qfield, *values): ''' Works the same as the query expression method ``in_`` ''' self.__query_obj.in_(qfield, *values) return self def nin(self, qfield, *values): ''' Works the same as the query expression method ``nin_`` ''' self.__query_obj.nin(qfield, *values) return self
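A minimal usage sketch for the Query, UpdateExpression and RemoveQuery machinery above, assuming a hypothetical `User` document class and a reachable local MongoDB; the Session/Document/field imports follow MongoAlchemy's documented tutorial style and are assumptions, while the chained query methods (filter, filter_by, ascending, first, all, count, inc) are the ones defined above.

from mongoalchemy.session import Session            # assumed import paths
from mongoalchemy.document import Document
from mongoalchemy.fields import StringField, IntField

class User(Document):                                # hypothetical document class
    name = StringField()
    age = IntField()

session = Session.connect('example-db')              # assumes a local MongoDB instance

# Chained filtering and sorting, materialized with .all()
adults = session.query(User).filter(User.age > 18).ascending(User.name).all()

# filter_by / first / count, as defined above
bob = session.query(User).filter_by(name='Bob').first()
n_teens = session.query(User).filter(User.age >= 13, User.age < 20).count()

# An update expression built from a query; .execute() runs it
# (the find_and_modify docstring above notes that update expressions are executed this way)
session.query(User).filter(User.name == 'Bob').inc(User.age, 1).execute()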
djangoeshop/eshop
refs/heads/master
goods/urls.py
2
from django.conf.urls import include, url, patterns

urlpatterns = [
    # Examples:
    # url(r'^$', 'webcams.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^good_card/get/(?P<Wares_id>\d+)/$', 'goods.views.good_card'),
    url(r'^$', 'goods.views.index'),
]
Asana/boto
refs/heads/develop
boto/roboto/__init__.py
9480
#
Yipit/troposphere
refs/heads/master
troposphere/ec2.py
2
# Copyright (c) 2012-2013, Mark Peek <[email protected]> # All rights reserved. # # See LICENSE file for full license. from . import AWSHelperFn, AWSObject, AWSProperty, FindInMap, Ref from .validators import ( boolean, integer, integer_range, network_port, positive_integer ) try: from awacs.aws import Policy policytypes = (dict, Policy) except ImportError: policytypes = dict, class Tag(AWSHelperFn): def __init__(self, key, value): self.data = {'Key': key, 'Value': value} def JSONrepr(self): return self.data class CustomerGateway(AWSObject): resource_type = "AWS::EC2::CustomerGateway" props = { 'BgpAsn': (integer, True), 'IpAddress': (basestring, True), 'Tags': (list, False), 'Type': (basestring, True), } class DHCPOptions(AWSObject): resource_type = "AWS::EC2::DHCPOptions" props = { 'DomainName': (basestring, False), 'DomainNameServers': (list, False), 'NetbiosNameServers': (list, False), 'NetbiosNodeType': (integer, False), 'NtpServers': (list, False), 'Tags': (list, False), } class EIP(AWSObject): resource_type = "AWS::EC2::EIP" props = { 'InstanceId': (basestring, False), 'Domain': (basestring, False), } class EIPAssociation(AWSObject): resource_type = "AWS::EC2::EIPAssociation" props = { 'AllocationId': (basestring, False), 'EIP': (basestring, False), 'InstanceId': (basestring, False), 'NetworkInterfaceId': (basestring, False), 'PrivateIpAddress': (basestring, False), } class NatGateway(AWSObject): resource_type = "AWS::EC2::NatGateway" props = { 'AllocationId': (basestring, True), 'SubnetId': (basestring, True), } class EBSBlockDevice(AWSProperty): props = { 'DeleteOnTermination': (boolean, False), 'Encrypted': (boolean, False), 'Iops': (integer, False), # Conditional 'SnapshotId': (basestring, False), # Conditional 'VolumeSize': (integer, False), # Conditional 'VolumeType': (basestring, False), } class BlockDeviceMapping(AWSProperty): props = { 'DeviceName': (basestring, True), 'Ebs': (EBSBlockDevice, False), # Conditional 'NoDevice': (dict, False), 'VirtualName': (basestring, False), # Conditional } class MountPoint(AWSProperty): props = { 'Device': (basestring, True), 'VolumeId': (basestring, True), } class PrivateIpAddressSpecification(AWSProperty): props = { 'Primary': (boolean, True), 'PrivateIpAddress': (basestring, True), } class NetworkInterfaceProperty(AWSProperty): props = { 'AssociatePublicIpAddress': (boolean, False), 'DeleteOnTermination': (boolean, False), 'Description': (basestring, False), 'DeviceIndex': (integer, True), 'GroupSet': ([basestring, FindInMap, Ref], False), 'NetworkInterfaceId': (basestring, False), 'PrivateIpAddress': (basestring, False), 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False), 'SecondaryPrivateIpAddressCount': (integer, False), 'SubnetId': (basestring, False), } class AssociationParameters(AWSProperty): props = { 'Key': (basestring, True), 'Value': (basestring, True), } class SsmAssociations(AWSProperty): props = { 'AssociationParameters': ([AssociationParameters], False), 'DocumentName': (basestring, True), } class Instance(AWSObject): resource_type = "AWS::EC2::Instance" props = { 'AvailabilityZone': (basestring, False), 'BlockDeviceMappings': (list, False), 'DisableApiTermination': (boolean, False), 'EbsOptimized': (boolean, False), 'IamInstanceProfile': (basestring, False), 'ImageId': (basestring, True), 'InstanceInitiatedShutdownBehavior': (basestring, False), 'InstanceType': (basestring, False), 'KernelId': (basestring, False), 'KeyName': (basestring, False), 'Monitoring': (boolean, False), 'NetworkInterfaces': 
([NetworkInterfaceProperty], False), 'PlacementGroupName': (basestring, False), 'PrivateIpAddress': (basestring, False), 'RamdiskId': (basestring, False), 'SecurityGroupIds': (list, False), 'SecurityGroups': (list, False), 'SsmAssociations': ([SsmAssociations], False), 'SourceDestCheck': (boolean, False), 'SubnetId': (basestring, False), 'Tags': (list, False), 'Tenancy': (basestring, False), 'UserData': (basestring, False), 'Volumes': (list, False), } class InternetGateway(AWSObject): resource_type = "AWS::EC2::InternetGateway" props = { 'Tags': (list, False), } class NetworkAcl(AWSObject): resource_type = "AWS::EC2::NetworkAcl" props = { 'Tags': (list, False), 'VpcId': (basestring, True), } class ICMP(AWSProperty): props = { 'Code': (integer, False), 'Type': (integer, False), } class PortRange(AWSProperty): props = { 'From': (network_port, False), 'To': (network_port, False), } class NetworkAclEntry(AWSObject): resource_type = "AWS::EC2::NetworkAclEntry" props = { 'CidrBlock': (basestring, True), 'Egress': (boolean, True), 'Icmp': (ICMP, False), # Conditional 'NetworkAclId': (basestring, True), 'PortRange': (PortRange, False), # Conditional 'Protocol': (network_port, True), 'RuleAction': (basestring, True), 'RuleNumber': (integer_range(1, 32766), True), } class NetworkInterface(AWSObject): resource_type = "AWS::EC2::NetworkInterface" props = { 'Description': (basestring, False), 'GroupSet': (list, False), 'PrivateIpAddress': (basestring, False), 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False), 'SecondaryPrivateIpAddressCount': (integer, False), 'SourceDestCheck': (boolean, False), 'SubnetId': (basestring, True), 'Tags': (list, False), } class NetworkInterfaceAttachment(AWSObject): resource_type = "AWS::EC2::NetworkInterfaceAttachment" props = { 'DeleteOnTermination': (boolean, False), 'DeviceIndex': (integer, True), 'InstanceId': (basestring, True), 'NetworkInterfaceId': (basestring, True), } class Route(AWSObject): resource_type = "AWS::EC2::Route" props = { 'DestinationCidrBlock': (basestring, True), 'GatewayId': (basestring, False), 'InstanceId': (basestring, False), 'NatGatewayId': (basestring, False), 'NetworkInterfaceId': (basestring, False), 'RouteTableId': (basestring, True), 'VpcPeeringConnectionId': (basestring, False), } class RouteTable(AWSObject): resource_type = "AWS::EC2::RouteTable" props = { 'Tags': (list, False), 'VpcId': (basestring, True), } class SecurityGroupEgress(AWSObject): resource_type = "AWS::EC2::SecurityGroupEgress" props = { 'CidrIp': (basestring, False), 'DestinationSecurityGroupId': (basestring, False), 'FromPort': (network_port, True), 'GroupId': (basestring, True), 'IpProtocol': (basestring, True), 'ToPort': (network_port, True), # # Workaround for a bug in CloudFormation and EC2 where the # DestinationSecurityGroupId property is ignored causing # egress rules targeting a security group to be ignored. # Using SourceSecurityGroupId instead works fine even in # egress rules. AWS have known about this bug for a while. 
# 'SourceSecurityGroupId': (basestring, False), } class SecurityGroupIngress(AWSObject): resource_type = "AWS::EC2::SecurityGroupIngress" props = { 'CidrIp': (basestring, False), 'FromPort': (network_port, False), 'GroupName': (basestring, False), 'GroupId': (basestring, False), 'IpProtocol': (basestring, True), 'SourceSecurityGroupName': (basestring, False), 'SourceSecurityGroupId': (basestring, False), 'SourceSecurityGroupOwnerId': (basestring, False), 'ToPort': (network_port, False), } class SecurityGroupRule(AWSProperty): props = { 'CidrIp': (basestring, False), 'FromPort': (network_port, True), 'IpProtocol': (basestring, True), 'SourceSecurityGroupId': (basestring, False), 'SourceSecurityGroupName': (basestring, False), 'SourceSecurityGroupOwnerId': (basestring, False), 'ToPort': (network_port, True), 'DestinationSecurityGroupId': (basestring, False), } class SecurityGroup(AWSObject): resource_type = "AWS::EC2::SecurityGroup" props = { 'GroupDescription': (basestring, True), 'SecurityGroupEgress': (list, False), 'SecurityGroupIngress': (list, False), 'VpcId': (basestring, False), 'Tags': (list, False), } class Subnet(AWSObject): resource_type = "AWS::EC2::Subnet" props = { 'AvailabilityZone': (basestring, False), 'CidrBlock': (basestring, True), 'MapPublicIpOnLaunch': (boolean, False), 'Tags': (list, False), 'VpcId': (basestring, True), } class SubnetNetworkAclAssociation(AWSObject): resource_type = "AWS::EC2::SubnetNetworkAclAssociation" props = { 'SubnetId': (basestring, True), 'NetworkAclId': (basestring, True), } class SubnetRouteTableAssociation(AWSObject): resource_type = "AWS::EC2::SubnetRouteTableAssociation" props = { 'RouteTableId': (basestring, True), 'SubnetId': (basestring, True), } class Volume(AWSObject): resource_type = "AWS::EC2::Volume" props = { 'AutoEnableIO': (boolean, False), 'AvailabilityZone': (basestring, True), 'Encrypted': (boolean, False), 'Iops': (integer, False), 'KmsKeyId': (basestring, False), 'Size': (basestring, False), 'SnapshotId': (basestring, False), 'Tags': (list, False), 'VolumeType': (basestring, False), } class VolumeAttachment(AWSObject): resource_type = "AWS::EC2::VolumeAttachment" props = { 'Device': (basestring, True), 'InstanceId': (basestring, True), 'VolumeId': (basestring, True), } class VPC(AWSObject): resource_type = "AWS::EC2::VPC" props = { 'CidrBlock': (basestring, True), 'EnableDnsSupport': (boolean, False), 'EnableDnsHostnames': (boolean, False), 'InstanceTenancy': (basestring, False), 'Tags': (list, False), } class VPCDHCPOptionsAssociation(AWSObject): resource_type = "AWS::EC2::VPCDHCPOptionsAssociation" props = { 'DhcpOptionsId': (basestring, True), 'VpcId': (basestring, True), } class VPCEndpoint(AWSObject): resource_type = "AWS::EC2::VPCEndpoint" props = { 'PolicyDocument': (policytypes, False), 'RouteTableIds': ([basestring, Ref], False), 'ServiceName': (basestring, True), 'VpcId': (basestring, True), } class VPCGatewayAttachment(AWSObject): resource_type = "AWS::EC2::VPCGatewayAttachment" props = { 'InternetGatewayId': (basestring, False), 'VpcId': (basestring, True), 'VpnGatewayId': (basestring, False), } class VPNConnection(AWSObject): resource_type = "AWS::EC2::VPNConnection" props = { 'Type': (basestring, True), 'CustomerGatewayId': (basestring, True), 'StaticRoutesOnly': (boolean, False), 'Tags': (list, False), 'VpnGatewayId': (basestring, True), } class VPNConnectionRoute(AWSObject): resource_type = "AWS::EC2::VPNConnectionRoute" props = { 'DestinationCidrBlock': (basestring, True), 'VpnConnectionId': (basestring, 
True), } class VPNGateway(AWSObject): resource_type = "AWS::EC2::VPNGateway" props = { 'Type': (basestring, True), 'Tags': (list, False), } class VPNGatewayRoutePropagation(AWSObject): resource_type = "AWS::EC2::VPNGatewayRoutePropagation" props = { 'RouteTableIds': ([basestring, Ref], True), 'VpnGatewayId': (basestring, True), } class VPCPeeringConnection(AWSObject): resource_type = "AWS::EC2::VPCPeeringConnection" props = { 'PeerVpcId': (basestring, True), 'VpcId': (basestring, True), 'Tags': (list, False), } class Monitoring(AWSProperty): props = { 'Enabled': (boolean, False), } class NetworkInterfaces(AWSProperty): props = { 'AssociatePublicIpAddress': (boolean, False), 'DeleteOnTermination': (boolean, False), 'Description': (basestring, False), 'DeviceIndex': (integer, True), 'Groups': ([basestring], False), 'NetworkInterfaceId': (basestring, False), 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False), 'SecondaryPrivateIpAddressCount': (integer, False), 'SubnetId': (basestring, False), } class SecurityGroups(AWSProperty): props = { 'GroupId': (basestring, False), } class IamInstanceProfile(AWSProperty): props = { 'Arn': (basestring, False), } class LaunchSpecifications(AWSProperty): props = { 'BlockDeviceMappings': ([BlockDeviceMapping], False), 'EbsOptimized': (boolean, False), 'IamInstanceProfile': (IamInstanceProfile, False), 'ImageId': (basestring, True), 'InstanceType': (basestring, True), 'KernelId': (basestring, False), 'KeyName': (basestring, False), 'Monitoring': (Monitoring, False), 'NetworkInterfaces': ([NetworkInterfaces], False), 'Placement': (basestring, False), 'RamdiskId': (basestring, False), 'SecurityGroups': ([SecurityGroups], False), 'SubnetId': (basestring, False), 'UserData': (basestring, False), 'WeightedCapacity': (positive_integer, False), } class SpotFleetRequestConfigData(AWSProperty): props = { 'AllocationStrategy': (basestring, False), 'ExcessCapacityTerminationPolicy': (basestring, False), 'IamFleetRole': (basestring, True), 'LaunchSpecifications': ([LaunchSpecifications], True), 'SpotPrice': (basestring, True), 'TargetCapacity': (positive_integer, True), 'TerminateInstancesWithExpiration': (boolean, False), 'ValidFrom': (basestring, False), 'ValidUntil': (basestring, False), } class SpotFleet(AWSObject): resource_type = "AWS::EC2::SpotFleet" props = { 'SpotFleetRequestConfigData': (SpotFleetRequestConfigData, True), } class PlacementGroup(AWSObject): resource_type = "AWS::EC2::PlacementGroup" props = { 'Strategy': (basestring, True), }
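A short sketch of how the EC2 classes above are combined into a CloudFormation template; Template and Ref come from the troposphere core module, and the AMI id, instance type and CIDR block are placeholders rather than values from the source.

from troposphere import Template, Ref
import troposphere.ec2 as ec2

t = Template()

# Security group built from the SecurityGroup / SecurityGroupRule classes defined above
sg = t.add_resource(ec2.SecurityGroup(
    "SshSecurityGroup",
    GroupDescription="Allow inbound SSH",
    SecurityGroupIngress=[
        ec2.SecurityGroupRule(
            IpProtocol="tcp", FromPort=22, ToPort=22, CidrIp="0.0.0.0/0"),
    ],
))

# Instance built from the Instance and Tag classes defined above
instance = t.add_resource(ec2.Instance(
    "ExampleInstance",
    ImageId="ami-12345678",                 # placeholder AMI id
    InstanceType="t2.micro",
    SecurityGroups=[Ref(sg)],               # EC2-Classic style group reference
    Tags=[ec2.Tag("Name", "example")],
))

print(t.to_json())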
bgroff/kala-app
refs/heads/master
django_kala/projects/views/documents/settings/transfer_ownership.py
1
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView

from documents.models import Document
from projects.forms.documents.settings.transfer_ownership import TransferOwnershipForm
from projects.models import Project


class TransferOwnershipView(TemplateView):
    template_name = 'documents/settings/transfer_ownership.html'

    def get_context_data(self, **kwargs):
        return {
            'form': self.form,
            'project': self.project,
            'organization': self.project.organization,
            'document': self.document
        }

    @method_decorator(login_required)
    def dispatch(self, request, project_pk, document_pk, *args, **kwargs):
        self.project = get_object_or_404(Project.objects.active(), pk=project_pk)
        self.document = get_object_or_404(Document.objects.active(), pk=document_pk)
        if not self.document.can_manage(request.user):
            raise PermissionDenied(_('You do not have permission to edit this document'))

        self.form = TransferOwnershipForm(
            request.POST or None,
            document=self.document,
            projects=request.user.get_projects(['can_manage'])
        )
        return super(TransferOwnershipView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        if self.form.is_valid():
            self.form.save()
            messages.success(request, _('The document has been transferred.'))
            return redirect(reverse('projects:document_transfer_ownership', args=[self.document.project.pk, self.document.pk]))
        return self.render_to_response(self.get_context_data())
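A hypothetical urls.py entry wiring up the view above; the route name matches the reverse() call in post(), while the URL shape, app_name and import path are assumptions based on the file's location in the project.

from django.urls import path

from projects.views.documents.settings.transfer_ownership import TransferOwnershipView

app_name = 'projects'

urlpatterns = [
    path(
        '<int:project_pk>/documents/<int:document_pk>/settings/transfer-ownership/',
        TransferOwnershipView.as_view(),
        name='document_transfer_ownership',
    ),
]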
smurfix/MoaT
refs/heads/master
monitor/monitor.py
2
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, division, unicode_literals ## ## This file is part of MoaT, the Master of all Things. ## ## MoaT is Copyright © 2007-2016 by Matthias Urlichs <[email protected]>, ## it is licensed under the GPLv3. See the file `README.rst` for details, ## including optimistic statements by the author. ## ## This program is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License (included; see the file LICENSE) ## for more details. ## ## This header is auto-generated and may self-destruct at any time, ## courtesy of "make update". The original is in ‘scripts/_boilerplate.py’. ## Thus, do not remove the next line, or insert any blank lines above. ##BP from moat import patch; moat.patch() import qbroker; qbroker.setup(gevent=True) from qbroker.util.async import Main import asyncio import gtk import gtk.gdk import gnome.ui import gtk.glade import gobject import glib import os from time import time from datetime import datetime APPNAME="monitor" APPVERSION="0.1" class Monitor(object): def __init__(self, widgets): self.widgets = widgets widgets.MonitorData = self class MonitorUI(object): def __init__(self): #self._init_acctcache() gnome.init(APPNAME, APPVERSION) self.widgets = gtk.glade.XML(APPNAME+".glade") d = MonitorUI.__dict__.copy() for k in d.keys(): d[k] = getattr(self,k) self.widgets.signal_autoconnect(d) self.events = {} self._init_events() #self.enable_stuff() def init_done(self): self['main'].show_all() def __getitem__(self,name): return self.widgets.get_widget(name) def _init_events(self): v = self['events_view'] s = v.get_selection() s.set_mode(gtk.SELECTION_SINGLE) m = gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_UINT, gobject.TYPE_PYOBJECT) # text counter time # time should be TYPE_FLOAT, but that doesn't work for some reason mm = gtk.TreeModelSort(m) def cmp(s,a,b): a=s.get(a,2)[0] b=s.get(b,2)[0] if a is None and b is None: return 0 if a is None: return 1 if b is None: return -1 return a-b mm.set_sort_func(2,cmp) #mm.set_sort_column_id (-1, gtk.SORT_DESCENDING) v.set_model(mm) v.set_headers_visible(True) v.set_show_expanders(False) c = v.get_column(0) if c: v.remove_column(c) # create the TreeViewColumn to display the data def add(name,col,renderer=None, *renderer_args): r = gtk.CellRendererText() column = gtk.TreeViewColumn(name,r,text=col) if renderer: column.set_cell_data_func(r, renderer, renderer_args) v.append_column(column) column.set_sizing (gtk.TREE_VIEW_COLUMN_AUTOSIZE) column.set_resizable(True) column.set_reorderable(True) cell = gtk.CellRendererText() column.pack_start(cell, True) # doesn't work for some reason. TODO. 
# def ClickMe(*a,**k): # print("Clicked",a,k) # column.connect("clicked", ClickMe,col) def DatePrinter(column, cell, model, iter, col_key): text = model.get_value(iter, 2) text = datetime.utcfromtimestamp(text) text = text.strftime("%Y-%m-%d %H:%M:%S.%.3f") ti = text.rfind('.') if ti>0 and len(ti)-ti > 4: # limit to msec text = text[:ti+4] cell.set_property("text", text) add('Event',0) add('#',1) add('zuletzt',2,DatePrinter) def add_event(self,name): v = self['events_view'] m = v.get_model().get_model() # we want the unsorted master model that's not sorted tm=time() try: i = self.events[name] except KeyError: i = m.append(None,row=[name,1,tm]) self.events[name] = i else: r, = m.get(i,1) m.set(i, 1,r+1, 2,tm) ### EVENTS def on_main_destroy(self,window): # main window goes away gtk.main_quit() def on_main_delete_event(self,window,event): # True if the window should not be deleted return False def on_quit_button_clicked(self,x): gtk.main_quit() def on_menu_quit(self,x): gtk.main_quit() def on_menu_test(self,x): self.add_event("Test") def on_menu_test2(self,x): self.add_event("Test 2") def on_menu_prefs(self,x): self['prefs_status'].hide() self['prefs'].show() def on_prefs_delete_event(self,*x): self['prefs'].hide() return True def on_prefs_ok(self,*x): print("OK",x) self['prefs'].hide() def on_prefs_test(self,*x): print("TEST",x) def on_prefs_cancel(self,*x): print("CANCEL",x) self['prefs'].hide() def on_prefs_port_ins(self,*x): print("PI",x) def on_prefs_port_insa(self,*x): print("PIa",x) def on_prefs_port_pre(self,*x): print("PE",x) class MonitorMain(Main): @asyncio.coroutine def at_start(self): yield from super().at_start() self.widgets = MonitorUI() MonitorData = Monitor(widgets) self.widgets.init_done() if __name__ == "__main__": main=MonitorMain() main.run() # END #
dongyue/cmglue
refs/heads/master
cmglib/common.py
1
"""common functions and global variables of CMG.""" import os import sys import subprocess import re import configparser #for Python 3.x #import ConfigParser #for Python 2.x VERSION = "0.9.2" #CMG version STREAMFILE = "_stream" #The file in root of container which stores the stream configuration. BASELINEFILE = "_baseline" # The file in .git in container which stores the baseline configuration to write to tag cfgs = {'verbose': False, \ 'gitrebase': True, \ 'online': True, \ 'addnewfile': True} """Some global CMG configuations that alternate CMG's behavior. * verbose: True if you want CMG show verbose information. * gitrebase: True if you want 'git rebase' instead of 'git merge' when you update a local branch from a remote branch. It affects both the container and all components under Git. * online: False if you can not / do not want link to remote repository/place to download from / upload to. Accordingly, Git will not 'git fetch', and SVN can not 'svn update', 'svn commit' etc. All these values may changed when initialize. See get_cmg_cfg(root) in git.py """ def command(cmd, dir): """ To run a command line. `cmd' is the command line. `dir' is the working directory to run the command line. Return output msg, err/warn msg, as well as the exit code. """ if cfgs['verbose']: print("\n[call cmd] " + cmd) print("[in dir] " + dir) pipe = subprocess.Popen(cmd, shell=True, cwd=dir, universal_newlines=True,\ stdout=subprocess.PIPE, stderr=subprocess.PIPE ) (stdoutdata, stderrdata) = pipe.communicate() if cfgs['verbose']: print("[output msg]\n" + stdoutdata) print("[err/warn msg] " + stderrdata) print("[exit code] " + str(pipe.returncode)) return (stdoutdata, stderrdata, pipe.returncode) def command_free(cmd, dir): """ To run a command line interactively, and without monitor the result. `cmd' is the command line. `dir' is the working directory to run the command line. """ if cfgs['verbose']: print("\n[call cmd] " + cmd) print("[in dir] " + dir) pipe = subprocess.Popen(cmd, shell=True, cwd=dir, universal_newlines=True) pipe.communicate() return pipe.returncode DEFAULTSECT = "DEFAULT" #the defult section name. See codes in base class. class MyCfgParser(configparser.SafeConfigParser): #for Python 3.x #class MyCfgParser(ConfigParser.SafeConfigParser): #for Python 2.x """Some wrap on standard lib configperser.SafeConfigParser.""" class StrFp: """A internal class to make string as if a file handle to read.""" def __init__(self, string): self.lines = string.splitlines(True) self.pointer = -1 def readline(self): self.pointer = self.pointer + 1 if self.pointer < len(self.lines): return self.lines[self.pointer] else: return "" def readstr(self, string, title): """To read config from a multiline string""" fp = self.StrFp(string) self._read(fp, title) def _read(self, fp, fpname): """Different to base class's: allow whitespaces as prefix of each valid line. This is to accept Git style cfg format. While the side effect is, you can not have a value longer than one line. """ #only one line is different from base class. See highlighted comment below. cursect = None # None, or a dictionary optname = None lineno = 0 e = None # None, or an exception while True: line = fp.readline() if not line: break lineno = lineno + 1 # comment or blank line? if line.strip() == '' or line[0] in '#;': continue if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR": # no leading whitespace continue ######################### # Below is the only line different from base class, to accept git style cfg format. 
line = line.lstrip() ######################### # continuation line? if line[0].isspace() and cursect is not None and optname: value = line.strip() if value: cursect[optname] = "%s\n%s" % (cursect[optname], value) # a section header or option header? else: # is it a section header? mo = self.SECTCRE.match(line) if mo: sectname = mo.group('header') if sectname in self._sections: cursect = self._sections[sectname] elif sectname == DEFAULTSECT: cursect = self._defaults else: cursect = self._dict() cursect['__name__'] = sectname self._sections[sectname] = cursect # So sections can't start with a continuation line optname = None # no section header in the file? elif cursect is None: raise configparser.MissingSectionHeaderError(fpname, lineno, line) # an option line? else: mo = self.OPTCRE.match(line) if mo: optname, vi, optval = mo.group('option', 'vi', 'value') if vi in ('=', ':') and ';' in optval: # ';' is a comment delimiter only if it follows # a spacing character pos = optval.find(';') if pos != -1 and optval[pos-1].isspace(): optval = optval[:pos] optval = optval.strip() # allow empty values if optval == '""': optval = '' optname = self.optionxform(optname.rstrip()) cursect[optname] = optval else: # a non-fatal parsing error occurred. set up the # exception but keep going. the exception will be # raised at the end of the file and will contain a # list of all bogus lines if not e: e = ParsingError(fpname) e.append(lineno, repr(line)) # if any parsing errors occurred, raise an exception if e: raise e def get_root(): """Get the root path of the container. If a super-directory of current directory has a special sub-directory named `_stream', then the super-directory is the answer. """ root = os.path.abspath('.') while True: if os.path.isfile(os.path.join(root, STREAMFILE)): break else: newroot = os.path.dirname(root) if newroot == root: sys.stderr.write("Failed to find the " + STREAMFILE +\ " file which indicate the container.") sys.exit(2) root = newroot return root def get_stream_cfg(root, local, remote): """Get current stream's configuration. `root' is the container's root dir. `local' stands for the local branch. `remote' stands for the remote branch. """ config = MyCfgParser() std_cfg = os.path.join(root, STREAMFILE) try: config.readfp(open(std_cfg)) # except configparser.ParsingError as err: # sys.stderr.write("Failed to read stream config from " + std_cfg \ # + ":\n" + str(err)) # exit(2) except: sys.stderr.write("Failed to read stream config from " + std_cfg \ + ":\n" + str(sys.exc_info()[0])) exit(2) if local != "": cfg_local = os.path.join(root, STREAMFILE + "_" + re.sub(r"\\|/", "_", local)) try: config.read(cfg_local) # except configparser.ParsingError as err: # sys.stderr.write("Failed to read stream config from " + cfg_local \ # + ":\n" + str(err)) # exit(2) except: sys.stderr.write("Failed to read stream config from " + cfg_local \ + ":\n" + str(sys.exc_info()[0])) exit(2) if remote != "": cfg_remote = os.path.join(root, STREAMFILE + "_" + re.sub(r"\\|/", "_", remote)) try: config.read(cfg_remote) # except configparser.ParsingError as err: # sys.stderr.write("Failed to read stream config from " + cfg_remote \ # + ":\n" + str(err)) # exit(2) except: sys.stderr.write("Failed to read stream config from " + cfg_remote \ + ":\n" + str(sys.exc_info()[0])) exit(2) return config def get_baseline_cfg(root, tag_name, tag_annotation): """Get a baseline's configuration. `root' is the container's root dir. the configuration information is stored in `tag_annotation' string. 
""" config = MyCfgParser() try: config.readstr(tag_annotation, tag_name) except configparser.ParsingError as err: sys.stderr.write("Failed to read baseline config from tag " + tag_name + \ "' annotation:\n" + str(err)) print("Is this tag an tag with annotation that in CMG special format?") exit(2) return config
macks22/scikit-learn
refs/heads/master
sklearn/covariance/shrunk_covariance_.py
209
""" Covariance estimators using shrinkage. Shrinkage corresponds to regularising `cov` using a convex combination: shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate. """ # Author: Alexandre Gramfort <[email protected]> # Gael Varoquaux <[email protected]> # Virgile Fritsch <[email protected]> # # License: BSD 3 clause # avoid division truncation from __future__ import division import warnings import numpy as np from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance from ..externals.six.moves import xrange from ..utils import check_array # ShrunkCovariance estimator def shrunk_covariance(emp_cov, shrinkage=0.1): """Calculates a covariance matrix shrunk on the diagonal Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- emp_cov : array-like, shape (n_features, n_features) Covariance matrix to be shrunk shrinkage : float, 0 <= shrinkage <= 1 Coefficient in the convex combination used for the computation of the shrunk estimate. Returns ------- shrunk_cov : array-like Shrunk covariance. Notes ----- The regularized (shrunk) covariance is given by (1 - shrinkage)*cov + shrinkage*mu*np.identity(n_features) where mu = trace(cov) / n_features """ emp_cov = check_array(emp_cov) n_features = emp_cov.shape[0] mu = np.trace(emp_cov) / n_features shrunk_cov = (1. - shrinkage) * emp_cov shrunk_cov.flat[::n_features + 1] += shrinkage * mu return shrunk_cov class ShrunkCovariance(EmpiricalCovariance): """Covariance estimator with shrinkage Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- store_precision : boolean, default True Specify if the estimated precision is stored shrinkage : float, 0 <= shrinkage <= 1, default 0.1 Coefficient in the convex combination used for the computation of the shrunk estimate. assume_centered : boolean, default False If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False, data are centered before computation. Attributes ---------- covariance_ : array-like, shape (n_features, n_features) Estimated covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) `shrinkage` : float, 0 <= shrinkage <= 1 Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized covariance is given by (1 - shrinkage)*cov + shrinkage*mu*np.identity(n_features) where mu = trace(cov) / n_features """ def __init__(self, store_precision=True, assume_centered=False, shrinkage=0.1): EmpiricalCovariance.__init__(self, store_precision=store_precision, assume_centered=assume_centered) self.shrinkage = shrinkage def fit(self, X, y=None): """ Fits the shrunk covariance model according to the given training data and parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. y : not used, present for API consistence purpose. Returns ------- self : object Returns self. 
""" X = check_array(X) # Not calling the parent object to fit, to avoid a potential # matrix inversion when setting the precision if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance = empirical_covariance( X, assume_centered=self.assume_centered) covariance = shrunk_covariance(covariance, self.shrinkage) self._set_covariance(covariance) return self # Ledoit-Wolf estimator def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000): """Estimates the shrunk Ledoit-Wolf covariance matrix. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage. assume_centered : Boolean If True, data are not centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data are centered before computation. block_size : int Size of the blocks into which the covariance matrix will be split. Returns ------- shrinkage: float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized (shrunk) covariance is: (1 - shrinkage)*cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features """ X = np.asarray(X) # for only one feature, the result is the same whatever the shrinkage if len(X.shape) == 2 and X.shape[1] == 1: return 0. if X.ndim == 1: X = np.reshape(X, (1, -1)) if X.shape[0] == 1: warnings.warn("Only one sample available. " "You may want to reshape your data array") n_samples, n_features = X.shape # optionaly center data if not assume_centered: X = X - X.mean(0) # number of blocks to split the covariance matrix into n_splits = int(n_features / block_size) X2 = X ** 2 emp_cov_trace = np.sum(X2, axis=0) / n_samples mu = np.sum(emp_cov_trace) / n_features beta_ = 0. # sum of the coefficients of <X2.T, X2> delta_ = 0. # sum of the *squared* coefficients of <X.T, X> # starting block computation for i in xrange(n_splits): for j in xrange(n_splits): rows = slice(block_size * i, block_size * (i + 1)) cols = slice(block_size * j, block_size * (j + 1)) beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols])) delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2) rows = slice(block_size * i, block_size * (i + 1)) beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:])) delta_ += np.sum( np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2) for j in xrange(n_splits): cols = slice(block_size * j, block_size * (j + 1)) beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols])) delta_ += np.sum( np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2) delta_ += np.sum(np.dot(X.T[block_size * n_splits:], X[:, block_size * n_splits:]) ** 2) delta_ /= n_samples ** 2 beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, block_size * n_splits:])) # use delta_ to compute beta beta = 1. / (n_features * n_samples) * (beta_ / n_samples - delta_) # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p delta = delta_ - 2. * mu * emp_cov_trace.sum() + n_features * mu ** 2 delta /= n_features # get final beta as the min between beta and delta beta = min(beta, delta) # finally get shrinkage shrinkage = 0 if beta == 0 else beta / delta return shrinkage def ledoit_wolf(X, assume_centered=False, block_size=1000): """Estimates the shrunk Ledoit-Wolf covariance matrix. Read more in the :ref:`User Guide <shrunk_covariance>`. 
Parameters ---------- X : array-like, shape (n_samples, n_features) Data from which to compute the covariance estimate assume_centered : boolean, default=False If True, data are not centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data are centered before computation. block_size : int, default=1000 Size of the blocks into which the covariance matrix will be split. This is purely a memory optimization and does not affect results. Returns ------- shrunk_cov : array-like, shape (n_features, n_features) Shrunk covariance. shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized (shrunk) covariance is: (1 - shrinkage)*cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features """ X = np.asarray(X) # for only one feature, the result is the same whatever the shrinkage if len(X.shape) == 2 and X.shape[1] == 1: if not assume_centered: X = X - X.mean() return np.atleast_2d((X ** 2).mean()), 0. if X.ndim == 1: X = np.reshape(X, (1, -1)) warnings.warn("Only one sample available. " "You may want to reshape your data array") n_samples = 1 n_features = X.size else: n_samples, n_features = X.shape # get Ledoit-Wolf shrinkage shrinkage = ledoit_wolf_shrinkage( X, assume_centered=assume_centered, block_size=block_size) emp_cov = empirical_covariance(X, assume_centered=assume_centered) mu = np.sum(np.trace(emp_cov)) / n_features shrunk_cov = (1. - shrinkage) * emp_cov shrunk_cov.flat[::n_features + 1] += shrinkage * mu return shrunk_cov, shrinkage class LedoitWolf(EmpiricalCovariance): """LedoitWolf Estimator Ledoit-Wolf is a particular form of shrinkage, where the shrinkage coefficient is computed using O. Ledoit and M. Wolf's formula as described in "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices", Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2, February 2004, pages 365-411. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- store_precision : bool, default=True Specify if the estimated precision is stored. assume_centered : bool, default=False If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False (default), data are centered before computation. block_size : int, default=1000 Size of the blocks into which the covariance matrix will be split during its Ledoit-Wolf estimation. This is purely a memory optimization and does not affect results. Attributes ---------- covariance_ : array-like, shape (n_features, n_features) Estimated covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) shrinkage_ : float, 0 <= shrinkage <= 1 Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularised covariance is:: (1 - shrinkage)*cov + shrinkage*mu*np.identity(n_features) where mu = trace(cov) / n_features and shrinkage is given by the Ledoit and Wolf formula (see References) References ---------- "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices", Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2, February 2004, pages 365-411. 
""" def __init__(self, store_precision=True, assume_centered=False, block_size=1000): EmpiricalCovariance.__init__(self, store_precision=store_precision, assume_centered=assume_centered) self.block_size = block_size def fit(self, X, y=None): """ Fits the Ledoit-Wolf shrunk covariance model according to the given training data and parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. y : not used, present for API consistence purpose. Returns ------- self : object Returns self. """ # Not calling the parent object to fit, to avoid computing the # covariance matrix (and potentially the precision) X = check_array(X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance, shrinkage = ledoit_wolf(X - self.location_, assume_centered=True, block_size=self.block_size) self.shrinkage_ = shrinkage self._set_covariance(covariance) return self # OAS estimator def oas(X, assume_centered=False): """Estimate covariance with the Oracle Approximating Shrinkage algorithm. Parameters ---------- X : array-like, shape (n_samples, n_features) Data from which to compute the covariance estimate. assume_centered : boolean If True, data are not centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data are centered before computation. Returns ------- shrunk_cov : array-like, shape (n_features, n_features) Shrunk covariance. shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularised (shrunk) covariance is: (1 - shrinkage)*cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features The formula we used to implement the OAS does not correspond to the one given in the article. It has been taken from the MATLAB program available from the author's webpage (https://tbayes.eecs.umich.edu/yilun/covestimation). """ X = np.asarray(X) # for only one feature, the result is the same whatever the shrinkage if len(X.shape) == 2 and X.shape[1] == 1: if not assume_centered: X = X - X.mean() return np.atleast_2d((X ** 2).mean()), 0. if X.ndim == 1: X = np.reshape(X, (1, -1)) warnings.warn("Only one sample available. " "You may want to reshape your data array") n_samples = 1 n_features = X.size else: n_samples, n_features = X.shape emp_cov = empirical_covariance(X, assume_centered=assume_centered) mu = np.trace(emp_cov) / n_features # formula from Chen et al.'s **implementation** alpha = np.mean(emp_cov ** 2) num = alpha + mu ** 2 den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features) shrinkage = 1. if den == 0 else min(num / den, 1.) shrunk_cov = (1. - shrinkage) * emp_cov shrunk_cov.flat[::n_features + 1] += shrinkage * mu return shrunk_cov, shrinkage class OAS(EmpiricalCovariance): """Oracle Approximating Shrinkage Estimator Read more in the :ref:`User Guide <shrunk_covariance>`. OAS is a particular form of shrinkage described in "Shrinkage Algorithms for MMSE Covariance Estimation" Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010. The formula used here does not correspond to the one given in the article. It has been taken from the Matlab program available from the authors' webpage (https://tbayes.eecs.umich.edu/yilun/covestimation). Parameters ---------- store_precision : bool, default=True Specify if the estimated precision is stored. 
assume_centered: bool, default=False If True, data are not centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False (default), data are centered before computation. Attributes ---------- covariance_ : array-like, shape (n_features, n_features) Estimated covariance matrix. precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) shrinkage_ : float, 0 <= shrinkage <= 1 coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularised covariance is:: (1 - shrinkage)*cov + shrinkage*mu*np.identity(n_features) where mu = trace(cov) / n_features and shrinkage is given by the OAS formula (see References) References ---------- "Shrinkage Algorithms for MMSE Covariance Estimation" Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010. """ def fit(self, X, y=None): """ Fits the Oracle Approximating Shrinkage covariance model according to the given training data and parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. y : not used, present for API consistence purpose. Returns ------- self: object Returns self. """ X = check_array(X) # Not calling the parent object to fit, to avoid computing the # covariance matrix (and potentially the precision) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance, shrinkage = oas(X - self.location_, assume_centered=True) self.shrinkage_ = shrinkage self._set_covariance(covariance) return self
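A quick sketch comparing the three estimators defined above on synthetic data, and checking the convex-combination formula from the docstrings by hand; the data is random, so the printed shrinkage values will vary.

import numpy as np
from sklearn.covariance import ShrunkCovariance, LedoitWolf, OAS, empirical_covariance

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=np.zeros(5), cov=np.eye(5), size=40)

emp = empirical_covariance(X)

sc = ShrunkCovariance(shrinkage=0.1).fit(X)   # fixed shrinkage coefficient
lw = LedoitWolf().fit(X)                      # shrinkage from the Ledoit-Wolf formula
oa = OAS().fit(X)                             # shrinkage from the OAS formula

print("Ledoit-Wolf shrinkage:", lw.shrinkage_)
print("OAS shrinkage:", oa.shrinkage_)

# The shrunk estimate is (1 - shrinkage)*cov + shrinkage*mu*I with mu = trace(cov)/n_features
mu = np.trace(emp) / emp.shape[0]
manual = (1 - 0.1) * emp + 0.1 * mu * np.eye(emp.shape[0])
assert np.allclose(manual, sc.covariance_)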
faust64/ansible
refs/heads/devel
lib/ansible/modules/network/illumos/__init__.py
12133432
Anlim/decode-Django
refs/heads/master
Django-1.5.1/django/conf/locale/et/__init__.py
12133432
bunnyitvn/webptn
refs/heads/master
build/lib.linux-i686-2.7/django/contrib/sessions/backends/cached_db.py
102
""" Cached, database-backed sessions. """ from django.contrib.sessions.backends.db import SessionStore as DBStore from django.core.cache import cache from django.core.exceptions import SuspiciousOperation from django.utils import timezone KEY_PREFIX = "django.contrib.sessions.cached_db" class SessionStore(DBStore): """ Implements cached, database backed sessions. """ def __init__(self, session_key=None): super(SessionStore, self).__init__(session_key) @property def cache_key(self): return KEY_PREFIX + self._get_or_create_session_key() def load(self): try: data = cache.get(self.cache_key, None) except Exception: # Some backends (e.g. memcache) raise an exception on invalid # cache keys. If this happens, reset the session. See #17810. data = None if data is None: # Duplicate DBStore.load, because we need to keep track # of the expiry date to set it properly in the cache. try: s = Session.objects.get( session_key=self.session_key, expire_date__gt=timezone.now() ) data = self.decode(s.session_data) cache.set(self.cache_key, data, self.get_expiry_age(expiry=s.expire_date)) except (Session.DoesNotExist, SuspiciousOperation): self.create() data = {} return data def exists(self, session_key): if (KEY_PREFIX + session_key) in cache: return True return super(SessionStore, self).exists(session_key) def save(self, must_create=False): super(SessionStore, self).save(must_create) cache.set(self.cache_key, self._session, self.get_expiry_age()) def delete(self, session_key=None): super(SessionStore, self).delete(session_key) if session_key is None: if self.session_key is None: return session_key = self.session_key cache.delete(KEY_PREFIX + session_key) def flush(self): """ Removes the current session data from the database and regenerates the key. """ self.clear() self.delete(self.session_key) self.create() # At bottom to avoid circular import from django.contrib.sessions.models import Session
michalsenkyr/spark
refs/heads/master
examples/src/main/python/ml/word2vec_example.py
122
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

# $example on$
from pyspark.ml.feature import Word2Vec
# $example off$
from pyspark.sql import SparkSession

if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("Word2VecExample")\
        .getOrCreate()

    # $example on$
    # Input data: Each row is a bag of words from a sentence or document.
    documentDF = spark.createDataFrame([
        ("Hi I heard about Spark".split(" "), ),
        ("I wish Java could use case classes".split(" "), ),
        ("Logistic regression models are neat".split(" "), )
    ], ["text"])

    # Learn a mapping from words to Vectors.
    word2Vec = Word2Vec(vectorSize=3, minCount=0, inputCol="text", outputCol="result")
    model = word2Vec.fit(documentDF)

    result = model.transform(documentDF)
    for row in result.collect():
        text, vector = row
        print("Text: [%s] => \nVector: %s\n" % (", ".join(text), str(vector)))
    # $example off$

    spark.stop()
Microsoft/Tocino
refs/heads/master
src/buildings/bindings/modulegen__gcc_LP64.py
38
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.buildings', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## propagation-environment.h (module 'propagation'): ns3::CitySize [enumeration] module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'], import_from_module='ns.propagation') ## propagation-environment.h (module 'propagation'): ns3::EnvironmentType [enumeration] module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'], import_from_module='ns.propagation') ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## box.h (module 'mobility'): ns3::Box [class] module.add_class('Box', import_from_module='ns.mobility') ## box.h (module 'mobility'): ns3::Box::Side [enumeration] module.add_enum('Side', ['RIGHT', 'LEFT', 'TOP', 'BOTTOM', 'UP', 'DOWN'], outer_class=root_module['ns3::Box'], import_from_module='ns.mobility') ## building-container.h (module 'buildings'): ns3::BuildingContainer [class] module.add_class('BuildingContainer') ## building-list.h (module 'buildings'): ns3::BuildingList [class] module.add_class('BuildingList') ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper [class] module.add_class('BuildingsHelper') ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper [class] module.add_class('ConstantVelocityHelper', import_from_module='ns.mobility') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', 
import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## vector.h (module 'core'): ns3::Vector2D [class] module.add_class('Vector2D', import_from_module='ns.core') ## vector.h (module 'core'): ns3::Vector3D [class] module.add_class('Vector3D', import_from_module='ns.core') ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::PositionAllocator [class] module.add_class('PositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class] module.add_class('PropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator [class] module.add_class('RandomBoxPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## building-position-allocator.h (module 'buildings'): 
ns3::RandomBuildingPositionAllocator [class] module.add_class('RandomBuildingPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator [class] module.add_class('RandomDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class] module.add_class('RandomPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator [class] module.add_class('RandomRectanglePositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator [class] module.add_class('RandomRoomPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class] module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class] module.add_class('RangePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator [class] module.add_class('SameRoomPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class] module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', 
automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class] module.add_class('ThreeLogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class] module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class] module.add_class('TwoRayGroundPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator [class] module.add_class('UniformDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class] module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class] module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class] module.add_class('ZetaRandomVariable', 
import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class] module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## box.h (module 'mobility'): ns3::BoxChecker [class] module.add_class('BoxChecker', import_from_module='ns.mobility', parent=root_module['ns3::AttributeChecker']) ## box.h (module 'mobility'): ns3::BoxValue [class] module.add_class('BoxValue', import_from_module='ns.mobility', parent=root_module['ns3::AttributeValue']) ## building.h (module 'buildings'): ns3::Building [class] module.add_class('Building', parent=root_module['ns3::Object']) ## building.h (module 'buildings'): ns3::Building::BuildingType_t [enumeration] module.add_enum('BuildingType_t', ['Residential', 'Office', 'Commercial'], outer_class=root_module['ns3::Building']) ## building.h (module 'buildings'): ns3::Building::ExtWallsType_t [enumeration] module.add_enum('ExtWallsType_t', ['Wood', 'ConcreteWithWindows', 'ConcreteWithoutWindows', 'StoneBlocks'], outer_class=root_module['ns3::Building']) ## buildings-propagation-loss-model.h (module 'buildings'): ns3::BuildingsPropagationLossModel [class] module.add_class('BuildingsPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class] module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class] module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class] module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', 
import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class] module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class] module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## building-position-allocator.h (module 'buildings'): ns3::FixedRoomPositionAllocator [class] module.add_class('FixedRoomPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class] module.add_class('FixedRssLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class] module.add_class('FriisPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class] module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator [class] module.add_class('GridBuildingAllocator', parent=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator [class] module.add_class('GridPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::LayoutType [enumeration] module.add_enum('LayoutType', ['ROW_FIRST', 'COLUMN_FIRST'], outer_class=root_module['ns3::GridPositionAllocator'], import_from_module='ns.mobility') ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): ns3::HybridBuildingsPropagationLossModel [class] module.add_class('HybridBuildingsPropagationLossModel', parent=root_module['ns3::BuildingsPropagationLossModel']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', 
parent=root_module['ns3::AttributeValue']) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): ns3::ItuR1238PropagationLossModel [class] module.add_class('ItuR1238PropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator [class] module.add_class('ListPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class] module.add_class('LogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class] module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class] module.add_class('MatrixPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo [class] module.add_class('MobilityBuildingInfo', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class] module.add_class('NakagamiPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class] module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## oh-buildings-propagation-loss-model.h (module 'buildings'): ns3::OhBuildingsPropagationLossModel [class] module.add_class('OhBuildingsPropagationLossModel', parent=root_module['ns3::BuildingsPropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class] module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', 
import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector2DChecker [class] module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector2DValue [class] module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector3DChecker [class] module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector3DValue [class] module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector') typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*') typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&') module.add_typedef(root_module['ns3::Vector3D'], 'Vector') typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue') typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*') typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&') module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue') typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker') typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*') typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&') module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_Hash(module): root_module = module.get_root() ## hash-function.h (module 'core'): ns3::Hash::Implementation [class] module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&') ## Register a nested module for the namespace Function nested_module = module.add_cpp_namespace('Function') register_types_ns3_Hash_Function(nested_module) def register_types_ns3_Hash_Function(module): root_module = module.get_root() ## hash-fnv.h (module 'core'): 
ns3::Hash::Function::Fnv1a [class] module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class] module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class] module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class] module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) def register_methods(root_module): register_Ns3Address_methods(root_module, root_module['ns3::Address']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Box_methods(root_module, root_module['ns3::Box']) register_Ns3BuildingContainer_methods(root_module, root_module['ns3::BuildingContainer']) register_Ns3BuildingList_methods(root_module, root_module['ns3::BuildingList']) register_Ns3BuildingsHelper_methods(root_module, root_module['ns3::BuildingsHelper']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3ConstantVelocityHelper_methods(root_module, root_module['ns3::ConstantVelocityHelper']) register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer']) register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D']) register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3PositionAllocator_methods(root_module, root_module['ns3::PositionAllocator']) register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel']) register_Ns3RandomBoxPositionAllocator_methods(root_module, 
root_module['ns3::RandomBoxPositionAllocator']) register_Ns3RandomBuildingPositionAllocator_methods(root_module, root_module['ns3::RandomBuildingPositionAllocator']) register_Ns3RandomDiscPositionAllocator_methods(root_module, root_module['ns3::RandomDiscPositionAllocator']) register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel']) register_Ns3RandomRectanglePositionAllocator_methods(root_module, root_module['ns3::RandomRectanglePositionAllocator']) register_Ns3RandomRoomPositionAllocator_methods(root_module, root_module['ns3::RandomRoomPositionAllocator']) register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream']) register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel']) register_Ns3SameRoomPositionAllocator_methods(root_module, root_module['ns3::SameRoomPositionAllocator']) register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable']) register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel']) register_Ns3UniformDiscPositionAllocator_methods(root_module, root_module['ns3::UniformDiscPositionAllocator']) register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable']) register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable']) register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable']) register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable']) 
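    # Note (editor's sketch; an assumption about the tail of this generated file,
    # which lies outside this excerpt): the registration calls above and below bind
    # each wrapped C++ type to its register_Ns3*_methods() helper, and a modulegen
    # script like this is normally driven by a main() that runs the three phases and
    # then has pybindgen emit the C++ glue, roughly:
    #
    #   def main():
    #       out = FileCodeSink(sys.stdout)
    #       root_module = module_init()
    #       register_types(root_module)
    #       register_methods(root_module)
    #       register_functions(root_module)   # if defined later in this file
    #       root_module.generate(out)
    #
    #   if __name__ == '__main__':
    #       main()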
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3BoxChecker_methods(root_module, root_module['ns3::BoxChecker']) register_Ns3BoxValue_methods(root_module, root_module['ns3::BoxValue']) register_Ns3Building_methods(root_module, root_module['ns3::Building']) register_Ns3BuildingsPropagationLossModel_methods(root_module, root_module['ns3::BuildingsPropagationLossModel']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable']) register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable']) register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable']) register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable']) register_Ns3FixedRoomPositionAllocator_methods(root_module, root_module['ns3::FixedRoomPositionAllocator']) register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel']) register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel']) register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable']) register_Ns3GridBuildingAllocator_methods(root_module, root_module['ns3::GridBuildingAllocator']) register_Ns3GridPositionAllocator_methods(root_module, root_module['ns3::GridPositionAllocator']) register_Ns3HybridBuildingsPropagationLossModel_methods(root_module, root_module['ns3::HybridBuildingsPropagationLossModel']) register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3ItuR1238PropagationLossModel_methods(root_module, root_module['ns3::ItuR1238PropagationLossModel']) register_Ns3ListPositionAllocator_methods(root_module, root_module['ns3::ListPositionAllocator']) register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel']) register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable']) register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel']) register_Ns3MobilityBuildingInfo_methods(root_module, root_module['ns3::MobilityBuildingInfo']) 
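    # Usage sketch (an assumption: the module is built as part of the standard ns-3
    # Python bindings, so it becomes importable as ns.buildings; only methods that
    # are registered in this file are shown):
    #
    #   import ns.buildings
    #   buildings = ns.buildings.BuildingContainer()
    #   buildings.Create(2)                        # BuildingContainer::Create(uint32_t n)
    #   first = buildings.Get(0)                   # returns an ns3::Ptr<ns3::Building>
    #   total = ns.buildings.BuildingList.GetNBuildings()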
register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel']) register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice']) register_Ns3Node_methods(root_module, root_module['ns3::Node']) register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable']) register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker']) register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue']) register_Ns3OhBuildingsPropagationLossModel_methods(root_module, root_module['ns3::OhBuildingsPropagationLossModel']) register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable']) register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker']) register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue']) register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker']) register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation']) register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a']) register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32']) register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64']) register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3']) return def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), 
param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3Box_methods(root_module, cls): cls.add_output_stream_operator() ## box.h (module 'mobility'): ns3::Box::Box(ns3::Box const & arg0) [copy constructor] cls.add_constructor([param('ns3::Box const &', 'arg0')]) ## box.h (module 'mobility'): ns3::Box::Box(double _xMin, double _xMax, double _yMin, double _yMax, double _zMin, double _zMax) [constructor] cls.add_constructor([param('double', '_xMin'), param('double', '_xMax'), param('double', '_yMin'), param('double', '_yMax'), param('double', '_zMin'), param('double', '_zMax')]) ## box.h (module 'mobility'): ns3::Box::Box() [constructor] cls.add_constructor([]) ## box.h (module 'mobility'): ns3::Vector ns3::Box::CalculateIntersection(ns3::Vector const & current, ns3::Vector const & speed) const [member function] cls.add_method('CalculateIntersection', 'ns3::Vector', [param('ns3::Vector const &', 'current'), param('ns3::Vector const &', 'speed')], is_const=True) ## box.h (module 'mobility'): ns3::Box::Side ns3::Box::GetClosestSide(ns3::Vector const & position) const [member function] cls.add_method('GetClosestSide', 'ns3::Box::Side', [param('ns3::Vector const &', 'position')], is_const=True) ## box.h (module 'mobility'): bool ns3::Box::IsInside(ns3::Vector const & position) const [member function] cls.add_method('IsInside', 'bool', [param('ns3::Vector const &', 'position')], is_const=True) ## box.h (module 'mobility'): ns3::Box::xMax [variable] cls.add_instance_attribute('xMax', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::xMin [variable] cls.add_instance_attribute('xMin', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::yMax [variable] cls.add_instance_attribute('yMax', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::yMin [variable] cls.add_instance_attribute('yMin', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::zMax [variable] cls.add_instance_attribute('zMax', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::zMin [variable] cls.add_instance_attribute('zMin', 'double', is_const=False) return def register_Ns3BuildingContainer_methods(root_module, cls): ## building-container.h (module 'buildings'): ns3::BuildingContainer::BuildingContainer(ns3::BuildingContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::BuildingContainer const &', 'arg0')]) ## building-container.h (module 'buildings'): ns3::BuildingContainer::BuildingContainer() [constructor] cls.add_constructor([]) ## building-container.h (module 'buildings'): ns3::BuildingContainer::BuildingContainer(ns3::Ptr<ns3::Building> building) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Building >', 'building')]) ## building-container.h (module 'buildings'): ns3::BuildingContainer::BuildingContainer(std::string buildingName) [constructor] cls.add_constructor([param('std::string', 'buildingName')]) ## 
building-container.h (module 'buildings'): void ns3::BuildingContainer::Add(ns3::BuildingContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::BuildingContainer', 'other')]) ## building-container.h (module 'buildings'): void ns3::BuildingContainer::Add(ns3::Ptr<ns3::Building> building) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Building >', 'building')]) ## building-container.h (module 'buildings'): void ns3::BuildingContainer::Add(std::string buildingName) [member function] cls.add_method('Add', 'void', [param('std::string', 'buildingName')]) ## building-container.h (module 'buildings'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Building>*,std::vector<ns3::Ptr<ns3::Building>, std::allocator<ns3::Ptr<ns3::Building> > > > ns3::BuildingContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_const=True) ## building-container.h (module 'buildings'): void ns3::BuildingContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## building-container.h (module 'buildings'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Building>*,std::vector<ns3::Ptr<ns3::Building>, std::allocator<ns3::Ptr<ns3::Building> > > > ns3::BuildingContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_const=True) ## building-container.h (module 'buildings'): ns3::Ptr<ns3::Building> ns3::BuildingContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Building >', [param('uint32_t', 'i')], is_const=True) ## building-container.h (module 'buildings'): static ns3::BuildingContainer ns3::BuildingContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::BuildingContainer', [], is_static=True) ## building-container.h (module 'buildings'): uint32_t ns3::BuildingContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3BuildingList_methods(root_module, cls): ## building-list.h (module 'buildings'): ns3::BuildingList::BuildingList() [constructor] cls.add_constructor([]) ## building-list.h (module 'buildings'): ns3::BuildingList::BuildingList(ns3::BuildingList const & arg0) [copy constructor] cls.add_constructor([param('ns3::BuildingList const &', 'arg0')]) ## building-list.h (module 'buildings'): static uint32_t ns3::BuildingList::Add(ns3::Ptr<ns3::Building> building) [member function] cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Building >', 'building')], is_static=True) ## building-list.h (module 'buildings'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Building>*,std::vector<ns3::Ptr<ns3::Building>, std::allocator<ns3::Ptr<ns3::Building> > > > ns3::BuildingList::Begin() [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_static=True) ## building-list.h (module 'buildings'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Building>*,std::vector<ns3::Ptr<ns3::Building>, std::allocator<ns3::Ptr<ns3::Building> > > > ns3::BuildingList::End() [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_static=True) ## building-list.h (module 
'buildings'): static ns3::Ptr<ns3::Building> ns3::BuildingList::GetBuilding(uint32_t n) [member function] cls.add_method('GetBuilding', 'ns3::Ptr< ns3::Building >', [param('uint32_t', 'n')], is_static=True) ## building-list.h (module 'buildings'): static uint32_t ns3::BuildingList::GetNBuildings() [member function] cls.add_method('GetNBuildings', 'uint32_t', [], is_static=True) return def register_Ns3BuildingsHelper_methods(root_module, cls): ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper::BuildingsHelper() [constructor] cls.add_constructor([]) ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper::BuildingsHelper(ns3::BuildingsHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::BuildingsHelper const &', 'arg0')]) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::Install(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Install', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_static=True) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::Install(ns3::NodeContainer c) [member function] cls.add_method('Install', 'void', [param('ns3::NodeContainer', 'c')], is_static=True) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::MakeConsistent(ns3::Ptr<ns3::MobilityModel> bmm) [member function] cls.add_method('MakeConsistent', 'void', [param('ns3::Ptr< ns3::MobilityModel >', 'bmm')], is_static=True) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::MakeMobilityModelConsistent() [member function] cls.add_method('MakeMobilityModelConsistent', 'void', [], is_static=True) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3ConstantVelocityHelper_methods(root_module, cls): ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::ConstantVelocityHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::ConstantVelocityHelper const &', 'arg0')]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper() [constructor] cls.add_constructor([]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::Vector const & position) [constructor] cls.add_constructor([param('ns3::Vector const &', 'position')]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::Vector const & position, ns3::Vector const & vel) [constructor] cls.add_constructor([param('ns3::Vector const 
&', 'position'), param('ns3::Vector const &', 'vel')]) ## constant-velocity-helper.h (module 'mobility'): ns3::Vector ns3::ConstantVelocityHelper::GetCurrentPosition() const [member function] cls.add_method('GetCurrentPosition', 'ns3::Vector', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): ns3::Vector ns3::ConstantVelocityHelper::GetVelocity() const [member function] cls.add_method('GetVelocity', 'ns3::Vector', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Pause() [member function] cls.add_method('Pause', 'void', []) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::SetPosition(ns3::Vector const & position) [member function] cls.add_method('SetPosition', 'void', [param('ns3::Vector const &', 'position')]) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::SetVelocity(ns3::Vector const & vel) [member function] cls.add_method('SetVelocity', 'void', [param('ns3::Vector const &', 'vel')]) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Unpause() [member function] cls.add_method('Unpause', 'void', []) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Update() const [member function] cls.add_method('Update', 'void', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::UpdateWithBounds(ns3::Rectangle const & rectangle) const [member function] cls.add_method('UpdateWithBounds', 'void', [param('ns3::Rectangle const &', 'rectangle')], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::UpdateWithBounds(ns3::Box const & bounds) const [member function] cls.add_method('UpdateWithBounds', 'void', [param('ns3::Box const &', 'bounds')], is_const=True) return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): 
ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', 
[param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() 
[constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() 
const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function] cls.add_method('IsDocumentation', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function] cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function] 
cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## 
ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<ns3::Ptr<ns3::Node> const*, std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node >, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): void 
ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<ns3::Ptr<ns3::Node> const*, std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node >, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', 
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, 
ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TimeWithUnit_methods(root_module, cls): cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')]) ## nstime.h (module 'core'): 
ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor] cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< 
ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function] cls.add_method('GetSize', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member 
function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function] cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const 
>', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable] cls.add_instance_attribute('callback', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3Vector2D_methods(root_module, cls): cls.add_output_stream_operator() ## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2D const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor] cls.add_constructor([param('double', '_x'), param('double', '_y')]) ## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2D::x [variable] cls.add_instance_attribute('x', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector2D::y [variable] cls.add_instance_attribute('y', 'double', is_const=False) return def register_Ns3Vector3D_methods(root_module, cls): cls.add_output_stream_operator() ## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3D const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor] cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')]) ## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3D::x [variable] cls.add_instance_attribute('x', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector3D::y [variable] cls.add_instance_attribute('y', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector3D::z [variable] cls.add_instance_attribute('z', 'double', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right')) cls.add_output_stream_operator() 
cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor] cls.add_constructor([param('long double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable] cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] 
cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Initialize() [member function] cls.add_method('Initialize', 'void', []) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3PositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::PositionAllocator::PositionAllocator(ns3::PositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::PositionAllocator::PositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::PositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::PositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::PositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3PropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function] cls.add_method('SetNext', 'void', [param('ns3::Ptr< ns3::PropagationLossModel >', 'next')]) ## propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::PropagationLossModel> 
ns3::PropagationLossModel::GetNext() [member function] cls.add_method('GetNext', 'ns3::Ptr< ns3::PropagationLossModel >', []) ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('CalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')]) ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, visibility='private', is_virtual=True) return def register_Ns3RandomBoxPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator::RandomBoxPositionAllocator(ns3::RandomBoxPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomBoxPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator::RandomBoxPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::RandomBoxPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomBoxPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomBoxPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetX(ns3::Ptr<ns3::RandomVariableStream> x) [member function] cls.add_method('SetX', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetY(ns3::Ptr<ns3::RandomVariableStream> y) [member function] cls.add_method('SetY', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'y')]) ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetZ(ns3::Ptr<ns3::RandomVariableStream> z) [member function] cls.add_method('SetZ', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'z')]) return def register_Ns3RandomBuildingPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator::RandomBuildingPositionAllocator(ns3::RandomBuildingPositionAllocator const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::RandomBuildingPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator::RandomBuildingPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): int64_t ns3::RandomBuildingPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::RandomBuildingPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::RandomBuildingPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3RandomDiscPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator::RandomDiscPositionAllocator(ns3::RandomDiscPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomDiscPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator::RandomDiscPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::RandomDiscPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomDiscPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomDiscPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetRho(ns3::Ptr<ns3::RandomVariableStream> rho) [member function] cls.add_method('SetRho', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'rho')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetTheta(ns3::Ptr<ns3::RandomVariableStream> theta) [member function] cls.add_method('SetTheta', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'theta')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetX(double x) [member function] cls.add_method('SetX', 'void', [param('double', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetY(double y) [member function] cls.add_method('SetY', 'void', [param('double', 'y')]) return def register_Ns3RandomPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), 
param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::RandomPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3RandomRectanglePositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator::RandomRectanglePositionAllocator(ns3::RandomRectanglePositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomRectanglePositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator::RandomRectanglePositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::RandomRectanglePositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomRectanglePositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomRectanglePositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::RandomRectanglePositionAllocator::SetX(ns3::Ptr<ns3::RandomVariableStream> x) [member function] cls.add_method('SetX', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::RandomRectanglePositionAllocator::SetY(ns3::Ptr<ns3::RandomVariableStream> y) [member function] cls.add_method('SetY', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'y')]) return def register_Ns3RandomRoomPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator::RandomRoomPositionAllocator(ns3::RandomRoomPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomRoomPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator::RandomRoomPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): int64_t ns3::RandomRoomPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::RandomRoomPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::RandomRoomPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3RandomVariableStream_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor] 
cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function] cls.add_method('SetStream', 'void', [param('int64_t', 'stream')]) ## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function] cls.add_method('GetStream', 'int64_t', [], is_const=True) ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function] cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')]) ## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function] cls.add_method('IsAntithetic', 'bool', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True) ## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function] cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected') return def register_Ns3RangePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RangePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel::RangePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::RangePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::RangePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3SameRoomPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator(ns3::SameRoomPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::SameRoomPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator(ns3::NodeContainer c) [constructor] cls.add_constructor([param('ns3::NodeContainer', 'c')]) ## building-position-allocator.h (module 'buildings'): int64_t ns3::SameRoomPositionAllocator::AssignStreams(int64_t arg0) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'arg0')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::SameRoomPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', 
[], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::SameRoomPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3SequentialRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function] cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', [], is_const=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function] cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## 
simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, 
ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::ThreeLogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): 
ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] 
cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) 
const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TriangularRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'min'), param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetFrequency(double frequency) [member function] cls.add_method('SetFrequency', 'void', [param('double', 'frequency')]) ## propagation-loss-model.h (module 'propagation'): 
void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function] cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function] cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')]) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function] cls.add_method('GetMinDistance', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetFrequency() const [member function] cls.add_method('GetFrequency', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function] cls.add_method('GetSystemLoss', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function] cls.add_method('SetHeightAboveZ', 'void', [param('double', 'heightAboveZ')]) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::TwoRayGroundPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3UniformDiscPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator::UniformDiscPositionAllocator(ns3::UniformDiscPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::UniformDiscPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator::UniformDiscPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::UniformDiscPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::UniformDiscPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::UniformDiscPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::UniformDiscPositionAllocator::SetRho(double rho) [member function] cls.add_method('SetRho', 'void', [param('double', 'rho')]) ## position-allocator.h (module 'mobility'): void ns3::UniformDiscPositionAllocator::SetX(double x) [member function] cls.add_method('SetX', 'void', [param('double', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::UniformDiscPositionAllocator::SetY(double y) [member function] cls.add_method('SetY', 'void', [param('double', 'y')]) 
    return

def register_Ns3UniformRandomVariable_methods(root_module, cls):
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
    cls.add_method('GetMin', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
    cls.add_method('GetMax', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'min'), param('double', 'max')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'min'), param('uint32_t', 'max')])
    ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3WeibullRandomVariable_methods(root_module, cls):
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function]
    cls.add_method('GetScale', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function]
    cls.add_method('GetShape', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3ZetaRandomVariable_methods(root_module, cls):
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [],
is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZipfRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'n'), param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'n'), param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 
'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## 
attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3BoxChecker_methods(root_module, cls): ## box.h (module 'mobility'): ns3::BoxChecker::BoxChecker() [constructor] cls.add_constructor([]) ## box.h (module 'mobility'): ns3::BoxChecker::BoxChecker(ns3::BoxChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::BoxChecker const &', 'arg0')]) return def register_Ns3BoxValue_methods(root_module, cls): ## box.h (module 'mobility'): ns3::BoxValue::BoxValue() [constructor] cls.add_constructor([]) ## box.h (module 'mobility'): ns3::BoxValue::BoxValue(ns3::BoxValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::BoxValue const &', 'arg0')]) ## box.h (module 'mobility'): ns3::BoxValue::BoxValue(ns3::Box const & value) [constructor] cls.add_constructor([param('ns3::Box const &', 'value')]) ## box.h (module 'mobility'): ns3::Ptr<ns3::AttributeValue> ns3::BoxValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## box.h (module 'mobility'): bool ns3::BoxValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## box.h (module 'mobility'): ns3::Box ns3::BoxValue::Get() const [member function] cls.add_method('Get', 'ns3::Box', [], is_const=True) ## box.h (module 'mobility'): std::string ns3::BoxValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## box.h (module 'mobility'): void ns3::BoxValue::Set(ns3::Box const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Box const &', 'value')]) return def register_Ns3Building_methods(root_module, cls): ## building.h (module 'buildings'): ns3::Building::Building(ns3::Building const & arg0) [copy constructor] cls.add_constructor([param('ns3::Building const &', 'arg0')]) ## building.h (module 'buildings'): ns3::Building::Building(double xMin, double xMax, double yMin, double yMax, double zMin, double zMax) [constructor] cls.add_constructor([param('double', 'xMin'), param('double', 'xMax'), param('double', 'yMin'), param('double', 'yMax'), param('double', 'zMin'), param('double', 'zMax')]) ## building.h (module 'buildings'): ns3::Building::Building() [constructor] cls.add_constructor([]) ## building.h (module 'buildings'): void ns3::Building::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], is_virtual=True) ## building.h (module 'buildings'): ns3::Box ns3::Building::GetBoundaries() const [member function] cls.add_method('GetBoundaries', 'ns3::Box', [], is_const=True) ## building.h (module 'buildings'): ns3::Building::BuildingType_t ns3::Building::GetBuildingType() const [member function] cls.add_method('GetBuildingType', 'ns3::Building::BuildingType_t', [], is_const=True) ## building.h (module 'buildings'): ns3::Building::ExtWallsType_t ns3::Building::GetExtWallsType() const [member function] cls.add_method('GetExtWallsType', 
'ns3::Building::ExtWallsType_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetFloor(ns3::Vector position) const [member function] cls.add_method('GetFloor', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): uint32_t ns3::Building::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetNFloors() const [member function] cls.add_method('GetNFloors', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetNRoomsX() const [member function] cls.add_method('GetNRoomsX', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetNRoomsY() const [member function] cls.add_method('GetNRoomsY', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetRoomX(ns3::Vector position) const [member function] cls.add_method('GetRoomX', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetRoomY(ns3::Vector position) const [member function] cls.add_method('GetRoomY', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): static ns3::TypeId ns3::Building::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## building.h (module 'buildings'): bool ns3::Building::IsInside(ns3::Vector position) const [member function] cls.add_method('IsInside', 'bool', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): void ns3::Building::SetBoundaries(ns3::Box box) [member function] cls.add_method('SetBoundaries', 'void', [param('ns3::Box', 'box')]) ## building.h (module 'buildings'): void ns3::Building::SetBuildingType(ns3::Building::BuildingType_t t) [member function] cls.add_method('SetBuildingType', 'void', [param('ns3::Building::BuildingType_t', 't')]) ## building.h (module 'buildings'): void ns3::Building::SetExtWallsType(ns3::Building::ExtWallsType_t t) [member function] cls.add_method('SetExtWallsType', 'void', [param('ns3::Building::ExtWallsType_t', 't')]) ## building.h (module 'buildings'): void ns3::Building::SetNFloors(uint16_t nfloors) [member function] cls.add_method('SetNFloors', 'void', [param('uint16_t', 'nfloors')]) ## building.h (module 'buildings'): void ns3::Building::SetNRoomsX(uint16_t nroomx) [member function] cls.add_method('SetNRoomsX', 'void', [param('uint16_t', 'nroomx')]) ## building.h (module 'buildings'): void ns3::Building::SetNRoomsY(uint16_t nroomy) [member function] cls.add_method('SetNRoomsY', 'void', [param('uint16_t', 'nroomy')]) return def register_Ns3BuildingsPropagationLossModel_methods(root_module, cls): ## buildings-propagation-loss-model.h (module 'buildings'): ns3::BuildingsPropagationLossModel::BuildingsPropagationLossModel() [constructor] cls.add_constructor([]) ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) ## buildings-propagation-loss-model.h (module 'buildings'): double 
ns3::BuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_pure_virtual=True, is_const=True, is_virtual=True) ## buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::BuildingsPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## buildings-propagation-loss-model.h (module 'buildings'): int64_t ns3::BuildingsPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='protected', is_virtual=True) ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::EvaluateSigma(ns3::Ptr<ns3::MobilityBuildingInfo> a, ns3::Ptr<ns3::MobilityBuildingInfo> b) const [member function] cls.add_method('EvaluateSigma', 'double', [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a'), param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'b')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::ExternalWallLoss(ns3::Ptr<ns3::MobilityBuildingInfo> a) const [member function] cls.add_method('ExternalWallLoss', 'double', [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::GetShadowing(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetShadowing', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::HeightLoss(ns3::Ptr<ns3::MobilityBuildingInfo> n) const [member function] cls.add_method('HeightLoss', 'double', [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'n')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::InternalWallsLoss(ns3::Ptr<ns3::MobilityBuildingInfo> a, ns3::Ptr<ns3::MobilityBuildingInfo> b) const [member function] cls.add_method('InternalWallsLoss', 'double', [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a'), param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'b')], is_const=True, visibility='protected') return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', 
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3ConstantRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function] cls.add_method('GetConstant', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function] cls.add_method('GetValue', 'double', [param('double', 'constant')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'constant')]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3DeterministicRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function] cls.add_method('SetValueArray', 
'void', [param('double *', 'values'), param('uint64_t', 'length')]) ## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3EmpiricalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function] cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double arg0, double arg1, double arg2, double arg3, double arg4) [member function] cls.add_method('Interpolate', 'double', [param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')], visibility='private', is_virtual=True) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function] cls.add_method('Validate', 'void', [], visibility='private', is_virtual=True) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3ErlangRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): 
ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function] cls.add_method('GetK', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function] cls.add_method('GetLambda', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'k'), param('double', 'lambda')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'k'), param('uint32_t', 'lambda')]) ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ExponentialRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3FixedRoomPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::FixedRoomPositionAllocator::FixedRoomPositionAllocator(ns3::FixedRoomPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::FixedRoomPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::FixedRoomPositionAllocator::FixedRoomPositionAllocator(uint32_t x, uint32_t y, uint32_t z, ns3::Ptr<ns3::Building> b) [constructor] cls.add_constructor([param('uint32_t', 'x'), param('uint32_t', 'y'), param('uint32_t', 'z'), param('ns3::Ptr< ns3::Building >', 'b')]) ## building-position-allocator.h (module 'buildings'): int64_t 
ns3::FixedRoomPositionAllocator::AssignStreams(int64_t arg0) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'arg0')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::FixedRoomPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::FixedRoomPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3FixedRssLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel::FixedRssLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::FixedRssLossModel::SetRss(double rss) [member function] cls.add_method('SetRss', 'void', [param('double', 'rss')]) ## propagation-loss-model.h (module 'propagation'): double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FixedRssLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3FriisPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetFrequency(double frequency) [member function] cls.add_method('SetFrequency', 'void', [param('double', 'frequency')]) ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function] cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')]) ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetMinLoss(double minLoss) [member function] cls.add_method('SetMinLoss', 'void', [param('double', 'minLoss')]) ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetMinLoss() const [member function] cls.add_method('GetMinLoss', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetFrequency() const [member function] cls.add_method('GetFrequency', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function] cls.add_method('GetSystemLoss', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, 
ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FriisPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3GammaRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function] cls.add_method('GetBeta', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha'), param('uint32_t', 'beta')]) ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3GridBuildingAllocator_methods(root_module, cls): ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator::GridBuildingAllocator(ns3::GridBuildingAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::GridBuildingAllocator const &', 'arg0')]) ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator::GridBuildingAllocator() [constructor] cls.add_constructor([]) ## building-allocator.h (module 'buildings'): ns3::BuildingContainer ns3::GridBuildingAllocator::Create(uint32_t n) const [member function] cls.add_method('Create', 'ns3::BuildingContainer', [param('uint32_t', 'n')], is_const=True) ## building-allocator.h (module 'buildings'): static ns3::TypeId ns3::GridBuildingAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## building-allocator.h (module 'buildings'): void ns3::GridBuildingAllocator::SetBuildingAttribute(std::string n, ns3::AttributeValue const & v) [member function] cls.add_method('SetBuildingAttribute', 'void', [param('std::string', 'n'), param('ns3::AttributeValue const &', 'v')]) return def register_Ns3GridPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::GridPositionAllocator(ns3::GridPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::GridPositionAllocator const &', 'arg0')]) ## position-allocator.h 
(module 'mobility'): ns3::GridPositionAllocator::GridPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::GridPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetDeltaX() const [member function] cls.add_method('GetDeltaX', 'double', [], is_const=True) ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetDeltaY() const [member function] cls.add_method('GetDeltaY', 'double', [], is_const=True) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::LayoutType ns3::GridPositionAllocator::GetLayoutType() const [member function] cls.add_method('GetLayoutType', 'ns3::GridPositionAllocator::LayoutType', [], is_const=True) ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetMinX() const [member function] cls.add_method('GetMinX', 'double', [], is_const=True) ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetMinY() const [member function] cls.add_method('GetMinY', 'double', [], is_const=True) ## position-allocator.h (module 'mobility'): uint32_t ns3::GridPositionAllocator::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::GridPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::GridPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetDeltaX(double deltaX) [member function] cls.add_method('SetDeltaX', 'void', [param('double', 'deltaX')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetDeltaY(double deltaY) [member function] cls.add_method('SetDeltaY', 'void', [param('double', 'deltaY')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetLayoutType(ns3::GridPositionAllocator::LayoutType layoutType) [member function] cls.add_method('SetLayoutType', 'void', [param('ns3::GridPositionAllocator::LayoutType', 'layoutType')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetMinX(double xMin) [member function] cls.add_method('SetMinX', 'void', [param('double', 'xMin')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetMinY(double yMin) [member function] cls.add_method('SetMinY', 'void', [param('double', 'yMin')]) ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetN(uint32_t n) [member function] cls.add_method('SetN', 'void', [param('uint32_t', 'n')]) return def register_Ns3HybridBuildingsPropagationLossModel_methods(root_module, cls): ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::HybridBuildingsPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): ns3::HybridBuildingsPropagationLossModel::HybridBuildingsPropagationLossModel() [constructor] cls.add_constructor([]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void 
ns3::HybridBuildingsPropagationLossModel::SetEnvironment(ns3::EnvironmentType env) [member function] cls.add_method('SetEnvironment', 'void', [param('ns3::EnvironmentType', 'env')]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetCitySize(ns3::CitySize size) [member function] cls.add_method('SetCitySize', 'void', [param('ns3::CitySize', 'size')]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetFrequency(double freq) [member function] cls.add_method('SetFrequency', 'void', [param('double', 'freq')]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetRooftopHeight(double rooftopHeight) [member function] cls.add_method('SetRooftopHeight', 'void', [param('double', 'rooftopHeight')]) ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): double ns3::HybridBuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h 
(module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')]) return def register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')]) return def register_Ns3Ipv6PrefixValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')]) return def register_Ns3ItuR1238PropagationLossModel_methods(root_module, cls): ## itu-r-1238-propagation-loss-model.h (module 'buildings'): ns3::ItuR1238PropagationLossModel::ItuR1238PropagationLossModel() [constructor] cls.add_constructor([]) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::ItuR1238PropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): double ns3::ItuR1238PropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', 
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): double ns3::ItuR1238PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): int64_t ns3::ItuR1238PropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3ListPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator::ListPositionAllocator(ns3::ListPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ListPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator::ListPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): void ns3::ListPositionAllocator::Add(ns3::Vector v) [member function] cls.add_method('Add', 'void', [param('ns3::Vector', 'v')]) ## position-allocator.h (module 'mobility'): int64_t ns3::ListPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::ListPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::ListPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function] cls.add_method('SetPathLossExponent', 'void', [param('double', 'n')]) ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function] cls.add_method('GetPathLossExponent', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function] cls.add_method('SetReference', 'void', [param('double', 'referenceDistance'), param('double', 'referenceLoss')]) ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< 
ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::LogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3LogNormalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function] cls.add_method('GetMu', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function] cls.add_method('GetSigma', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function] cls.add_method('GetValue', 'double', [param('double', 'mu'), param('double', 'sigma')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mu'), param('uint32_t', 'sigma')]) ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3MatrixPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, double loss, bool symmetric=true) [member function] cls.add_method('SetLoss', 'void', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')]) ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetDefaultLoss(double defaultLoss) [member function] cls.add_method('SetDefaultLoss', 'void', [param('double', 'defaultLoss')]) ## propagation-loss-model.h (module 'propagation'): double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::MatrixPropagationLossModel::DoAssignStreams(int64_t stream) [member function] 
cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3MobilityBuildingInfo_methods(root_module, cls): ## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo::MobilityBuildingInfo(ns3::MobilityBuildingInfo const & arg0) [copy constructor] cls.add_constructor([param('ns3::MobilityBuildingInfo const &', 'arg0')]) ## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo::MobilityBuildingInfo() [constructor] cls.add_constructor([]) ## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo::MobilityBuildingInfo(ns3::Ptr<ns3::Building> building) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Building >', 'building')]) ## mobility-building-info.h (module 'buildings'): ns3::Ptr<ns3::Building> ns3::MobilityBuildingInfo::GetBuilding() [member function] cls.add_method('GetBuilding', 'ns3::Ptr< ns3::Building >', []) ## mobility-building-info.h (module 'buildings'): uint8_t ns3::MobilityBuildingInfo::GetFloorNumber() [member function] cls.add_method('GetFloorNumber', 'uint8_t', []) ## mobility-building-info.h (module 'buildings'): uint8_t ns3::MobilityBuildingInfo::GetRoomNumberX() [member function] cls.add_method('GetRoomNumberX', 'uint8_t', []) ## mobility-building-info.h (module 'buildings'): uint8_t ns3::MobilityBuildingInfo::GetRoomNumberY() [member function] cls.add_method('GetRoomNumberY', 'uint8_t', []) ## mobility-building-info.h (module 'buildings'): static ns3::TypeId ns3::MobilityBuildingInfo::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## mobility-building-info.h (module 'buildings'): bool ns3::MobilityBuildingInfo::IsIndoor() [member function] cls.add_method('IsIndoor', 'bool', []) ## mobility-building-info.h (module 'buildings'): bool ns3::MobilityBuildingInfo::IsOutdoor() [member function] cls.add_method('IsOutdoor', 'bool', []) ## mobility-building-info.h (module 'buildings'): void ns3::MobilityBuildingInfo::SetIndoor(ns3::Ptr<ns3::Building> building, uint8_t nfloor, uint8_t nroomx, uint8_t nroomy) [member function] cls.add_method('SetIndoor', 'void', [param('ns3::Ptr< ns3::Building >', 'building'), param('uint8_t', 'nfloor'), param('uint8_t', 'nroomx'), param('uint8_t', 'nroomy')]) ## mobility-building-info.h (module 'buildings'): void ns3::MobilityBuildingInfo::SetIndoor(uint8_t nfloor, uint8_t nroomx, uint8_t nroomy) [member function] cls.add_method('SetIndoor', 'void', [param('uint8_t', 'nfloor'), param('uint8_t', 'nroomx'), param('uint8_t', 'nroomy')]) ## mobility-building-info.h (module 'buildings'): void ns3::MobilityBuildingInfo::SetOutdoor() [member function] cls.add_method('SetOutdoor', 'void', []) return def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< 
ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::NakagamiPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3NetDevice_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDevice const &', 'arg0')]) ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member 
function] cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool 
ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), 
param('bool', 'promiscuous', default_value='false')]) ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3NormalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable] cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function] cls.add_method('GetVariance', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def 
register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3OhBuildingsPropagationLossModel_methods(root_module, cls): ## oh-buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::OhBuildingsPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## oh-buildings-propagation-loss-model.h (module 'buildings'): ns3::OhBuildingsPropagationLossModel::OhBuildingsPropagationLossModel() [constructor] cls.add_constructor([]) ## oh-buildings-propagation-loss-model.h (module 'buildings'): double ns3::OhBuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) return def register_Ns3ParetoRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const 
[member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function] cls.add_method('GetShape', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'shape'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue 
const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3Vector2DChecker_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')]) return def register_Ns3Vector2DValue_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor] cls.add_constructor([param('ns3::Vector2D const &', 'value')]) ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function] cls.add_method('Get', 'ns3::Vector2D', [], is_const=True) ## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Vector2D const &', 'value')]) return def register_Ns3Vector3DChecker_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): 
ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')]) return def register_Ns3Vector3DValue_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor] cls.add_constructor([param('ns3::Vector3D const &', 'value')]) ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function] cls.add_method('Get', 'ns3::Vector3D', [], is_const=True) ## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Vector3D const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] 
cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy 
constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
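Editor's note: the file above is pybindgen-generated glue for ns-3's Python bindings. Each register_*_methods helper re-declares one C++ class's constructors and methods on a pybindgen class wrapper, and main() writes the resulting extension source to stdout. A minimal hand-written sketch of the same pattern, with a hypothetical ns3::Demo class and demo.h header standing in for the real ns-3 code, could look like this (assumes pybindgen is installed):

#!/usr/bin/env python
# Minimal sketch of the pybindgen pattern used by the generated
# register_* helpers above.  'demo.h' and ns3::Demo are hypothetical
# examples, not real ns-3 classes.
import sys

from pybindgen import Module, FileCodeSink, param


def main():
    root_module = Module('demo', cpp_namespace='::ns3')
    root_module.add_include('"demo.h"')

    ## demo.h (module 'demo'): class ns3::Demo [class]
    cls = root_module.add_class('Demo')
    ## demo.h (module 'demo'): ns3::Demo::Demo() [constructor]
    cls.add_constructor([])
    ## demo.h (module 'demo'): uint32_t ns3::Demo::GetValue() const [member function]
    cls.add_method('GetValue', 'uint32_t', [], is_const=True)
    ## demo.h (module 'demo'): void ns3::Demo::SetValue(uint32_t value) [member function]
    cls.add_method('SetValue', 'void', [param('uint32_t', 'value')])

    # Emit the generated extension source to stdout, as the ns-3 scripts
    # do with FileCodeSink(sys.stdout).
    root_module.generate(FileCodeSink(sys.stdout))


if __name__ == '__main__':
    main()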
h4ck3rm1k3/pywikibot-core
refs/heads/master
tests/timestripper_tests.py
3
# -*- coding: utf-8 -*- """Tests for archivebot.py/Timestripper.""" # # (C) Pywikibot team, 2014 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, unicode_literals __version__ = '$Id$' import datetime from pywikibot.textlib import TimeStripper, tzoneFixedOffset from tests.aspects import ( unittest, TestCase, DefaultSiteTestCase, DeprecationTestCase, ) class TestTimeStripperCase(TestCase): """Basic class to test the TimeStripper class.""" cached = True def setUp(self): """Set up test cases.""" super(TestTimeStripperCase, self).setUp() self.ts = TimeStripper(self.get_site()) class DeprecatedTestTimeStripperCase(TestTimeStripperCase, DeprecationTestCase, DefaultSiteTestCase): """Test deprecated parts of the TimeStripper class.""" def test_findmarker(self): """Test that string which is not part of text is found.""" txt = u'this is a string with a maker is @@@@already present' self.assertEqual(self.ts.findmarker(txt, base=u'@@', delta='@@'), '@@@@@@') self.assertOneDeprecation() class TestTimeStripperWithNoDigitsAsMonths(TestTimeStripperCase): """Test cases for TimeStripper methods.""" family = 'wikipedia' code = 'fr' def test_last_match_and_replace(self): """Test that pattern matches and removes items correctly.""" txtWithOneMatch = u'this string has 3000, 1999 and 3000 in it' txtWithTwoMatch = u'this string has 1998, 1999 and 3000 in it' txtWithNoMatch = u'this string has no match' pat = self.ts.pyearR self.assertEqual(self.ts.last_match_and_replace(txtWithOneMatch, pat), (u'this string has 3000, @@ and 3000 in it', {'year': u'1999'}) ) self.assertEqual(self.ts.last_match_and_replace(txtWithTwoMatch, pat), (u'this string has @@, @@ and 3000 in it', {'year': u'1999'}) ) self.assertEqual(self.ts.last_match_and_replace(txtWithNoMatch, pat), (txtWithNoMatch, None) ) txtWithOneMatch = u'this string has XXX, YYY and février in it' txtWithTwoMatch = u'this string has XXX, mars and février in it' txtWithThreeMatch = u'this string has avr, mars and février in it' txtWithNoMatch = u'this string has no match' pat = self.ts.pmonthR self.assertEqual(self.ts.last_match_and_replace(txtWithOneMatch, pat), (u'this string has XXX, YYY and @@ in it', {'month': u'février'}) ) self.assertEqual(self.ts.last_match_and_replace(txtWithTwoMatch, pat), (u'this string has XXX, @@ and @@ in it', {'month': u'février'}) ) self.assertEqual(self.ts.last_match_and_replace(txtWithThreeMatch, pat), (u'this string has @@, @@ and @@ in it', {'month': u'février'}) ) self.assertEqual(self.ts.last_match_and_replace(txtWithNoMatch, pat), (txtWithNoMatch, None) ) def test_hour(self): """Test that correct hour is matched.""" txtHourInRange = u'7 février 2010 à 23:00 (CET)' txtHourOutOfRange = u'7 février 2010 à 24:00 (CET)' self.assertNotEqual(self.ts.timestripper(txtHourInRange), None) self.assertEqual(self.ts.timestripper(txtHourOutOfRange), None) class TestTimeStripperWithDigitsAsMonths(TestTimeStripperCase): """Test cases for TimeStripper methods.""" family = 'wikipedia' code = 'cs' def test_last_match_and_replace(self): """Test that pattern matches and removes items correctly.""" txtWithOneMatch = u'this string has XX. YY. 12. in it' txtWithTwoMatch = u'this string has XX. 1. 12. in it' txtWithThreeMatch = u'this string has 1. 1. 12. in it' txtWithNoMatch = u'this string has no match' pat = self.ts.pmonthR self.assertEqual(self.ts.last_match_and_replace(txtWithOneMatch, pat), (u'this string has XX. YY. 12. 
in it', {'month': u'12.'}) ) self.assertEqual(self.ts.last_match_and_replace(txtWithTwoMatch, pat), (u'this string has XX. 1. 12. in it', {'month': u'12.'}) ) self.assertEqual(self.ts.last_match_and_replace(txtWithThreeMatch, pat), (u'this string has @@ 1. 12. in it', {'month': u'12.'}) ) self.assertEqual(self.ts.last_match_and_replace(txtWithNoMatch, pat), (txtWithNoMatch, None) ) class TestTimeStripperLanguage(TestCase): """Test cases for English language.""" sites = { 'cswiki': { 'family': 'wikipedia', 'code': 'cs', 'match': u'3. 2. 2011, 19:48 (UTC) 7. 2. 2010 19:48 (UTC)', }, 'enwiki': { 'family': 'wikipedia', 'code': 'en', 'match': u'3 February 2011 19:48 (UTC) 7 February 2010 19:48 (UTC)', 'nomatch': u'3. 2. 2011, 19:48 (UTC) 7. 2. 2010 19:48 (UTC)', }, 'fawiki': { 'family': 'wikipedia', 'code': 'fa', 'match': u'۳ فوریهٔ ۲۰۱۱، ساعت ۱۹:۴۸ (UTC) ۷ فوریهٔ ۲۰۱۰، ساعت ۱۹:۴۸ (UTC)', 'nomatch': u'۳ ۲ ۲۰۱۴ ۱۹:۴۸ (UTC) ۷ ۲ ۲۰۱۰ ۱۹:۴۸ (UTC)', }, 'frwiki': { 'family': 'wikipedia', 'code': 'fr', 'match': u'3 février 2011 à 19:48 (CET) 7 février 2010 à 19:48 (CET)', 'nomatch': u'3 March 2011 19:48 (CET) 7 March 2010 19:48 (CET)', }, 'kowiki': { 'family': 'wikipedia', 'code': 'ko', 'match': u'2011년 2월 3일 (수) 19:48 (KST) 2010년 2월 7일 (수) 19:48 (KST)', }, 'nowiki': { 'family': 'wikipedia', 'code': 'no', 'match': u'3. feb 2011 kl. 19:48 (CET) 7. feb 2010 kl. 19:48 (UTC)', }, 'ptwiki': { 'family': 'wikipedia', 'code': 'pt', 'match': '19h48min de 3 de fevereiro de 2011‎ (UTC) 19h48min ' 'de 7 de fevereiro de 2010‎ (UTC)', }, 'viwiki': { 'family': 'wikipedia', 'code': 'vi', 'match': '19:48, ngày 3 tháng 2 năm 2011 (UTC) 19:48, ngày 7 tháng 2 năm 2010 (UTC)', 'match2': '16:41, ngày 15 tháng 9 năm 2001 (UTC) 16:41, ' 'ngày 12 tháng 9 năm 2008 (UTC)', 'match3': '21:18, ngày 13 tháng 8 năm 2011 (UTC) 21:18, ' 'ngày 14 tháng 8 năm 2014 (UTC)', 'nomatch1': '21:18, ngày 13 March 8 năm 2011 (UTC) 21:18, ' 'ngày 14 March 8 năm 2014 (UTC)', }, } cached = True def test_timestripper_match(self, key): """Test that correct date is matched.""" self.ts = TimeStripper(self.get_site(key)) tzone = tzoneFixedOffset(self.ts.site.siteinfo['timeoffset'], self.ts.site.siteinfo['timezone']) txtMatch = self.sites[key]['match'] res = datetime.datetime(2010, 2, 7, 19, 48, tzinfo=tzone) self.assertEqual(self.ts.timestripper(txtMatch), res) if 'match2' not in self.sites[key]: return txtMatch = self.sites[key]['match2'] res = datetime.datetime(2008, 9, 12, 16, 41, tzinfo=tzone) self.assertEqual(self.ts.timestripper(txtMatch), res) if 'match3' not in self.sites[key]: return txtMatch = self.sites[key]['match3'] res = datetime.datetime(2014, 8, 14, 21, 18, tzinfo=tzone) self.assertEqual(self.ts.timestripper(txtMatch), res) def test_timestripper_nomatch(self, key): """Test that correct date is not matched.""" self.ts = TimeStripper(self.get_site(key)) if 'nomatch' in self.sites[key]: txtNoMatch = self.sites[key]['nomatch'] else: txtNoMatch = u'3 March 2011 19:48 (UTC) 7 March 2010 19:48 (UTC)' self.assertEqual(self.ts.timestripper(txtNoMatch), None) if 'nomatch1' not in self.sites[key]: return txtNoMatch = self.sites[key]['nomatch1'] self.assertEqual(self.ts.timestripper(txtNoMatch), None) class TestTimeStripperDoNotArchiveUntil(TestTimeStripperCase): """Test cases for Do Not Archive Until templates. See https://commons.wikimedia.org/wiki/Template:DNAU and https://en.wikipedia.org/wiki/Template:Do_not_archive_until. 
""" family = 'wikisource' code = 'en' username = '[[User:DoNotArchiveUntil]]' date = '06:57 06 June 2015 (UTC)' user_and_date = username + ' ' + date tzone = tzoneFixedOffset(0, 'UTC') def test_timestripper_match(self): """Test that dates in comments are correctly recognised.""" ts = self.ts txt_match = '<!-- [[User:Do___ArchiveUntil]] ' + self.date + ' -->' res = datetime.datetime(2015, 6, 6, 6, 57, tzinfo=self.tzone) self.assertEqual(ts.timestripper(txt_match), res) txt_match = '<!-- --> <!-- ' + self.user_and_date + ' <!-- -->' res = datetime.datetime(2015, 6, 6, 6, 57, tzinfo=self.tzone) self.assertEqual(ts.timestripper(txt_match), res) txt_match = '<!-- ' + self.user_and_date + ' -->' res = datetime.datetime(2015, 6, 6, 6, 57, tzinfo=self.tzone) self.assertEqual(ts.timestripper(txt_match), res) def test_timestripper_match_only(self): """Test that latest date is used instead of other dates.""" ts = self.ts later_date = '10:57 06 June 2015 (UTC)' txt_match = '<!-- --> ' + self.user_and_date + ' <!-- -->' + later_date res = datetime.datetime(2015, 6, 6, 10, 57, tzinfo=self.tzone) self.assertEqual(ts.timestripper(txt_match), res) earlier_date = '02:57 06 June 2015 (UTC)' txt_match = '<!-- ' + self.user_and_date + ' --> ' + earlier_date res = datetime.datetime(2015, 6, 6, 6, 57, tzinfo=self.tzone) self.assertEqual(ts.timestripper(txt_match), res) if __name__ == '__main__': try: unittest.main() except SystemExit: pass
XiaosongWei/crosswalk-test-suite
refs/heads/master
webapi/webapi-input-html5-tests/inst.xpk.py
456
#!/usr/bin/env python import os import shutil import glob import time import sys import subprocess import string from optparse import OptionParser, make_option SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) PKG_NAME = os.path.basename(SCRIPT_DIR) PARAMETERS = None #XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket" SRC_DIR = "" PKG_SRC_DIR = "" def doCMD(cmd): # Do not need handle timeout in this short script, let tool do it print "-->> \"%s\"" % cmd output = [] cmd_return_code = 1 cmd_proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) while True: output_line = cmd_proc.stdout.readline().strip("\r\n") cmd_return_code = cmd_proc.poll() if output_line == '' and cmd_return_code is not None: break sys.stdout.write("%s\n" % output_line) sys.stdout.flush() output.append(output_line) return (cmd_return_code, output) def updateCMD(cmd=None): if "pkgcmd" in cmd: cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd) return cmd def getUSERID(): if PARAMETERS.mode == "SDB": cmd = "sdb -s %s shell id -u %s" % ( PARAMETERS.device, PARAMETERS.user) else: cmd = "ssh %s \"id -u %s\"" % ( PARAMETERS.device, PARAMETERS.user) return doCMD(cmd) def getPKGID(pkg_name=None): if PARAMETERS.mode == "SDB": cmd = "sdb -s %s shell %s" % ( PARAMETERS.device, updateCMD('pkgcmd -l')) else: cmd = "ssh %s \"%s\"" % ( PARAMETERS.device, updateCMD('pkgcmd -l')) (return_code, output) = doCMD(cmd) if return_code != 0: return None test_pkg_id = None for line in output: if line.find("[" + pkg_name + "]") != -1: pkgidIndex = line.split().index("pkgid") test_pkg_id = line.split()[pkgidIndex + 1].strip("[]") break return test_pkg_id def doRemoteCMD(cmd=None): if PARAMETERS.mode == "SDB": cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd)) else: cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd)) return doCMD(cmd) def doRemoteCopy(src=None, dest=None): if PARAMETERS.mode == "SDB": cmd_prefix = "sdb -s %s push" % PARAMETERS.device cmd = "%s %s %s" % (cmd_prefix, src, dest) else: cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest) (return_code, output) = doCMD(cmd) doRemoteCMD("sync") if return_code != 0: return True else: return False def uninstPKGs(): action_status = True for root, dirs, files in os.walk(SCRIPT_DIR): if root.endswith("mediasrc"): continue for file in files: if file.endswith(".xpk"): pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0])) if not pkg_id: action_status = False continue (return_code, output) = doRemoteCMD( "pkgcmd -u -t xpk -q -n %s" % pkg_id) for line in output: if "Failure" in line: action_status = False break (return_code, output) = doRemoteCMD( "rm -rf %s" % PKG_SRC_DIR) if return_code != 0: action_status = False return action_status def instPKGs(): action_status = True (return_code, output) = doRemoteCMD( "mkdir -p %s" % PKG_SRC_DIR) if return_code != 0: action_status = False for root, dirs, files in os.walk(SCRIPT_DIR): if root.endswith("mediasrc"): continue for file in files: if file.endswith(".xpk"): if not doRemoteCopy( os.path.join(root, file), "%s/%s" % (SRC_DIR, file)): action_status = False (return_code, output) = doRemoteCMD( "pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file)) doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file)) for line in output: if "Failure" in line: action_status = False break # Do some special copy/delete... 
steps ''' (return_code, output) = doRemoteCMD( "mkdir -p %s/tests" % PKG_SRC_DIR) if return_code != 0: action_status = False if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR): action_status = False ''' return action_status def main(): try: usage = "usage: inst.py -i" opts_parser = OptionParser(usage=usage) opts_parser.add_option( "-m", dest="mode", action="store", help="Specify mode") opts_parser.add_option( "-s", dest="device", action="store", help="Specify device") opts_parser.add_option( "-i", dest="binstpkg", action="store_true", help="Install package") opts_parser.add_option( "-u", dest="buninstpkg", action="store_true", help="Uninstall package") opts_parser.add_option( "-a", dest="user", action="store", help="User name") global PARAMETERS (PARAMETERS, args) = opts_parser.parse_args() except Exception as e: print "Got wrong option: %s, exit ..." % e sys.exit(1) if not PARAMETERS.user: PARAMETERS.user = "app" global SRC_DIR, PKG_SRC_DIR SRC_DIR = "/home/%s/content" % PARAMETERS.user PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME) if not PARAMETERS.mode: PARAMETERS.mode = "SDB" if PARAMETERS.mode == "SDB": if not PARAMETERS.device: (return_code, output) = doCMD("sdb devices") for line in output: if str.find(line, "\tdevice") != -1: PARAMETERS.device = line.split("\t")[0] break else: PARAMETERS.mode = "SSH" if not PARAMETERS.device: print "No device provided" sys.exit(1) user_info = getUSERID() re_code = user_info[0] if re_code == 0: global XW_ENV userid = user_info[1][0] XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str( userid) else: print "[Error] cmd commands error : %s" % str(user_info[1]) sys.exit(1) if PARAMETERS.binstpkg and PARAMETERS.buninstpkg: print "-i and -u are conflict" sys.exit(1) if PARAMETERS.buninstpkg: if not uninstPKGs(): sys.exit(1) else: if not instPKGs(): sys.exit(1) if __name__ == "__main__": main() sys.exit(0)
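Editor's note: inst.xpk.py drives all of its sdb/ssh package operations through doCMD(), a thin subprocess wrapper that echoes the child's output line by line while collecting the lines and the exit code. A self-contained Python 3 restatement of that pattern (the echo command below is only a placeholder):

import subprocess
import sys


def run_streaming(cmd):
    """Run a shell command, echo its output live, and return (rc, lines)."""
    print('-->> "%s"' % cmd)
    output = []
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=True)
    while True:
        line = proc.stdout.readline().decode(errors='replace').rstrip('\r\n')
        rc = proc.poll()
        if line == '' and rc is not None:
            break
        # Echo each line as it arrives and keep a copy for the caller.
        sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        output.append(line)
    return proc.returncode, output


if __name__ == '__main__':
    rc, lines = run_streaming('echo hello')   # placeholder command
    print('exit code: %d, captured %d line(s)' % (rc, len(lines)))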
arista-eosplus/pyeapi
refs/heads/develop
test/system/test_api_mlag.py
1
# # Copyright (c) 2014, Arista Networks, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of Arista Networks nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN # IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import os import unittest import sys sys.path.append(os.path.join(os.path.dirname(__file__), '../lib')) from systestlib import DutSystemTest class TestApiMlag(DutSystemTest): def test_get(self): for dut in self.duts: dut.config(['no interface Port-Channel1-2000', 'default mlag configuration']) response = dut.api('mlag').get() config = dict(domain_id=None, local_interface=None, peer_link=None, peer_address=None, shutdown=False) values = dict(config=config, interfaces=dict()) self.assertEqual(values, response) def test_set_domain_id_with_value(self): for dut in self.duts: dut.config('default mlag configuration') api = dut.api('mlag') self.assertIn('no domain-id', api.get_block('mlag configuration')) for domid in ['test_domain_id', 'test.dom-id', 'test domain id']: result = dut.api('mlag').set_domain_id(domid) self.assertTrue(result) self.assertIn('domain-id %s' % domid, api.get_block('mlag configuration')) def test_set_domain_id_with_no_value(self): for dut in self.duts: dut.config(['mlag configuration', 'domain-id test']) api = dut.api('mlag') self.assertIn('domain-id test', api.get_block('mlag configuration')) result = dut.api('mlag').set_domain_id(disable=True) self.assertTrue(result) self.assertIn('no domain-id', api.get_block('mlag configuration')) def test_set_domain_id_with_default(self): for dut in self.duts: dut.config(['mlag configuration', 'domain-id test']) api = dut.api('mlag') self.assertIn('domain-id test', api.get_block('mlag configuration')) result = dut.api('mlag').set_domain_id(default=True) self.assertTrue(result) self.assertIn('no domain-id', api.get_block('mlag configuration')) def test_set_local_interface_with_value(self): for dut in self.duts: dut.config('default mlag configuration') api = dut.api('mlag') self.assertIn('no local-interface', api.get_block('mlag configuration')) result = dut.api('mlag').set_local_interface('Vlan1234') self.assertTrue(result) self.assertIn('local-interface Vlan1234', api.get_block('mlag configuration')) def test_set_local_interface_with_no_value(self): 
for dut in self.duts: dut.config(['interface Vlan1234', 'mlag configuration', 'local-interface Vlan1234']) api = dut.api('mlag') self.assertIn('local-interface Vlan1234', api.get_block('mlag configuration')) result = api.set_local_interface(disable=True) self.assertTrue(result) self.assertIn('no local-interface', api.get_block('mlag configuration')) def test_set_local_interface_with_default(self): for dut in self.duts: dut.config(['interface Vlan1234', 'mlag configuration', 'local-interface Vlan1234']) api = dut.api('mlag') self.assertIn('local-interface Vlan1234', api.get_block('mlag configuration')) result = api.set_local_interface(default=True) self.assertTrue(result) self.assertIn('no local-interface', api.get_block('mlag configuration')) def test_set_peer_address_with_value(self): for dut in self.duts: dut.config('default mlag configuration') api = dut.api('mlag') self.assertIn('no peer-address', api.get_block('mlag configuration')) result = dut.api('mlag').set_peer_address('1.2.3.4') self.assertTrue(result) self.assertIn('peer-address 1.2.3.4', api.get_block('mlag configuration')) def test_set_peer_address_with_no_value(self): for dut in self.duts: dut.config(['interface Vlan1234', 'ip address 1.2.3.1/24', 'mlag configuration', 'peer-address 1.2.3.4']) api = dut.api('mlag') self.assertIn('peer-address 1.2.3.4', api.get_block('mlag configuration')) result = api.set_peer_address(disable=True) self.assertTrue(result) self.assertIn('no peer-address', api.get_block('mlag configuration')) def test_set_peer_address_with_default(self): for dut in self.duts: dut.config(['interface Vlan1234', 'ip address 1.2.3.1/24', 'mlag configuration', 'peer-address 1.2.3.4']) api = dut.api('mlag') self.assertIn('peer-address 1.2.3.4', api.get_block('mlag configuration')) result = api.set_peer_address(default=True) self.assertTrue(result) self.assertIn('no peer-address', api.get_block('mlag configuration')) def test_set_peer_link_with_value(self): for dut in self.duts: dut.config('default mlag configuration') api = dut.api('mlag') self.assertIn('no peer-link', api.get_block('mlag configuration')) result = dut.api('mlag').set_peer_link('Ethernet1') self.assertTrue(result) self.assertIn('peer-link Ethernet1', api.get_block('mlag configuration')) def test_set_peer_link_with_value_portchannel(self): for dut in self.duts: dut.config(['default mlag configuration', 'interface Port-Channel5']) api = dut.api('mlag') self.assertIn('no peer-link', api.get_block('mlag configuration')) result = dut.api('mlag').set_peer_link('Port-Channel5') self.assertTrue(result) self.assertIn('peer-link Port-Channel5', api.get_block('mlag configuration')) def test_set_peer_link_with_no_value(self): for dut in self.duts: dut.config(['mlag configuration', 'peer-link Ethernet1']) api = dut.api('mlag') self.assertIn('peer-link Ethernet1', api.get_block('mlag configuration')) result = api.set_peer_link(disable=True) self.assertTrue(result) self.assertIn('no peer-link', api.get_block('mlag configuration')) def test_set_peer_link_with_default(self): for dut in self.duts: dut.config(['mlag configuration', 'peer-link Ethernet1']) api = dut.api('mlag') self.assertIn('peer-link Ethernet1', api.get_block('mlag configuration')) result = api.set_peer_link(default=True) self.assertTrue(result) self.assertIn('no peer-link', api.get_block('mlag configuration')) def test_set_shutdown_with_true(self): for dut in self.duts: dut.config('default mlag configuration') api = dut.api('mlag') self.assertIn('no shutdown', api.get_block('mlag configuration')) 
            result = api.set_shutdown(True)
            self.assertTrue(result)
            self.assertIn('shutdown', api.get_block('mlag configuration'))

    def test_set_shutdown_with_false(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'shutdown'])
            api = dut.api('mlag')
            self.assertIn('shutdown', api.get_block('mlag configuration'))
            result = api.set_shutdown(False)
            self.assertTrue(result)
            self.assertIn('no shutdown', api.get_block('mlag configuration'))

    def test_set_shutdown_with_no_value(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'shutdown'])
            api = dut.api('mlag')
            self.assertIn('shutdown', api.get_block('mlag configuration'))
            result = api.set_shutdown(disable=True)
            self.assertTrue(result)
            self.assertIn('no shutdown', api.get_block('mlag configuration'))

    def test_set_shutdown_with_default(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'shutdown'])
            api = dut.api('mlag')
            self.assertIn('shutdown', api.get_block('mlag configuration'))
            result = api.set_shutdown(default=True)
            self.assertTrue(result)
            self.assertIn('no shutdown', api.get_block('mlag configuration'))

    def test_set_mlag_id_with_value(self):
        for dut in self.duts:
            dut.config('no interface Port-Channel10')
            api = dut.api('mlag')
            self.assertIsNone(api.get_block('interface Port-Channel10'))
            result = api.set_mlag_id('Port-Channel10', '100')
            self.assertTrue(result)
            self.assertIn('mlag 100', api.get_block('interface Port-Channel10'))

    def test_set_mlag_id_with_no_value(self):
        for dut in self.duts:
            dut.config(['no interface Port-Channel10', 'interface Port-Channel10', 'mlag 100'])
            api = dut.api('mlag')
            self.assertIn('mlag 100', api.get_block('interface Port-Channel10'))
            result = api.set_mlag_id('Port-Channel10', disable=True)
            self.assertTrue(result)
            self.assertIn('no mlag', api.get_block('interface Port-Channel10'))

    def test_set_mlag_id_with_default(self):
        for dut in self.duts:
            dut.config(['no interface Port-Channel10', 'interface Port-Channel10', 'mlag 100'])
            api = dut.api('mlag')
            self.assertIn('mlag 100', api.get_block('interface Port-Channel10'))
            result = api.set_mlag_id('Port-Channel10', default=True)
            self.assertTrue(result)
            self.assertIn('no mlag', api.get_block('interface Port-Channel10'))


if __name__ == '__main__':
    unittest.main()
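Editor's note: the system tests above exercise pyeapi's mlag API against live DUTs. Outside the test harness the same entry points are reached through a connection profile loaded from eapi.conf; a rough sketch, where the profile name 'veos01', the domain name, and the addresses are placeholders, might be:

# Sketch assuming a reachable EOS device defined in ~/.eapi.conf
# under a [connection:veos01] section.
import pyeapi

node = pyeapi.connect_to('veos01')        # placeholder profile name
mlag = node.api('mlag')

print(mlag.get())                         # current domain-id, peer-link, peer-address, ...
mlag.set_domain_id('mlag-domain-1')       # placeholder domain-id
mlag.set_peer_link('Port-Channel5')
mlag.set_peer_address('10.0.0.2')         # placeholder peer address
mlag.set_mlag_id('Port-Channel10', '100')  # bind a port-channel to MLAG id 100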
corburn/scikit-bio
refs/heads/master
skbio/diversity/tests/__init__.py
160
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

from __future__ import absolute_import, division, print_function
abagh0703/RetailTrail
refs/heads/master
flask/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py
310
from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import with_metaclass import types from . import inputstream from . import tokenizer from . import treebuilders from .treebuilders._base import Marker from . import utils from . import constants from .constants import spaceCharacters, asciiUpper2Lower from .constants import specialElements from .constants import headingElements from .constants import cdataElements, rcdataElements from .constants import tokenTypes, ReparseException, namespaces from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements from .constants import adjustForeignAttributes as adjustForeignAttributesMap def parse(doc, treebuilder="etree", encoding=None, namespaceHTMLElements=True): """Parse a string or file-like object into a tree""" tb = treebuilders.getTreeBuilder(treebuilder) p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) return p.parse(doc, encoding=encoding) def parseFragment(doc, container="div", treebuilder="etree", encoding=None, namespaceHTMLElements=True): tb = treebuilders.getTreeBuilder(treebuilder) p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) return p.parseFragment(doc, container=container, encoding=encoding) def method_decorator_metaclass(function): class Decorated(type): def __new__(meta, classname, bases, classDict): for attributeName, attribute in classDict.items(): if isinstance(attribute, types.FunctionType): attribute = function(attribute) classDict[attributeName] = attribute return type.__new__(meta, classname, bases, classDict) return Decorated class HTMLParser(object): """HTML parser. Generates a tree structure from a stream of (possibly malformed) HTML""" def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer, strict=False, namespaceHTMLElements=True, debug=False): """ strict - raise an exception when a parse error is encountered tree - a treebuilder class controlling the type of tree that will be returned. Built in treebuilders can be accessed through html5lib.treebuilders.getTreeBuilder(treeType) tokenizer - a class that provides a stream of tokens to the treebuilder. This may be replaced for e.g. 
a sanitizer which converts some tags to text """ # Raise an exception on the first error encountered self.strict = strict if tree is None: tree = treebuilders.getTreeBuilder("etree") self.tree = tree(namespaceHTMLElements) self.tokenizer_class = tokenizer self.errors = [] self.phases = dict([(name, cls(self, self.tree)) for name, cls in getPhases(debug).items()]) def _parse(self, stream, innerHTML=False, container="div", encoding=None, parseMeta=True, useChardet=True, **kwargs): self.innerHTMLMode = innerHTML self.container = container self.tokenizer = self.tokenizer_class(stream, encoding=encoding, parseMeta=parseMeta, useChardet=useChardet, parser=self, **kwargs) self.reset() while True: try: self.mainLoop() break except ReparseException: self.reset() def reset(self): self.tree.reset() self.firstStartTag = False self.errors = [] self.log = [] # only used with debug mode # "quirks" / "limited quirks" / "no quirks" self.compatMode = "no quirks" if self.innerHTMLMode: self.innerHTML = self.container.lower() if self.innerHTML in cdataElements: self.tokenizer.state = self.tokenizer.rcdataState elif self.innerHTML in rcdataElements: self.tokenizer.state = self.tokenizer.rawtextState elif self.innerHTML == 'plaintext': self.tokenizer.state = self.tokenizer.plaintextState else: # state already is data state # self.tokenizer.state = self.tokenizer.dataState pass self.phase = self.phases["beforeHtml"] self.phase.insertHtmlElement() self.resetInsertionMode() else: self.innerHTML = False self.phase = self.phases["initial"] self.lastPhase = None self.beforeRCDataPhase = None self.framesetOK = True def isHTMLIntegrationPoint(self, element): if (element.name == "annotation-xml" and element.namespace == namespaces["mathml"]): return ("encoding" in element.attributes and element.attributes["encoding"].translate( asciiUpper2Lower) in ("text/html", "application/xhtml+xml")) else: return (element.namespace, element.name) in htmlIntegrationPointElements def isMathMLTextIntegrationPoint(self, element): return (element.namespace, element.name) in mathmlTextIntegrationPointElements def mainLoop(self): CharactersToken = tokenTypes["Characters"] SpaceCharactersToken = tokenTypes["SpaceCharacters"] StartTagToken = tokenTypes["StartTag"] EndTagToken = tokenTypes["EndTag"] CommentToken = tokenTypes["Comment"] DoctypeToken = tokenTypes["Doctype"] ParseErrorToken = tokenTypes["ParseError"] for token in self.normalizedTokens(): new_token = token while new_token is not None: currentNode = self.tree.openElements[-1] if self.tree.openElements else None currentNodeNamespace = currentNode.namespace if currentNode else None currentNodeName = currentNode.name if currentNode else None type = new_token["type"] if type == ParseErrorToken: self.parseError(new_token["data"], new_token.get("datavars", {})) new_token = None else: if (len(self.tree.openElements) == 0 or currentNodeNamespace == self.tree.defaultNamespace or (self.isMathMLTextIntegrationPoint(currentNode) and ((type == StartTagToken and token["name"] not in frozenset(["mglyph", "malignmark"])) or type in (CharactersToken, SpaceCharactersToken))) or (currentNodeNamespace == namespaces["mathml"] and currentNodeName == "annotation-xml" and token["name"] == "svg") or (self.isHTMLIntegrationPoint(currentNode) and type in (StartTagToken, CharactersToken, SpaceCharactersToken))): phase = self.phase else: phase = self.phases["inForeignContent"] if type == CharactersToken: new_token = phase.processCharacters(new_token) elif type == SpaceCharactersToken: new_token = 
phase.processSpaceCharacters(new_token) elif type == StartTagToken: new_token = phase.processStartTag(new_token) elif type == EndTagToken: new_token = phase.processEndTag(new_token) elif type == CommentToken: new_token = phase.processComment(new_token) elif type == DoctypeToken: new_token = phase.processDoctype(new_token) if (type == StartTagToken and token["selfClosing"] and not token["selfClosingAcknowledged"]): self.parseError("non-void-element-with-trailing-solidus", {"name": token["name"]}) # When the loop finishes it's EOF reprocess = True phases = [] while reprocess: phases.append(self.phase) reprocess = self.phase.processEOF() if reprocess: assert self.phase not in phases def normalizedTokens(self): for token in self.tokenizer: yield self.normalizeToken(token) def parse(self, stream, encoding=None, parseMeta=True, useChardet=True): """Parse a HTML document into a well-formed tree stream - a filelike object or string containing the HTML to be parsed The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) """ self._parse(stream, innerHTML=False, encoding=encoding, parseMeta=parseMeta, useChardet=useChardet) return self.tree.getDocument() def parseFragment(self, stream, container="div", encoding=None, parseMeta=False, useChardet=True): """Parse a HTML fragment into a well-formed tree fragment container - name of the element we're setting the innerHTML property if set to None, default to 'div' stream - a filelike object or string containing the HTML to be parsed The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) """ self._parse(stream, True, container=container, encoding=encoding) return self.tree.getFragment() def parseError(self, errorcode="XXX-undefined-error", datavars={}): # XXX The idea is to make errorcode mandatory. 
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) if self.strict: raise ParseError def normalizeToken(self, token): """ HTML5 specific normalizations to the token stream """ if token["type"] == tokenTypes["StartTag"]: token["data"] = dict(token["data"][::-1]) return token def adjustMathMLAttributes(self, token): replacements = {"definitionurl": "definitionURL"} for k, v in replacements.items(): if k in token["data"]: token["data"][v] = token["data"][k] del token["data"][k] def adjustSVGAttributes(self, token): replacements = { "attributename": "attributeName", "attributetype": "attributeType", "basefrequency": "baseFrequency", "baseprofile": "baseProfile", "calcmode": "calcMode", "clippathunits": "clipPathUnits", "contentscripttype": "contentScriptType", "contentstyletype": "contentStyleType", "diffuseconstant": "diffuseConstant", "edgemode": "edgeMode", "externalresourcesrequired": "externalResourcesRequired", "filterres": "filterRes", "filterunits": "filterUnits", "glyphref": "glyphRef", "gradienttransform": "gradientTransform", "gradientunits": "gradientUnits", "kernelmatrix": "kernelMatrix", "kernelunitlength": "kernelUnitLength", "keypoints": "keyPoints", "keysplines": "keySplines", "keytimes": "keyTimes", "lengthadjust": "lengthAdjust", "limitingconeangle": "limitingConeAngle", "markerheight": "markerHeight", "markerunits": "markerUnits", "markerwidth": "markerWidth", "maskcontentunits": "maskContentUnits", "maskunits": "maskUnits", "numoctaves": "numOctaves", "pathlength": "pathLength", "patterncontentunits": "patternContentUnits", "patterntransform": "patternTransform", "patternunits": "patternUnits", "pointsatx": "pointsAtX", "pointsaty": "pointsAtY", "pointsatz": "pointsAtZ", "preservealpha": "preserveAlpha", "preserveaspectratio": "preserveAspectRatio", "primitiveunits": "primitiveUnits", "refx": "refX", "refy": "refY", "repeatcount": "repeatCount", "repeatdur": "repeatDur", "requiredextensions": "requiredExtensions", "requiredfeatures": "requiredFeatures", "specularconstant": "specularConstant", "specularexponent": "specularExponent", "spreadmethod": "spreadMethod", "startoffset": "startOffset", "stddeviation": "stdDeviation", "stitchtiles": "stitchTiles", "surfacescale": "surfaceScale", "systemlanguage": "systemLanguage", "tablevalues": "tableValues", "targetx": "targetX", "targety": "targetY", "textlength": "textLength", "viewbox": "viewBox", "viewtarget": "viewTarget", "xchannelselector": "xChannelSelector", "ychannelselector": "yChannelSelector", "zoomandpan": "zoomAndPan" } for originalName in list(token["data"].keys()): if originalName in replacements: svgName = replacements[originalName] token["data"][svgName] = token["data"][originalName] del token["data"][originalName] def adjustForeignAttributes(self, token): replacements = adjustForeignAttributesMap for originalName in token["data"].keys(): if originalName in replacements: foreignName = replacements[originalName] token["data"][foreignName] = token["data"][originalName] del token["data"][originalName] def reparseTokenNormal(self, token): self.parser.phase() def resetInsertionMode(self): # The name of this method is mostly historical. (It's also used in the # specification.) 
last = False newModes = { "select": "inSelect", "td": "inCell", "th": "inCell", "tr": "inRow", "tbody": "inTableBody", "thead": "inTableBody", "tfoot": "inTableBody", "caption": "inCaption", "colgroup": "inColumnGroup", "table": "inTable", "head": "inBody", "body": "inBody", "frameset": "inFrameset", "html": "beforeHead" } for node in self.tree.openElements[::-1]: nodeName = node.name new_phase = None if node == self.tree.openElements[0]: assert self.innerHTML last = True nodeName = self.innerHTML # Check for conditions that should only happen in the innerHTML # case if nodeName in ("select", "colgroup", "head", "html"): assert self.innerHTML if not last and node.namespace != self.tree.defaultNamespace: continue if nodeName in newModes: new_phase = self.phases[newModes[nodeName]] break elif last: new_phase = self.phases["inBody"] break self.phase = new_phase def parseRCDataRawtext(self, token, contentType): """Generic RCDATA/RAWTEXT Parsing algorithm contentType - RCDATA or RAWTEXT """ assert contentType in ("RAWTEXT", "RCDATA") self.tree.insertElement(token) if contentType == "RAWTEXT": self.tokenizer.state = self.tokenizer.rawtextState else: self.tokenizer.state = self.tokenizer.rcdataState self.originalPhase = self.phase self.phase = self.phases["text"] def getPhases(debug): def log(function): """Logger that records which phase processes each token""" type_names = dict((value, key) for key, value in constants.tokenTypes.items()) def wrapped(self, *args, **kwargs): if function.__name__.startswith("process") and len(args) > 0: token = args[0] try: info = {"type": type_names[token['type']]} except: raise if token['type'] in constants.tagTokenTypes: info["name"] = token['name'] self.parser.log.append((self.parser.tokenizer.state.__name__, self.parser.phase.__class__.__name__, self.__class__.__name__, function.__name__, info)) return function(self, *args, **kwargs) else: return function(self, *args, **kwargs) return wrapped def getMetaclass(use_metaclass, metaclass_func): if use_metaclass: return method_decorator_metaclass(metaclass_func) else: return type class Phase(with_metaclass(getMetaclass(debug, log))): """Base class for helper object that implements each phase of processing """ def __init__(self, parser, tree): self.parser = parser self.tree = tree def processEOF(self): raise NotImplementedError def processComment(self, token): # For most phases the following is correct. Where it's not it will be # overridden. self.tree.insertComment(token, self.tree.openElements[-1]) def processDoctype(self, token): self.parser.parseError("unexpected-doctype") def processCharacters(self, token): self.tree.insertText(token["data"]) def processSpaceCharacters(self, token): self.tree.insertText(token["data"]) def processStartTag(self, token): return self.startTagHandler[token["name"]](token) def startTagHtml(self, token): if not self.parser.firstStartTag and token["name"] == "html": self.parser.parseError("non-html-root") # XXX Need a check here to see if the first start tag token emitted is # this token... If it's not, invoke self.parser.parseError(). 
for attr, value in token["data"].items(): if attr not in self.tree.openElements[0].attributes: self.tree.openElements[0].attributes[attr] = value self.parser.firstStartTag = False def processEndTag(self, token): return self.endTagHandler[token["name"]](token) class InitialPhase(Phase): def processSpaceCharacters(self, token): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] correct = token["correct"] if (name != "html" or publicId is not None or systemId is not None and systemId != "about:legacy-compat"): self.parser.parseError("unknown-doctype") if publicId is None: publicId = "" self.tree.insertDoctype(token) if publicId != "": publicId = publicId.translate(asciiUpper2Lower) if (not correct or token["name"] != "html" or publicId.startswith( ("+//silmaril//dtd html pro v0r11 19970101//", "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", "-//as//dtd html 3.0 aswedit + extensions//", "-//ietf//dtd html 2.0 level 1//", "-//ietf//dtd html 2.0 level 2//", "-//ietf//dtd html 2.0 strict level 1//", "-//ietf//dtd html 2.0 strict level 2//", "-//ietf//dtd html 2.0 strict//", "-//ietf//dtd html 2.0//", "-//ietf//dtd html 2.1e//", "-//ietf//dtd html 3.0//", "-//ietf//dtd html 3.2 final//", "-//ietf//dtd html 3.2//", "-//ietf//dtd html 3//", "-//ietf//dtd html level 0//", "-//ietf//dtd html level 1//", "-//ietf//dtd html level 2//", "-//ietf//dtd html level 3//", "-//ietf//dtd html strict level 0//", "-//ietf//dtd html strict level 1//", "-//ietf//dtd html strict level 2//", "-//ietf//dtd html strict level 3//", "-//ietf//dtd html strict//", "-//ietf//dtd html//", "-//metrius//dtd metrius presentational//", "-//microsoft//dtd internet explorer 2.0 html strict//", "-//microsoft//dtd internet explorer 2.0 html//", "-//microsoft//dtd internet explorer 2.0 tables//", "-//microsoft//dtd internet explorer 3.0 html strict//", "-//microsoft//dtd internet explorer 3.0 html//", "-//microsoft//dtd internet explorer 3.0 tables//", "-//netscape comm. corp.//dtd html//", "-//netscape comm. 
corp.//dtd strict html//", "-//o'reilly and associates//dtd html 2.0//", "-//o'reilly and associates//dtd html extended 1.0//", "-//o'reilly and associates//dtd html extended relaxed 1.0//", "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", "-//spyglass//dtd html 2.0 extended//", "-//sq//dtd html 2.0 hotmetal + extensions//", "-//sun microsystems corp.//dtd hotjava html//", "-//sun microsystems corp.//dtd hotjava strict html//", "-//w3c//dtd html 3 1995-03-24//", "-//w3c//dtd html 3.2 draft//", "-//w3c//dtd html 3.2 final//", "-//w3c//dtd html 3.2//", "-//w3c//dtd html 3.2s draft//", "-//w3c//dtd html 4.0 frameset//", "-//w3c//dtd html 4.0 transitional//", "-//w3c//dtd html experimental 19960712//", "-//w3c//dtd html experimental 970421//", "-//w3c//dtd w3 html//", "-//w3o//dtd w3 html 3.0//", "-//webtechs//dtd mozilla html 2.0//", "-//webtechs//dtd mozilla html//")) or publicId in ("-//w3o//dtd w3 html strict 3.0//en//", "-/w3c/dtd html 4.0 transitional/en", "html") or publicId.startswith( ("-//w3c//dtd html 4.01 frameset//", "-//w3c//dtd html 4.01 transitional//")) and systemId is None or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): self.parser.compatMode = "quirks" elif (publicId.startswith( ("-//w3c//dtd xhtml 1.0 frameset//", "-//w3c//dtd xhtml 1.0 transitional//")) or publicId.startswith( ("-//w3c//dtd html 4.01 frameset//", "-//w3c//dtd html 4.01 transitional//")) and systemId is not None): self.parser.compatMode = "limited quirks" self.parser.phase = self.parser.phases["beforeHtml"] def anythingElse(self): self.parser.compatMode = "quirks" self.parser.phase = self.parser.phases["beforeHtml"] def processCharacters(self, token): self.parser.parseError("expected-doctype-but-got-chars") self.anythingElse() return token def processStartTag(self, token): self.parser.parseError("expected-doctype-but-got-start-tag", {"name": token["name"]}) self.anythingElse() return token def processEndTag(self, token): self.parser.parseError("expected-doctype-but-got-end-tag", {"name": token["name"]}) self.anythingElse() return token def processEOF(self): self.parser.parseError("expected-doctype-but-got-eof") self.anythingElse() return True class BeforeHtmlPhase(Phase): # helper methods def insertHtmlElement(self): self.tree.insertRoot(impliedTagToken("html", "StartTag")) self.parser.phase = self.parser.phases["beforeHead"] # other def processEOF(self): self.insertHtmlElement() return True def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processSpaceCharacters(self, token): pass def processCharacters(self, token): self.insertHtmlElement() return token def processStartTag(self, token): if token["name"] == "html": self.parser.firstStartTag = True self.insertHtmlElement() return token def processEndTag(self, token): if token["name"] not in ("head", "body", "html", "br"): self.parser.parseError("unexpected-end-tag-before-html", {"name": token["name"]}) else: self.insertHtmlElement() return token class BeforeHeadPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("head", self.startTagHead) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ (("head", "body", "html", "br"), self.endTagImplyHead) ]) self.endTagHandler.default = self.endTagOther def processEOF(self): 
self.startTagHead(impliedTagToken("head", "StartTag")) return True def processSpaceCharacters(self, token): pass def processCharacters(self, token): self.startTagHead(impliedTagToken("head", "StartTag")) return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagHead(self, token): self.tree.insertElement(token) self.tree.headPointer = self.tree.openElements[-1] self.parser.phase = self.parser.phases["inHead"] def startTagOther(self, token): self.startTagHead(impliedTagToken("head", "StartTag")) return token def endTagImplyHead(self, token): self.startTagHead(impliedTagToken("head", "StartTag")) return token def endTagOther(self, token): self.parser.parseError("end-tag-after-implied-root", {"name": token["name"]}) class InHeadPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("title", self.startTagTitle), (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle), ("script", self.startTagScript), (("base", "basefont", "bgsound", "command", "link"), self.startTagBaseLinkCommand), ("meta", self.startTagMeta), ("head", self.startTagHead) ]) self.startTagHandler.default = self.startTagOther self. endTagHandler = utils.MethodDispatcher([ ("head", self.endTagHead), (("br", "html", "body"), self.endTagHtmlBodyBr) ]) self.endTagHandler.default = self.endTagOther # the real thing def processEOF(self): self.anythingElse() return True def processCharacters(self, token): self.anythingElse() return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagHead(self, token): self.parser.parseError("two-heads-are-not-better-than-one") def startTagBaseLinkCommand(self, token): self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def startTagMeta(self, token): self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True attributes = token["data"] if self.parser.tokenizer.stream.charEncoding[1] == "tentative": if "charset" in attributes: self.parser.tokenizer.stream.changeEncoding(attributes["charset"]) elif ("content" in attributes and "http-equiv" in attributes and attributes["http-equiv"].lower() == "content-type"): # Encoding it as UTF-8 here is a hack, as really we should pass # the abstract Unicode string, and just use the # ContentAttrParser on that, but using UTF-8 allows all chars # to be encoded and as a ASCII-superset works. 
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8")) parser = inputstream.ContentAttrParser(data) codec = parser.parse() self.parser.tokenizer.stream.changeEncoding(codec) def startTagTitle(self, token): self.parser.parseRCDataRawtext(token, "RCDATA") def startTagNoScriptNoFramesStyle(self, token): # Need to decide whether to implement the scripting-disabled case self.parser.parseRCDataRawtext(token, "RAWTEXT") def startTagScript(self, token): self.tree.insertElement(token) self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState self.parser.originalPhase = self.parser.phase self.parser.phase = self.parser.phases["text"] def startTagOther(self, token): self.anythingElse() return token def endTagHead(self, token): node = self.parser.tree.openElements.pop() assert node.name == "head", "Expected head got %s" % node.name self.parser.phase = self.parser.phases["afterHead"] def endTagHtmlBodyBr(self, token): self.anythingElse() return token def endTagOther(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def anythingElse(self): self.endTagHead(impliedTagToken("head")) # XXX If we implement a parser for which scripting is disabled we need to # implement this phase. # # class InHeadNoScriptPhase(Phase): class AfterHeadPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("body", self.startTagBody), ("frameset", self.startTagFrameset), (("base", "basefont", "bgsound", "link", "meta", "noframes", "script", "style", "title"), self.startTagFromHead), ("head", self.startTagHead) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"), self.endTagHtmlBodyBr)]) self.endTagHandler.default = self.endTagOther def processEOF(self): self.anythingElse() return True def processCharacters(self, token): self.anythingElse() return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagBody(self, token): self.parser.framesetOK = False self.tree.insertElement(token) self.parser.phase = self.parser.phases["inBody"] def startTagFrameset(self, token): self.tree.insertElement(token) self.parser.phase = self.parser.phases["inFrameset"] def startTagFromHead(self, token): self.parser.parseError("unexpected-start-tag-out-of-my-head", {"name": token["name"]}) self.tree.openElements.append(self.tree.headPointer) self.parser.phases["inHead"].processStartTag(token) for node in self.tree.openElements[::-1]: if node.name == "head": self.tree.openElements.remove(node) break def startTagHead(self, token): self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) def startTagOther(self, token): self.anythingElse() return token def endTagHtmlBodyBr(self, token): self.anythingElse() return token def endTagOther(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def anythingElse(self): self.tree.insertElement(impliedTagToken("body", "StartTag")) self.parser.phase = self.parser.phases["inBody"] self.parser.framesetOK = True class InBodyPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody # the really-really-really-very crazy mode def __init__(self, parser, tree): Phase.__init__(self, parser, tree) # Keep a ref to this for special handling of whitespace in <pre> self.processSpaceCharactersNonPre = self.processSpaceCharacters self.startTagHandler = utils.MethodDispatcher([ 
("html", self.startTagHtml), (("base", "basefont", "bgsound", "command", "link", "meta", "noframes", "script", "style", "title"), self.startTagProcessInHead), ("body", self.startTagBody), ("frameset", self.startTagFrameset), (("address", "article", "aside", "blockquote", "center", "details", "details", "dir", "div", "dl", "fieldset", "figcaption", "figure", "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p", "section", "summary", "ul"), self.startTagCloseP), (headingElements, self.startTagHeading), (("pre", "listing"), self.startTagPreListing), ("form", self.startTagForm), (("li", "dd", "dt"), self.startTagListItem), ("plaintext", self.startTagPlaintext), ("a", self.startTagA), (("b", "big", "code", "em", "font", "i", "s", "small", "strike", "strong", "tt", "u"), self.startTagFormatting), ("nobr", self.startTagNobr), ("button", self.startTagButton), (("applet", "marquee", "object"), self.startTagAppletMarqueeObject), ("xmp", self.startTagXmp), ("table", self.startTagTable), (("area", "br", "embed", "img", "keygen", "wbr"), self.startTagVoidFormatting), (("param", "source", "track"), self.startTagParamSource), ("input", self.startTagInput), ("hr", self.startTagHr), ("image", self.startTagImage), ("isindex", self.startTagIsIndex), ("textarea", self.startTagTextarea), ("iframe", self.startTagIFrame), (("noembed", "noframes", "noscript"), self.startTagRawtext), ("select", self.startTagSelect), (("rp", "rt"), self.startTagRpRt), (("option", "optgroup"), self.startTagOpt), (("math"), self.startTagMath), (("svg"), self.startTagSvg), (("caption", "col", "colgroup", "frame", "head", "tbody", "td", "tfoot", "th", "thead", "tr"), self.startTagMisplaced) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("body", self.endTagBody), ("html", self.endTagHtml), (("address", "article", "aside", "blockquote", "button", "center", "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure", "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre", "section", "summary", "ul"), self.endTagBlock), ("form", self.endTagForm), ("p", self.endTagP), (("dd", "dt", "li"), self.endTagListItem), (headingElements, self.endTagHeading), (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small", "strike", "strong", "tt", "u"), self.endTagFormatting), (("applet", "marquee", "object"), self.endTagAppletMarqueeObject), ("br", self.endTagBr), ]) self.endTagHandler.default = self.endTagOther def isMatchingFormattingElement(self, node1, node2): if node1.name != node2.name or node1.namespace != node2.namespace: return False elif len(node1.attributes) != len(node2.attributes): return False else: attributes1 = sorted(node1.attributes.items()) attributes2 = sorted(node2.attributes.items()) for attr1, attr2 in zip(attributes1, attributes2): if attr1 != attr2: return False return True # helper def addFormattingElement(self, token): self.tree.insertElement(token) element = self.tree.openElements[-1] matchingElements = [] for node in self.tree.activeFormattingElements[::-1]: if node is Marker: break elif self.isMatchingFormattingElement(node, element): matchingElements.append(node) assert len(matchingElements) <= 3 if len(matchingElements) == 3: self.tree.activeFormattingElements.remove(matchingElements[-1]) self.tree.activeFormattingElements.append(element) # the real deal def processEOF(self): allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td", "tfoot", "th", "thead", "tr", "body", "html")) for node in 
self.tree.openElements[::-1]: if node.name not in allowed_elements: self.parser.parseError("expected-closing-tag-but-got-eof") break # Stop parsing def processSpaceCharactersDropNewline(self, token): # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we # want to drop leading newlines data = token["data"] self.processSpaceCharacters = self.processSpaceCharactersNonPre if (data.startswith("\n") and self.tree.openElements[-1].name in ("pre", "listing", "textarea") and not self.tree.openElements[-1].hasContent()): data = data[1:] if data: self.tree.reconstructActiveFormattingElements() self.tree.insertText(data) def processCharacters(self, token): if token["data"] == "\u0000": # The tokenizer should always emit null on its own return self.tree.reconstructActiveFormattingElements() self.tree.insertText(token["data"]) # This must be bad for performance if (self.parser.framesetOK and any([char not in spaceCharacters for char in token["data"]])): self.parser.framesetOK = False def processSpaceCharacters(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertText(token["data"]) def startTagProcessInHead(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagBody(self, token): self.parser.parseError("unexpected-start-tag", {"name": "body"}) if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"): assert self.parser.innerHTML else: self.parser.framesetOK = False for attr, value in token["data"].items(): if attr not in self.tree.openElements[1].attributes: self.tree.openElements[1].attributes[attr] = value def startTagFrameset(self, token): self.parser.parseError("unexpected-start-tag", {"name": "frameset"}) if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"): assert self.parser.innerHTML elif not self.parser.framesetOK: pass else: if self.tree.openElements[1].parent: self.tree.openElements[1].parent.removeChild(self.tree.openElements[1]) while self.tree.openElements[-1].name != "html": self.tree.openElements.pop() self.tree.insertElement(token) self.parser.phase = self.parser.phases["inFrameset"] def startTagCloseP(self, token): if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) self.tree.insertElement(token) def startTagPreListing(self, token): if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) self.tree.insertElement(token) self.parser.framesetOK = False self.processSpaceCharacters = self.processSpaceCharactersDropNewline def startTagForm(self, token): if self.tree.formPointer: self.parser.parseError("unexpected-start-tag", {"name": "form"}) else: if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) self.tree.insertElement(token) self.tree.formPointer = self.tree.openElements[-1] def startTagListItem(self, token): self.parser.framesetOK = False stopNamesMap = {"li": ["li"], "dt": ["dt", "dd"], "dd": ["dt", "dd"]} stopNames = stopNamesMap[token["name"]] for node in reversed(self.tree.openElements): if node.name in stopNames: self.parser.phase.processEndTag( impliedTagToken(node.name, "EndTag")) break if (node.nameTuple in specialElements and node.name not in ("address", "div", "p")): break if self.tree.elementInScope("p", variant="button"): self.parser.phase.processEndTag( impliedTagToken("p", "EndTag")) self.tree.insertElement(token) def startTagPlaintext(self, token): if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) 
self.tree.insertElement(token) self.parser.tokenizer.state = self.parser.tokenizer.plaintextState def startTagHeading(self, token): if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) if self.tree.openElements[-1].name in headingElements: self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) self.tree.openElements.pop() self.tree.insertElement(token) def startTagA(self, token): afeAElement = self.tree.elementInActiveFormattingElements("a") if afeAElement: self.parser.parseError("unexpected-start-tag-implies-end-tag", {"startName": "a", "endName": "a"}) self.endTagFormatting(impliedTagToken("a")) if afeAElement in self.tree.openElements: self.tree.openElements.remove(afeAElement) if afeAElement in self.tree.activeFormattingElements: self.tree.activeFormattingElements.remove(afeAElement) self.tree.reconstructActiveFormattingElements() self.addFormattingElement(token) def startTagFormatting(self, token): self.tree.reconstructActiveFormattingElements() self.addFormattingElement(token) def startTagNobr(self, token): self.tree.reconstructActiveFormattingElements() if self.tree.elementInScope("nobr"): self.parser.parseError("unexpected-start-tag-implies-end-tag", {"startName": "nobr", "endName": "nobr"}) self.processEndTag(impliedTagToken("nobr")) # XXX Need tests that trigger the following self.tree.reconstructActiveFormattingElements() self.addFormattingElement(token) def startTagButton(self, token): if self.tree.elementInScope("button"): self.parser.parseError("unexpected-start-tag-implies-end-tag", {"startName": "button", "endName": "button"}) self.processEndTag(impliedTagToken("button")) return token else: self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.parser.framesetOK = False def startTagAppletMarqueeObject(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.tree.activeFormattingElements.append(Marker) self.parser.framesetOK = False def startTagXmp(self, token): if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) self.tree.reconstructActiveFormattingElements() self.parser.framesetOK = False self.parser.parseRCDataRawtext(token, "RAWTEXT") def startTagTable(self, token): if self.parser.compatMode != "quirks": if self.tree.elementInScope("p", variant="button"): self.processEndTag(impliedTagToken("p")) self.tree.insertElement(token) self.parser.framesetOK = False self.parser.phase = self.parser.phases["inTable"] def startTagVoidFormatting(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True self.parser.framesetOK = False def startTagInput(self, token): framesetOK = self.parser.framesetOK self.startTagVoidFormatting(token) if ("type" in token["data"] and token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): # input type=hidden doesn't change framesetOK self.parser.framesetOK = framesetOK def startTagParamSource(self, token): self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def startTagHr(self, token): if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True self.parser.framesetOK = False def startTagImage(self, token): # No really... 
self.parser.parseError("unexpected-start-tag-treated-as", {"originalName": "image", "newName": "img"}) self.processStartTag(impliedTagToken("img", "StartTag", attributes=token["data"], selfClosing=token["selfClosing"])) def startTagIsIndex(self, token): self.parser.parseError("deprecated-tag", {"name": "isindex"}) if self.tree.formPointer: return form_attrs = {} if "action" in token["data"]: form_attrs["action"] = token["data"]["action"] self.processStartTag(impliedTagToken("form", "StartTag", attributes=form_attrs)) self.processStartTag(impliedTagToken("hr", "StartTag")) self.processStartTag(impliedTagToken("label", "StartTag")) # XXX Localization ... if "prompt" in token["data"]: prompt = token["data"]["prompt"] else: prompt = "This is a searchable index. Enter search keywords: " self.processCharacters( {"type": tokenTypes["Characters"], "data": prompt}) attributes = token["data"].copy() if "action" in attributes: del attributes["action"] if "prompt" in attributes: del attributes["prompt"] attributes["name"] = "isindex" self.processStartTag(impliedTagToken("input", "StartTag", attributes=attributes, selfClosing= token["selfClosing"])) self.processEndTag(impliedTagToken("label")) self.processStartTag(impliedTagToken("hr", "StartTag")) self.processEndTag(impliedTagToken("form")) def startTagTextarea(self, token): self.tree.insertElement(token) self.parser.tokenizer.state = self.parser.tokenizer.rcdataState self.processSpaceCharacters = self.processSpaceCharactersDropNewline self.parser.framesetOK = False def startTagIFrame(self, token): self.parser.framesetOK = False self.startTagRawtext(token) def startTagRawtext(self, token): """iframe, noembed noframes, noscript(if scripting enabled)""" self.parser.parseRCDataRawtext(token, "RAWTEXT") def startTagOpt(self, token): if self.tree.openElements[-1].name == "option": self.parser.phase.processEndTag(impliedTagToken("option")) self.tree.reconstructActiveFormattingElements() self.parser.tree.insertElement(token) def startTagSelect(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.parser.framesetOK = False if self.parser.phase in (self.parser.phases["inTable"], self.parser.phases["inCaption"], self.parser.phases["inColumnGroup"], self.parser.phases["inTableBody"], self.parser.phases["inRow"], self.parser.phases["inCell"]): self.parser.phase = self.parser.phases["inSelectInTable"] else: self.parser.phase = self.parser.phases["inSelect"] def startTagRpRt(self, token): if self.tree.elementInScope("ruby"): self.tree.generateImpliedEndTags() if self.tree.openElements[-1].name != "ruby": self.parser.parseError() self.tree.insertElement(token) def startTagMath(self, token): self.tree.reconstructActiveFormattingElements() self.parser.adjustMathMLAttributes(token) self.parser.adjustForeignAttributes(token) token["namespace"] = namespaces["mathml"] self.tree.insertElement(token) # Need to get the parse error right for the case where the token # has a namespace not equal to the xmlns attribute if token["selfClosing"]: self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def startTagSvg(self, token): self.tree.reconstructActiveFormattingElements() self.parser.adjustSVGAttributes(token) self.parser.adjustForeignAttributes(token) token["namespace"] = namespaces["svg"] self.tree.insertElement(token) # Need to get the parse error right for the case where the token # has a namespace not equal to the xmlns attribute if token["selfClosing"]: self.tree.openElements.pop() token["selfClosingAcknowledged"] 
= True def startTagMisplaced(self, token): """ Elements that should be children of other elements that have a different insertion mode; here they are ignored "caption", "col", "colgroup", "frame", "frameset", "head", "option", "optgroup", "tbody", "td", "tfoot", "th", "thead", "tr", "noscript" """ self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]}) def startTagOther(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) def endTagP(self, token): if not self.tree.elementInScope("p", variant="button"): self.startTagCloseP(impliedTagToken("p", "StartTag")) self.parser.parseError("unexpected-end-tag", {"name": "p"}) self.endTagP(impliedTagToken("p", "EndTag")) else: self.tree.generateImpliedEndTags("p") if self.tree.openElements[-1].name != "p": self.parser.parseError("unexpected-end-tag", {"name": "p"}) node = self.tree.openElements.pop() while node.name != "p": node = self.tree.openElements.pop() def endTagBody(self, token): if not self.tree.elementInScope("body"): self.parser.parseError() return elif self.tree.openElements[-1].name != "body": for node in self.tree.openElements[2:]: if node.name not in frozenset(("dd", "dt", "li", "optgroup", "option", "p", "rp", "rt", "tbody", "td", "tfoot", "th", "thead", "tr", "body", "html")): # Not sure this is the correct name for the parse error self.parser.parseError( "expected-one-end-tag-but-got-another", {"expectedName": "body", "gotName": node.name}) break self.parser.phase = self.parser.phases["afterBody"] def endTagHtml(self, token): # We repeat the test for the body end tag token being ignored here if self.tree.elementInScope("body"): self.endTagBody(impliedTagToken("body")) return token def endTagBlock(self, token): # Put us back in the right whitespace handling mode if token["name"] == "pre": self.processSpaceCharacters = self.processSpaceCharactersNonPre inScope = self.tree.elementInScope(token["name"]) if inScope: self.tree.generateImpliedEndTags() if self.tree.openElements[-1].name != token["name"]: self.parser.parseError("end-tag-too-early", {"name": token["name"]}) if inScope: node = self.tree.openElements.pop() while node.name != token["name"]: node = self.tree.openElements.pop() def endTagForm(self, token): node = self.tree.formPointer self.tree.formPointer = None if node is None or not self.tree.elementInScope(node): self.parser.parseError("unexpected-end-tag", {"name": "form"}) else: self.tree.generateImpliedEndTags() if self.tree.openElements[-1] != node: self.parser.parseError("end-tag-too-early-ignored", {"name": "form"}) self.tree.openElements.remove(node) def endTagListItem(self, token): if token["name"] == "li": variant = "list" else: variant = None if not self.tree.elementInScope(token["name"], variant=variant): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) else: self.tree.generateImpliedEndTags(exclude=token["name"]) if self.tree.openElements[-1].name != token["name"]: self.parser.parseError( "end-tag-too-early", {"name": token["name"]}) node = self.tree.openElements.pop() while node.name != token["name"]: node = self.tree.openElements.pop() def endTagHeading(self, token): for item in headingElements: if self.tree.elementInScope(item): self.tree.generateImpliedEndTags() break if self.tree.openElements[-1].name != token["name"]: self.parser.parseError("end-tag-too-early", {"name": token["name"]}) for item in headingElements: if self.tree.elementInScope(item): item = self.tree.openElements.pop() while item.name not in headingElements: 
item = self.tree.openElements.pop() break def endTagFormatting(self, token): """The much-feared adoption agency algorithm""" # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867 # XXX Better parseError messages appreciated. # Step 1 outerLoopCounter = 0 # Step 2 while outerLoopCounter < 8: # Step 3 outerLoopCounter += 1 # Step 4: # Let the formatting element be the last element in # the list of active formatting elements that: # - is between the end of the list and the last scope # marker in the list, if any, or the start of the list # otherwise, and # - has the same tag name as the token. formattingElement = self.tree.elementInActiveFormattingElements( token["name"]) if (not formattingElement or (formattingElement in self.tree.openElements and not self.tree.elementInScope(formattingElement.name))): # If there is no such node, then abort these steps # and instead act as described in the "any other # end tag" entry below. self.endTagOther(token) return # Otherwise, if there is such a node, but that node is # not in the stack of open elements, then this is a # parse error; remove the element from the list, and # abort these steps. elif formattingElement not in self.tree.openElements: self.parser.parseError("adoption-agency-1.2", {"name": token["name"]}) self.tree.activeFormattingElements.remove(formattingElement) return # Otherwise, if there is such a node, and that node is # also in the stack of open elements, but the element # is not in scope, then this is a parse error; ignore # the token, and abort these steps. elif not self.tree.elementInScope(formattingElement.name): self.parser.parseError("adoption-agency-4.4", {"name": token["name"]}) return # Otherwise, there is a formatting element and that # element is in the stack and is in scope. If the # element is not the current node, this is a parse # error. In any case, proceed with the algorithm as # written in the following steps. else: if formattingElement != self.tree.openElements[-1]: self.parser.parseError("adoption-agency-1.3", {"name": token["name"]}) # Step 5: # Let the furthest block be the topmost node in the # stack of open elements that is lower in the stack # than the formatting element, and is an element in # the special category. There might not be one. afeIndex = self.tree.openElements.index(formattingElement) furthestBlock = None for element in self.tree.openElements[afeIndex:]: if element.nameTuple in specialElements: furthestBlock = element break # Step 6: # If there is no furthest block, then the UA must # first pop all the nodes from the bottom of the stack # of open elements, from the current node up to and # including the formatting element, then remove the # formatting element from the list of active # formatting elements, and finally abort these steps. if furthestBlock is None: element = self.tree.openElements.pop() while element != formattingElement: element = self.tree.openElements.pop() self.tree.activeFormattingElements.remove(element) return # Step 7 commonAncestor = self.tree.openElements[afeIndex - 1] # Step 8: # The bookmark is supposed to help us identify where to reinsert # nodes in step 15. We have to ensure that we reinsert nodes after # the node before the active formatting element. 
Note the bookmark # can move in step 9.7 bookmark = self.tree.activeFormattingElements.index(formattingElement) # Step 9 lastNode = node = furthestBlock innerLoopCounter = 0 index = self.tree.openElements.index(node) while innerLoopCounter < 3: innerLoopCounter += 1 # Node is element before node in open elements index -= 1 node = self.tree.openElements[index] if node not in self.tree.activeFormattingElements: self.tree.openElements.remove(node) continue # Step 9.6 if node == formattingElement: break # Step 9.7 if lastNode == furthestBlock: bookmark = self.tree.activeFormattingElements.index(node) + 1 # Step 9.8 clone = node.cloneNode() # Replace node with clone self.tree.activeFormattingElements[ self.tree.activeFormattingElements.index(node)] = clone self.tree.openElements[ self.tree.openElements.index(node)] = clone node = clone # Step 9.9 # Remove lastNode from its parents, if any if lastNode.parent: lastNode.parent.removeChild(lastNode) node.appendChild(lastNode) # Step 9.10 lastNode = node # Step 10 # Foster parent lastNode if commonAncestor is a # table, tbody, tfoot, thead, or tr we need to foster # parent the lastNode if lastNode.parent: lastNode.parent.removeChild(lastNode) if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")): parent, insertBefore = self.tree.getTableMisnestedNodePosition() parent.insertBefore(lastNode, insertBefore) else: commonAncestor.appendChild(lastNode) # Step 11 clone = formattingElement.cloneNode() # Step 12 furthestBlock.reparentChildren(clone) # Step 13 furthestBlock.appendChild(clone) # Step 14 self.tree.activeFormattingElements.remove(formattingElement) self.tree.activeFormattingElements.insert(bookmark, clone) # Step 15 self.tree.openElements.remove(formattingElement) self.tree.openElements.insert( self.tree.openElements.index(furthestBlock) + 1, clone) def endTagAppletMarqueeObject(self, token): if self.tree.elementInScope(token["name"]): self.tree.generateImpliedEndTags() if self.tree.openElements[-1].name != token["name"]: self.parser.parseError("end-tag-too-early", {"name": token["name"]}) if self.tree.elementInScope(token["name"]): element = self.tree.openElements.pop() while element.name != token["name"]: element = self.tree.openElements.pop() self.tree.clearActiveFormattingElements() def endTagBr(self, token): self.parser.parseError("unexpected-end-tag-treated-as", {"originalName": "br", "newName": "br element"}) self.tree.reconstructActiveFormattingElements() self.tree.insertElement(impliedTagToken("br", "StartTag")) self.tree.openElements.pop() def endTagOther(self, token): for node in self.tree.openElements[::-1]: if node.name == token["name"]: self.tree.generateImpliedEndTags(exclude=token["name"]) if self.tree.openElements[-1].name != token["name"]: self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) while self.tree.openElements.pop() != node: pass break else: if node.nameTuple in specialElements: self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) break class TextPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("script", self.endTagScript)]) self.endTagHandler.default = self.endTagOther def processCharacters(self, token): self.tree.insertText(token["data"]) def processEOF(self): self.parser.parseError("expected-named-closing-tag-but-got-eof", {"name": self.tree.openElements[-1].name}) 
self.tree.openElements.pop() self.parser.phase = self.parser.originalPhase return True def startTagOther(self, token): assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name'] def endTagScript(self, token): node = self.tree.openElements.pop() assert node.name == "script" self.parser.phase = self.parser.originalPhase # The rest of this method is all stuff that only happens if # document.write works def endTagOther(self, token): self.tree.openElements.pop() self.parser.phase = self.parser.originalPhase class InTablePhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-table def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("caption", self.startTagCaption), ("colgroup", self.startTagColgroup), ("col", self.startTagCol), (("tbody", "tfoot", "thead"), self.startTagRowGroup), (("td", "th", "tr"), self.startTagImplyTbody), ("table", self.startTagTable), (("style", "script"), self.startTagStyleScript), ("input", self.startTagInput), ("form", self.startTagForm) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("table", self.endTagTable), (("body", "caption", "col", "colgroup", "html", "tbody", "td", "tfoot", "th", "thead", "tr"), self.endTagIgnore) ]) self.endTagHandler.default = self.endTagOther # helper methods def clearStackToTableContext(self): # "clear the stack back to a table context" while self.tree.openElements[-1].name not in ("table", "html"): # self.parser.parseError("unexpected-implied-end-tag-in-table", # {"name": self.tree.openElements[-1].name}) self.tree.openElements.pop() # When the current node is <html> it's an innerHTML case # processing methods def processEOF(self): if self.tree.openElements[-1].name != "html": self.parser.parseError("eof-in-table") else: assert self.parser.innerHTML # Stop parsing def processSpaceCharacters(self, token): originalPhase = self.parser.phase self.parser.phase = self.parser.phases["inTableText"] self.parser.phase.originalPhase = originalPhase self.parser.phase.processSpaceCharacters(token) def processCharacters(self, token): originalPhase = self.parser.phase self.parser.phase = self.parser.phases["inTableText"] self.parser.phase.originalPhase = originalPhase self.parser.phase.processCharacters(token) def insertText(self, token): # If we get here there must be at least one non-whitespace character # Do the table magic! 
self.tree.insertFromTable = True self.parser.phases["inBody"].processCharacters(token) self.tree.insertFromTable = False def startTagCaption(self, token): self.clearStackToTableContext() self.tree.activeFormattingElements.append(Marker) self.tree.insertElement(token) self.parser.phase = self.parser.phases["inCaption"] def startTagColgroup(self, token): self.clearStackToTableContext() self.tree.insertElement(token) self.parser.phase = self.parser.phases["inColumnGroup"] def startTagCol(self, token): self.startTagColgroup(impliedTagToken("colgroup", "StartTag")) return token def startTagRowGroup(self, token): self.clearStackToTableContext() self.tree.insertElement(token) self.parser.phase = self.parser.phases["inTableBody"] def startTagImplyTbody(self, token): self.startTagRowGroup(impliedTagToken("tbody", "StartTag")) return token def startTagTable(self, token): self.parser.parseError("unexpected-start-tag-implies-end-tag", {"startName": "table", "endName": "table"}) self.parser.phase.processEndTag(impliedTagToken("table")) if not self.parser.innerHTML: return token def startTagStyleScript(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagInput(self, token): if ("type" in token["data"] and token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): self.parser.parseError("unexpected-hidden-input-in-table") self.tree.insertElement(token) # XXX associate with form self.tree.openElements.pop() else: self.startTagOther(token) def startTagForm(self, token): self.parser.parseError("unexpected-form-in-table") if self.tree.formPointer is None: self.tree.insertElement(token) self.tree.formPointer = self.tree.openElements[-1] self.tree.openElements.pop() def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]}) # Do the table magic! self.tree.insertFromTable = True self.parser.phases["inBody"].processStartTag(token) self.tree.insertFromTable = False def endTagTable(self, token): if self.tree.elementInScope("table", variant="table"): self.tree.generateImpliedEndTags() if self.tree.openElements[-1].name != "table": self.parser.parseError("end-tag-too-early-named", {"gotName": "table", "expectedName": self.tree.openElements[-1].name}) while self.tree.openElements[-1].name != "table": self.tree.openElements.pop() self.tree.openElements.pop() self.parser.resetInsertionMode() else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]}) # Do the table magic! 
self.tree.insertFromTable = True self.parser.phases["inBody"].processEndTag(token) self.tree.insertFromTable = False class InTableTextPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.originalPhase = None self.characterTokens = [] def flushCharacters(self): data = "".join([item["data"] for item in self.characterTokens]) if any([item not in spaceCharacters for item in data]): token = {"type": tokenTypes["Characters"], "data": data} self.parser.phases["inTable"].insertText(token) elif data: self.tree.insertText(data) self.characterTokens = [] def processComment(self, token): self.flushCharacters() self.parser.phase = self.originalPhase return token def processEOF(self): self.flushCharacters() self.parser.phase = self.originalPhase return True def processCharacters(self, token): if token["data"] == "\u0000": return self.characterTokens.append(token) def processSpaceCharacters(self, token): # pretty sure we should never reach here self.characterTokens.append(token) # assert False def processStartTag(self, token): self.flushCharacters() self.parser.phase = self.originalPhase return token def processEndTag(self, token): self.flushCharacters() self.parser.phase = self.originalPhase return token class InCaptionPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-caption def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", "thead", "tr"), self.startTagTableElement) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("caption", self.endTagCaption), ("table", self.endTagTable), (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th", "thead", "tr"), self.endTagIgnore) ]) self.endTagHandler.default = self.endTagOther def ignoreEndTagCaption(self): return not self.tree.elementInScope("caption", variant="table") def processEOF(self): self.parser.phases["inBody"].processEOF() def processCharacters(self, token): return self.parser.phases["inBody"].processCharacters(token) def startTagTableElement(self, token): self.parser.parseError() # XXX Have to duplicate logic here to find out if the tag is ignored ignoreEndTag = self.ignoreEndTagCaption() self.parser.phase.processEndTag(impliedTagToken("caption")) if not ignoreEndTag: return token def startTagOther(self, token): return self.parser.phases["inBody"].processStartTag(token) def endTagCaption(self, token): if not self.ignoreEndTagCaption(): # AT this code is quite similar to endTagTable in "InTable" self.tree.generateImpliedEndTags() if self.tree.openElements[-1].name != "caption": self.parser.parseError("expected-one-end-tag-but-got-another", {"gotName": "caption", "expectedName": self.tree.openElements[-1].name}) while self.tree.openElements[-1].name != "caption": self.tree.openElements.pop() self.tree.openElements.pop() self.tree.clearActiveFormattingElements() self.parser.phase = self.parser.phases["inTable"] else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagTable(self, token): self.parser.parseError() ignoreEndTag = self.ignoreEndTagCaption() self.parser.phase.processEndTag(impliedTagToken("caption")) if not ignoreEndTag: return token def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def endTagOther(self, token): return self.parser.phases["inBody"].processEndTag(token) class InColumnGroupPhase(Phase): 
# http://www.whatwg.org/specs/web-apps/current-work/#in-column def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("col", self.startTagCol) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("colgroup", self.endTagColgroup), ("col", self.endTagCol) ]) self.endTagHandler.default = self.endTagOther def ignoreEndTagColgroup(self): return self.tree.openElements[-1].name == "html" def processEOF(self): if self.tree.openElements[-1].name == "html": assert self.parser.innerHTML return else: ignoreEndTag = self.ignoreEndTagColgroup() self.endTagColgroup(impliedTagToken("colgroup")) if not ignoreEndTag: return True def processCharacters(self, token): ignoreEndTag = self.ignoreEndTagColgroup() self.endTagColgroup(impliedTagToken("colgroup")) if not ignoreEndTag: return token def startTagCol(self, token): self.tree.insertElement(token) self.tree.openElements.pop() def startTagOther(self, token): ignoreEndTag = self.ignoreEndTagColgroup() self.endTagColgroup(impliedTagToken("colgroup")) if not ignoreEndTag: return token def endTagColgroup(self, token): if self.ignoreEndTagColgroup(): # innerHTML case assert self.parser.innerHTML self.parser.parseError() else: self.tree.openElements.pop() self.parser.phase = self.parser.phases["inTable"] def endTagCol(self, token): self.parser.parseError("no-end-tag", {"name": "col"}) def endTagOther(self, token): ignoreEndTag = self.ignoreEndTagColgroup() self.endTagColgroup(impliedTagToken("colgroup")) if not ignoreEndTag: return token class InTableBodyPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-table0 def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("tr", self.startTagTr), (("td", "th"), self.startTagTableCell), (("caption", "col", "colgroup", "tbody", "tfoot", "thead"), self.startTagTableOther) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ (("tbody", "tfoot", "thead"), self.endTagTableRowGroup), ("table", self.endTagTable), (("body", "caption", "col", "colgroup", "html", "td", "th", "tr"), self.endTagIgnore) ]) self.endTagHandler.default = self.endTagOther # helper methods def clearStackToTableBodyContext(self): while self.tree.openElements[-1].name not in ("tbody", "tfoot", "thead", "html"): # self.parser.parseError("unexpected-implied-end-tag-in-table", # {"name": self.tree.openElements[-1].name}) self.tree.openElements.pop() if self.tree.openElements[-1].name == "html": assert self.parser.innerHTML # the rest def processEOF(self): self.parser.phases["inTable"].processEOF() def processSpaceCharacters(self, token): return self.parser.phases["inTable"].processSpaceCharacters(token) def processCharacters(self, token): return self.parser.phases["inTable"].processCharacters(token) def startTagTr(self, token): self.clearStackToTableBodyContext() self.tree.insertElement(token) self.parser.phase = self.parser.phases["inRow"] def startTagTableCell(self, token): self.parser.parseError("unexpected-cell-in-table-body", {"name": token["name"]}) self.startTagTr(impliedTagToken("tr", "StartTag")) return token def startTagTableOther(self, token): # XXX AT Any ideas on how to share this with endTagTable? 
if (self.tree.elementInScope("tbody", variant="table") or self.tree.elementInScope("thead", variant="table") or self.tree.elementInScope("tfoot", variant="table")): self.clearStackToTableBodyContext() self.endTagTableRowGroup( impliedTagToken(self.tree.openElements[-1].name)) return token else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def startTagOther(self, token): return self.parser.phases["inTable"].processStartTag(token) def endTagTableRowGroup(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.clearStackToTableBodyContext() self.tree.openElements.pop() self.parser.phase = self.parser.phases["inTable"] else: self.parser.parseError("unexpected-end-tag-in-table-body", {"name": token["name"]}) def endTagTable(self, token): if (self.tree.elementInScope("tbody", variant="table") or self.tree.elementInScope("thead", variant="table") or self.tree.elementInScope("tfoot", variant="table")): self.clearStackToTableBodyContext() self.endTagTableRowGroup( impliedTagToken(self.tree.openElements[-1].name)) return token else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag-in-table-body", {"name": token["name"]}) def endTagOther(self, token): return self.parser.phases["inTable"].processEndTag(token) class InRowPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-row def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), (("td", "th"), self.startTagTableCell), (("caption", "col", "colgroup", "tbody", "tfoot", "thead", "tr"), self.startTagTableOther) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("tr", self.endTagTr), ("table", self.endTagTable), (("tbody", "tfoot", "thead"), self.endTagTableRowGroup), (("body", "caption", "col", "colgroup", "html", "td", "th"), self.endTagIgnore) ]) self.endTagHandler.default = self.endTagOther # helper methods (XXX unify this with other table helper methods) def clearStackToTableRowContext(self): while self.tree.openElements[-1].name not in ("tr", "html"): self.parser.parseError("unexpected-implied-end-tag-in-table-row", {"name": self.tree.openElements[-1].name}) self.tree.openElements.pop() def ignoreEndTagTr(self): return not self.tree.elementInScope("tr", variant="table") # the rest def processEOF(self): self.parser.phases["inTable"].processEOF() def processSpaceCharacters(self, token): return self.parser.phases["inTable"].processSpaceCharacters(token) def processCharacters(self, token): return self.parser.phases["inTable"].processCharacters(token) def startTagTableCell(self, token): self.clearStackToTableRowContext() self.tree.insertElement(token) self.parser.phase = self.parser.phases["inCell"] self.tree.activeFormattingElements.append(Marker) def startTagTableOther(self, token): ignoreEndTag = self.ignoreEndTagTr() self.endTagTr(impliedTagToken("tr")) # XXX how are we sure it's always ignored in the innerHTML case? 
if not ignoreEndTag: return token def startTagOther(self, token): return self.parser.phases["inTable"].processStartTag(token) def endTagTr(self, token): if not self.ignoreEndTagTr(): self.clearStackToTableRowContext() self.tree.openElements.pop() self.parser.phase = self.parser.phases["inTableBody"] else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagTable(self, token): ignoreEndTag = self.ignoreEndTagTr() self.endTagTr(impliedTagToken("tr")) # Reprocess the current tag if the tr end tag was not ignored # XXX how are we sure it's always ignored in the innerHTML case? if not ignoreEndTag: return token def endTagTableRowGroup(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.endTagTr(impliedTagToken("tr")) return token else: self.parser.parseError() def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag-in-table-row", {"name": token["name"]}) def endTagOther(self, token): return self.parser.phases["inTable"].processEndTag(token) class InCellPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-cell def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", "thead", "tr"), self.startTagTableOther) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ (("td", "th"), self.endTagTableCell), (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore), (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply) ]) self.endTagHandler.default = self.endTagOther # helper def closeCell(self): if self.tree.elementInScope("td", variant="table"): self.endTagTableCell(impliedTagToken("td")) elif self.tree.elementInScope("th", variant="table"): self.endTagTableCell(impliedTagToken("th")) # the rest def processEOF(self): self.parser.phases["inBody"].processEOF() def processCharacters(self, token): return self.parser.phases["inBody"].processCharacters(token) def startTagTableOther(self, token): if (self.tree.elementInScope("td", variant="table") or self.tree.elementInScope("th", variant="table")): self.closeCell() return token else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def startTagOther(self, token): return self.parser.phases["inBody"].processStartTag(token) def endTagTableCell(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.tree.generateImpliedEndTags(token["name"]) if self.tree.openElements[-1].name != token["name"]: self.parser.parseError("unexpected-cell-end-tag", {"name": token["name"]}) while True: node = self.tree.openElements.pop() if node.name == token["name"]: break else: self.tree.openElements.pop() self.tree.clearActiveFormattingElements() self.parser.phase = self.parser.phases["inRow"] else: self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def endTagImply(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.closeCell() return token else: # sometimes innerHTML case self.parser.parseError() def endTagOther(self, token): return self.parser.phases["inBody"].processEndTag(token) class InSelectPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("option", self.startTagOption), 
("optgroup", self.startTagOptgroup), ("select", self.startTagSelect), (("input", "keygen", "textarea"), self.startTagInput), ("script", self.startTagScript) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("option", self.endTagOption), ("optgroup", self.endTagOptgroup), ("select", self.endTagSelect) ]) self.endTagHandler.default = self.endTagOther # http://www.whatwg.org/specs/web-apps/current-work/#in-select def processEOF(self): if self.tree.openElements[-1].name != "html": self.parser.parseError("eof-in-select") else: assert self.parser.innerHTML def processCharacters(self, token): if token["data"] == "\u0000": return self.tree.insertText(token["data"]) def startTagOption(self, token): # We need to imply </option> if <option> is the current node. if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() self.tree.insertElement(token) def startTagOptgroup(self, token): if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() if self.tree.openElements[-1].name == "optgroup": self.tree.openElements.pop() self.tree.insertElement(token) def startTagSelect(self, token): self.parser.parseError("unexpected-select-in-select") self.endTagSelect(impliedTagToken("select")) def startTagInput(self, token): self.parser.parseError("unexpected-input-in-select") if self.tree.elementInScope("select", variant="select"): self.endTagSelect(impliedTagToken("select")) return token else: assert self.parser.innerHTML def startTagScript(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-in-select", {"name": token["name"]}) def endTagOption(self, token): if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() else: self.parser.parseError("unexpected-end-tag-in-select", {"name": "option"}) def endTagOptgroup(self, token): # </optgroup> implicitly closes <option> if (self.tree.openElements[-1].name == "option" and self.tree.openElements[-2].name == "optgroup"): self.tree.openElements.pop() # It also closes </optgroup> if self.tree.openElements[-1].name == "optgroup": self.tree.openElements.pop() # But nothing else else: self.parser.parseError("unexpected-end-tag-in-select", {"name": "optgroup"}) def endTagSelect(self, token): if self.tree.elementInScope("select", variant="select"): node = self.tree.openElements.pop() while node.name != "select": node = self.tree.openElements.pop() self.parser.resetInsertionMode() else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-in-select", {"name": token["name"]}) class InSelectInTablePhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), self.startTagTable) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), self.endTagTable) ]) self.endTagHandler.default = self.endTagOther def processEOF(self): self.parser.phases["inSelect"].processEOF() def processCharacters(self, token): return self.parser.phases["inSelect"].processCharacters(token) def startTagTable(self, token): self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]}) self.endTagOther(impliedTagToken("select")) return 
token def startTagOther(self, token): return self.parser.phases["inSelect"].processStartTag(token) def endTagTable(self, token): self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]}) if self.tree.elementInScope(token["name"], variant="table"): self.endTagOther(impliedTagToken("select")) return token def endTagOther(self, token): return self.parser.phases["inSelect"].processEndTag(token) class InForeignContentPhase(Phase): breakoutElements = frozenset(["b", "big", "blockquote", "body", "br", "center", "code", "dd", "div", "dl", "dt", "em", "embed", "h1", "h2", "h3", "h4", "h5", "h6", "head", "hr", "i", "img", "li", "listing", "menu", "meta", "nobr", "ol", "p", "pre", "ruby", "s", "small", "span", "strong", "strike", "sub", "sup", "table", "tt", "u", "ul", "var"]) def __init__(self, parser, tree): Phase.__init__(self, parser, tree) def adjustSVGTagNames(self, token): replacements = {"altglyph": "altGlyph", "altglyphdef": "altGlyphDef", "altglyphitem": "altGlyphItem", "animatecolor": "animateColor", "animatemotion": "animateMotion", "animatetransform": "animateTransform", "clippath": "clipPath", "feblend": "feBlend", "fecolormatrix": "feColorMatrix", "fecomponenttransfer": "feComponentTransfer", "fecomposite": "feComposite", "feconvolvematrix": "feConvolveMatrix", "fediffuselighting": "feDiffuseLighting", "fedisplacementmap": "feDisplacementMap", "fedistantlight": "feDistantLight", "feflood": "feFlood", "fefunca": "feFuncA", "fefuncb": "feFuncB", "fefuncg": "feFuncG", "fefuncr": "feFuncR", "fegaussianblur": "feGaussianBlur", "feimage": "feImage", "femerge": "feMerge", "femergenode": "feMergeNode", "femorphology": "feMorphology", "feoffset": "feOffset", "fepointlight": "fePointLight", "fespecularlighting": "feSpecularLighting", "fespotlight": "feSpotLight", "fetile": "feTile", "feturbulence": "feTurbulence", "foreignobject": "foreignObject", "glyphref": "glyphRef", "lineargradient": "linearGradient", "radialgradient": "radialGradient", "textpath": "textPath"} if token["name"] in replacements: token["name"] = replacements[token["name"]] def processCharacters(self, token): if token["data"] == "\u0000": token["data"] = "\uFFFD" elif (self.parser.framesetOK and any(char not in spaceCharacters for char in token["data"])): self.parser.framesetOK = False Phase.processCharacters(self, token) def processStartTag(self, token): currentNode = self.tree.openElements[-1] if (token["name"] in self.breakoutElements or (token["name"] == "font" and set(token["data"].keys()) & set(["color", "face", "size"]))): self.parser.parseError("unexpected-html-element-in-foreign-content", {"name": token["name"]}) while (self.tree.openElements[-1].namespace != self.tree.defaultNamespace and not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])): self.tree.openElements.pop() return token else: if currentNode.namespace == namespaces["mathml"]: self.parser.adjustMathMLAttributes(token) elif currentNode.namespace == namespaces["svg"]: self.adjustSVGTagNames(token) self.parser.adjustSVGAttributes(token) self.parser.adjustForeignAttributes(token) token["namespace"] = currentNode.namespace self.tree.insertElement(token) if token["selfClosing"]: self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def processEndTag(self, token): nodeIndex = len(self.tree.openElements) - 1 node = self.tree.openElements[-1] if node.name != token["name"]: self.parser.parseError("unexpected-end-tag", 
{"name": token["name"]}) while True: if node.name.translate(asciiUpper2Lower) == token["name"]: # XXX this isn't in the spec but it seems necessary if self.parser.phase == self.parser.phases["inTableText"]: self.parser.phase.flushCharacters() self.parser.phase = self.parser.phase.originalPhase while self.tree.openElements.pop() != node: assert self.tree.openElements new_token = None break nodeIndex -= 1 node = self.tree.openElements[nodeIndex] if node.namespace != self.tree.defaultNamespace: continue else: new_token = self.parser.phase.processEndTag(token) break return new_token class AfterBodyPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)]) self.endTagHandler.default = self.endTagOther def processEOF(self): # Stop parsing pass def processComment(self, token): # This is needed because data is to be appended to the <html> element # here and not to whatever is currently open. self.tree.insertComment(token, self.tree.openElements[0]) def processCharacters(self, token): self.parser.parseError("unexpected-char-after-body") self.parser.phase = self.parser.phases["inBody"] return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-after-body", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token def endTagHtml(self, name): if self.parser.innerHTML: self.parser.parseError("unexpected-end-tag-after-body-innerhtml") else: self.parser.phase = self.parser.phases["afterAfterBody"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-after-body", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token class InFramesetPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("frameset", self.startTagFrameset), ("frame", self.startTagFrame), ("noframes", self.startTagNoframes) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("frameset", self.endTagFrameset) ]) self.endTagHandler.default = self.endTagOther def processEOF(self): if self.tree.openElements[-1].name != "html": self.parser.parseError("eof-in-frameset") else: assert self.parser.innerHTML def processCharacters(self, token): self.parser.parseError("unexpected-char-in-frameset") def startTagFrameset(self, token): self.tree.insertElement(token) def startTagFrame(self, token): self.tree.insertElement(token) self.tree.openElements.pop() def startTagNoframes(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-in-frameset", {"name": token["name"]}) def endTagFrameset(self, token): if self.tree.openElements[-1].name == "html": # innerHTML case self.parser.parseError("unexpected-frameset-in-frameset-innerhtml") else: self.tree.openElements.pop() if (not self.parser.innerHTML and self.tree.openElements[-1].name != "frameset"): # If we're not in innerHTML mode and the the current node is not a # "frameset" element (anymore) then switch. 
self.parser.phase = self.parser.phases["afterFrameset"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-in-frameset", {"name": token["name"]}) class AfterFramesetPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#after3 def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("noframes", self.startTagNoframes) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("html", self.endTagHtml) ]) self.endTagHandler.default = self.endTagOther def processEOF(self): # Stop parsing pass def processCharacters(self, token): self.parser.parseError("unexpected-char-after-frameset") def startTagNoframes(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-after-frameset", {"name": token["name"]}) def endTagHtml(self, token): self.parser.phase = self.parser.phases["afterAfterFrameset"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-after-frameset", {"name": token["name"]}) class AfterAfterBodyPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml) ]) self.startTagHandler.default = self.startTagOther def processEOF(self): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processSpaceCharacters(self, token): return self.parser.phases["inBody"].processSpaceCharacters(token) def processCharacters(self, token): self.parser.parseError("expected-eof-but-got-char") self.parser.phase = self.parser.phases["inBody"] return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("expected-eof-but-got-start-tag", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token def processEndTag(self, token): self.parser.parseError("expected-eof-but-got-end-tag", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token class AfterAfterFramesetPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("noframes", self.startTagNoFrames) ]) self.startTagHandler.default = self.startTagOther def processEOF(self): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processSpaceCharacters(self, token): return self.parser.phases["inBody"].processSpaceCharacters(token) def processCharacters(self, token): self.parser.parseError("expected-eof-but-got-char") def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagNoFrames(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("expected-eof-but-got-start-tag", {"name": token["name"]}) def processEndTag(self, token): self.parser.parseError("expected-eof-but-got-end-tag", {"name": token["name"]}) return { "initial": InitialPhase, "beforeHtml": BeforeHtmlPhase, "beforeHead": BeforeHeadPhase, "inHead": InHeadPhase, # XXX "inHeadNoscript": InHeadNoScriptPhase, "afterHead": AfterHeadPhase, "inBody": InBodyPhase, "text": TextPhase, "inTable": InTablePhase, "inTableText": InTableTextPhase, "inCaption": InCaptionPhase, "inColumnGroup": 
InColumnGroupPhase, "inTableBody": InTableBodyPhase, "inRow": InRowPhase, "inCell": InCellPhase, "inSelect": InSelectPhase, "inSelectInTable": InSelectInTablePhase, "inForeignContent": InForeignContentPhase, "afterBody": AfterBodyPhase, "inFrameset": InFramesetPhase, "afterFrameset": AfterFramesetPhase, "afterAfterBody": AfterAfterBodyPhase, "afterAfterFrameset": AfterAfterFramesetPhase, # XXX after after frameset } def impliedTagToken(name, type="EndTag", attributes=None, selfClosing=False): if attributes is None: attributes = {} return {"type": tokenTypes[type], "name": name, "data": attributes, "selfClosing": selfClosing} class ParseError(Exception): """Error in parsed document""" pass
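# Hedged sketch (editor's addition, not part of the html5lib source above): the
# phase classes returned by the dict above synthesize tokens for tags the input
# never contained via impliedTagToken(). The stand-in below assumes a minimal
# tokenTypes mapping, since the real constants live in html5lib.constants and
# are not shown in this record.
_token_types_sketch = {"StartTag": 0, "EndTag": 1}  # assumed stand-in values


def _implied_tag_token_sketch(name, type="EndTag", attributes=None,
                              selfClosing=False):
    # Mirrors impliedTagToken() above: a plain dict-shaped token.
    if attributes is None:
        attributes = {}
    return {"type": _token_types_sketch[type], "name": name,
            "data": attributes, "selfClosing": selfClosing}


# e.g. InBodyPhase.startTagCloseP closes an open <p> by reprocessing a
# synthesized </p> token rather than a token read from the markup:
assert _implied_tag_token_sketch("p") == {
    "type": 1, "name": "p", "data": {}, "selfClosing": False}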
psychopenguin/st2contrib
refs/heads/master
packs/reamaze/actions/article_get.py
7
from lib.actions import BaseAction


class ArticleGet(BaseAction):
    def run(self, slug):
        slug = self._convert_slug(slug)
        path = '/articles/%s' % slug
        response = self._api_get(path)
        return response
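# Hedged sketch (editor's addition): ArticleGet relies on helpers from the
# pack's BaseAction (_convert_slug, _api_get), which are not shown in this
# record. The stand-ins below fake those helpers purely to illustrate the call
# flow; the names and behaviour of the real helpers may differ.
class _FakeBaseAction(object):
    def _convert_slug(self, slug):
        return slug.strip().lower()       # assumed normalisation

    def _api_get(self, path):
        return {"requested_path": path}   # assumed to return parsed JSON


class _ArticleGetSketch(_FakeBaseAction):
    # Same body as ArticleGet above, re-parented onto the fake base class.
    def run(self, slug):
        slug = self._convert_slug(slug)
        path = '/articles/%s' % slug
        return self._api_get(path)


print(_ArticleGetSketch().run('Getting-Started'))
# {'requested_path': '/articles/getting-started'}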
aptrishu/coala
refs/heads/master
tests/results/result_actions/ApplyPatchActionTest.py
16
import unittest
import os
from os.path import isfile

from coala_utils.ContextManagers import make_temp
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
from coalib.settings.Section import Section


class ApplyPatchActionTest(unittest.TestCase):

    def test_apply(self):
        uut = ApplyPatchAction()
        with make_temp() as f_a, make_temp() as f_b, make_temp() as f_c:
            file_dict = {
                f_a: ['1\n', '2\n', '3\n'],
                f_b: ['1\n', '2\n', '3\n'],
                f_c: ['1\n', '2\n', '3\n']
            }
            expected_file_dict = {
                f_a: ['1\n', '3_changed\n'],
                f_b: ['1\n', '2\n', '3_changed\n'],
                f_c: ['1\n', '2\n', '3\n']
            }

            file_diff_dict = {}
            diff = Diff(file_dict[f_a])
            diff.delete_line(2)
            uut.apply_from_section(Result('origin', 'msg', diffs={f_a: diff}),
                                   file_dict,
                                   file_diff_dict,
                                   Section('t'))
            diff = Diff(file_dict[f_a])
            diff.change_line(3, '3\n', '3_changed\n')
            uut.apply_from_section(Result('origin', 'msg', diffs={f_a: diff}),
                                   file_dict,
                                   file_diff_dict,
                                   Section('t'))
            diff = Diff(file_dict[f_b])
            diff.change_line(3, '3\n', '3_changed\n')
            uut.apply(Result('origin', 'msg', diffs={f_b: diff}),
                      file_dict,
                      file_diff_dict)

            for filename in file_diff_dict:
                file_dict[filename] = file_diff_dict[filename].modified

            self.assertEqual(file_dict, expected_file_dict)

            with open(f_a) as fa:
                self.assertEqual(file_dict[f_a], fa.readlines())
            with open(f_b) as fb:
                self.assertEqual(file_dict[f_b], fb.readlines())
            with open(f_c) as fc:
                # File c is unchanged and should be untouched
                self.assertEqual([], fc.readlines())

    def test_apply_orig_option(self):
        uut = ApplyPatchAction()
        with make_temp() as f_a, make_temp() as f_b:
            file_dict = {
                f_a: ['1\n', '2\n', '3\n'],
                f_b: ['1\n', '2\n', '3\n']
            }
            expected_file_dict = {
                f_a: ['1\n', '2\n', '3_changed\n'],
                f_b: ['1\n', '2\n', '3_changed\n']
            }

            file_diff_dict = {}
            diff = Diff(file_dict[f_a])
            diff.change_line(3, '3\n', '3_changed\n')
            uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
                      file_dict,
                      file_diff_dict,
                      no_orig=True)
            diff = Diff(file_dict[f_b])
            diff.change_line(3, '3\n', '3_changed\n')
            uut.apply(Result('origin', 'msg', diffs={f_b: diff}),
                      file_dict,
                      file_diff_dict,
                      no_orig=False)

            self.assertFalse(isfile(f_a+'.orig'))
            self.assertTrue(isfile(f_b+'.orig'))

            for filename in file_diff_dict:
                file_dict[filename] = file_diff_dict[filename].modified

            self.assertEqual(file_dict, expected_file_dict)

    def test_apply_rename(self):
        uut = ApplyPatchAction()
        with make_temp() as f_a:
            file_dict = {f_a: ['1\n', '2\n', '3\n']}
            expected_file_dict = {f_a+'.renamed': ['1\n', '2_changed\n',
                                                   '3_changed\n']}

            file_diff_dict = {}
            diff = Diff(file_dict[f_a], rename=f_a+'.renamed')
            diff.change_line(3, '3\n', '3_changed\n')
            uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
                      file_dict,
                      file_diff_dict)
            self.assertTrue(isfile(f_a+'.orig'))
            self.assertTrue(isfile(f_a+'.renamed'))
            self.assertFalse(isfile(f_a))

            diff = Diff(file_dict[f_a])
            diff.change_line(2, '2\n', '2_changed\n')
            uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
                      file_dict,
                      file_diff_dict)
            self.assertFalse(isfile(f_a+'.renamed.orig'))

            with open(f_a+'.renamed') as fh:
                file_dict = {f_a+'.renamed': fh.readlines()}
            self.assertEqual(file_dict, expected_file_dict)

            # Recreate file so that context manager make_temp() can delete it
            open(f_a, 'w').close()

    def test_apply_delete(self):
        uut = ApplyPatchAction()
        with make_temp() as f_a:
            file_dict = {f_a: ['1\n', '2\n', '3\n']}
            file_diff_dict = {}
            diff = Diff(file_dict[f_a], delete=True)
            uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
                      file_dict,
                      file_diff_dict)
            self.assertFalse(isfile(f_a))
            self.assertTrue(isfile(f_a+'.orig'))
            os.remove(f_a+'.orig')

            diff = Diff(file_dict[f_a])
            diff.change_line(3, '3\n', '3_changed\n')
            uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
                      file_dict,
                      file_diff_dict)
            self.assertFalse(isfile(f_a+'.orig'))

            # Recreate file so that context manager make_temp() can delete it
            open(f_a, 'w').close()

    def test_is_applicable(self):
        diff = Diff(['1\n', '2\n', '3\n'])
        diff.delete_line(2)
        patch_result = Result('', '', diffs={'f': diff})
        self.assertTrue(
            ApplyPatchAction.is_applicable(patch_result, {}, {}))

    def test_is_applicable_conflict(self):
        diff = Diff(['1\n', '2\n', '3\n'])
        diff.add_lines(2, ['a line'])
        conflict_result = Result('', '', diffs={'f': diff})
        # Applying the same diff twice will result in a conflict
        self.assertIn(
            'Two or more patches conflict with each other: ',
            ApplyPatchAction.is_applicable(conflict_result, {}, {'f': diff})
        )

    def test_is_applicable_empty_patch(self):
        diff = Diff([], rename='new_name')
        result = Result('', '', diffs={'f': diff})

        # Two renames do not result in any change
        self.assertEqual(
            ApplyPatchAction.is_applicable(result, {}, {'f': diff}),
            'The given patches do not change anything anymore.'
        )

    def test_is_applicable_without_patch(self):
        result = Result('', '')
        self.assertEqual(
            ApplyPatchAction.is_applicable(result, {}, {}),
            'This result has no patch attached.'
        )
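# Hedged sketch (editor's addition): the tests above drive ApplyPatchAction
# through real temporary files. The Diff behaviour they depend on can be seen
# on its own; only methods that already appear in the tests are used here, and
# a coalib installation is assumed.
from coalib.results.Diff import Diff

_original = ['1\n', '2\n', '3\n']
_diff = Diff(_original)
_diff.change_line(3, '3\n', '3_changed\n')

# .modified is the patched file content that ApplyPatchAction writes to disk;
# this matches what test_apply expects for f_b above.
assert _diff.modified == ['1\n', '2\n', '3_changed\n']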
kingsdigitallab/tvof-django
refs/heads/master
merge_production_dotenvs_in_dotenv.py
5
import os
from pathlib import Path
from typing import Sequence

import pytest

ROOT_DIR_PATH = Path(__file__).parent.resolve()
PRODUCTION_DOTENVS_DIR_PATH = ROOT_DIR_PATH / ".envs" / ".production"
PRODUCTION_DOTENV_FILE_PATHS = [
    PRODUCTION_DOTENVS_DIR_PATH / ".django",
    PRODUCTION_DOTENVS_DIR_PATH / ".postgres",
]
DOTENV_FILE_PATH = ROOT_DIR_PATH / ".env"


def merge(
    output_file_path: str, merged_file_paths: Sequence[str], append_linesep: bool = True
) -> None:
    with open(output_file_path, "w") as output_file:
        for merged_file_path in merged_file_paths:
            with open(merged_file_path, "r") as merged_file:
                merged_file_content = merged_file.read()
                output_file.write(merged_file_content)
                if append_linesep:
                    output_file.write(os.linesep)


def main():
    merge(DOTENV_FILE_PATH, PRODUCTION_DOTENV_FILE_PATHS)


@pytest.mark.parametrize("merged_file_count", range(3))
@pytest.mark.parametrize("append_linesep", [True, False])
def test_merge(tmpdir_factory, merged_file_count: int, append_linesep: bool):
    tmp_dir_path = Path(str(tmpdir_factory.getbasetemp()))

    output_file_path = tmp_dir_path / ".env"

    expected_output_file_content = ""
    merged_file_paths = []
    for i in range(merged_file_count):
        merged_file_ord = i + 1

        merged_filename = ".service{}".format(merged_file_ord)
        merged_file_path = tmp_dir_path / merged_filename

        merged_file_content = merged_filename * merged_file_ord

        with open(merged_file_path, "w+") as file:
            file.write(merged_file_content)

        expected_output_file_content += merged_file_content
        if append_linesep:
            expected_output_file_content += os.linesep

        merged_file_paths.append(merged_file_path)

    merge(output_file_path, merged_file_paths, append_linesep)

    with open(output_file_path, "r") as output_file:
        actual_output_file_content = output_file.read()

    assert actual_output_file_content == expected_output_file_content


if __name__ == "__main__":
    main()
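# Hedged sketch (editor's addition): merge() above simply concatenates its
# input files into DOTENV_FILE_PATH, writing os.linesep after each part when
# append_linesep is True (the default). The demonstration below mirrors that
# behaviour against throwaway files in a temporary directory rather than the
# project's .envs/.production layout.
import os
import tempfile
from pathlib import Path

_tmp = Path(tempfile.mkdtemp())
(_tmp / ".django").write_text("DJANGO_DEBUG=False")
(_tmp / ".postgres").write_text("POSTGRES_DB=app")

with open(_tmp / ".env", "w") as _out:
    for _part in (_tmp / ".django", _tmp / ".postgres"):
        _out.write(_part.read_text())
        _out.write(os.linesep)

print((_tmp / ".env").read_text())
# DJANGO_DEBUG=False
# POSTGRES_DB=app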
JioCloud/cinder
refs/heads/master
cinder/cmd/manage.py
2
#!/usr/bin/env python # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Interactive shell based on Django: # # Copyright (c) 2005, the Lawrence Journal-World # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ CLI interface for cinder management. """ from __future__ import print_function import os import sys from oslo_config import cfg from oslo_db.sqlalchemy import migration from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import uuidutils from cinder import i18n i18n.enable_lazy() # Need to register global_opts from cinder.common import config # noqa from cinder import context from cinder import db from cinder.db import migration as db_migration from cinder.db.sqlalchemy import api as db_api from cinder.i18n import _ from cinder import objects from cinder import rpc from cinder import utils from cinder import version from cinder.volume import utils as vutils CONF = cfg.CONF # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator def param2id(object_id): """Helper function to convert various id types to internal id. :param object_id: e.g. 'vol-0000000a' or 'volume-0000000a' or '10' """ if uuidutils.is_uuid_like(object_id): return object_id elif '-' in object_id: # FIXME(ja): mapping occurs in nova? 
pass else: try: return int(object_id) except ValueError: return object_id class ShellCommands(object): def bpython(self): """Runs a bpython shell. Falls back to Ipython/python shell if unavailable """ self.run('bpython') def ipython(self): """Runs an Ipython shell. Falls back to Python shell if unavailable """ self.run('ipython') def python(self): """Runs a python shell. Falls back to Python shell if unavailable """ self.run('python') @args('--shell', dest="shell", metavar='<bpython|ipython|python>', help='Python shell') def run(self, shell=None): """Runs a Python interactive interpreter.""" if not shell: shell = 'bpython' if shell == 'bpython': try: import bpython bpython.embed() except ImportError: shell = 'ipython' if shell == 'ipython': try: from IPython import embed embed() except ImportError: try: # Ipython < 0.11 # Explicitly pass an empty list as arguments, because # otherwise IPython would use sys.argv from this script. import IPython shell = IPython.Shell.IPShell(argv=[]) shell.mainloop() except ImportError: # no IPython module shell = 'python' if shell == 'python': import code try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', # because we already know 'readline' was imported successfully. import rlcompleter # noqa readline.parse_and_bind("tab:complete") code.interact() @args('--path', required=True, help='Script path') def script(self, path): """Runs the script from the specified path with flags set properly.""" exec(compile(open(path).read(), path, 'exec'), locals(), globals()) def _db_error(caught_exception): print('%s' % caught_exception) print(_("The above error may show that the database has not " "been created.\nPlease create a database using " "'cinder-manage db sync' before running this command.")) exit(1) class HostCommands(object): """List hosts.""" @args('zone', nargs='?', default=None, help='Availability Zone (default: %(default)s)') def list(self, zone=None): """Show a list of all physical hosts. Can be filtered by zone. 
args: [zone] """ print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'}) ctxt = context.get_admin_context() services = db.service_get_all(ctxt) if zone: services = [s for s in services if s['availability_zone'] == zone] hosts = [] for srv in services: if not [h for h in hosts if h['host'] == srv['host']]: hosts.append(srv) for h in hosts: print(_("%(host)-25s\t%(availability_zone)-15s") % {'host': h['host'], 'availability_zone': h['availability_zone']}) class DbCommands(object): """Class for managing the database.""" def __init__(self): pass @args('version', nargs='?', default=None, help='Database version') def sync(self, version=None): """Sync the database up to the most recent version.""" return db_migration.db_sync(version) def version(self): """Print the current database version.""" print(migration.db_version(db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, db_migration.INIT_VERSION)) @args('age_in_days', type=int, help='Purge deleted rows older than age in days') def purge(self, age_in_days): """Purge deleted rows older than a given age from cinder tables.""" age_in_days = int(age_in_days) if age_in_days <= 0: print(_("Must supply a positive, non-zero value for age")) exit(1) ctxt = context.get_admin_context() db.purge_deleted_rows(ctxt, age_in_days) class VersionCommands(object): """Class for exposing the codebase version.""" def __init__(self): pass def list(self): print(version.version_string()) def __call__(self): self.list() class VolumeCommands(object): """Methods for dealing with a cloud in an odd state.""" def __init__(self): self._client = None def rpc_client(self): if self._client is None: if not rpc.initialized(): rpc.init(CONF) target = messaging.Target(topic=CONF.volume_topic) serializer = objects.base.CinderObjectSerializer() self._client = rpc.get_client(target, serializer=serializer) return self._client @args('volume_id', help='Volume ID to be deleted') def delete(self, volume_id): """Delete a volume, bypassing the check that it must be available.""" ctxt = context.get_admin_context() volume = db.volume_get(ctxt, param2id(volume_id)) host = vutils.extract_host(volume['host']) if volume['host'] else None if not host: print(_("Volume not yet assigned to host.")) print(_("Deleting volume from database and skipping rpc.")) db.volume_destroy(ctxt, param2id(volume_id)) return if volume['status'] == 'in-use': print(_("Volume is in-use.")) print(_("Detach volume from instance and then try again.")) return cctxt = self.rpc_client().prepare(server=host) cctxt.cast(ctxt, "delete_volume", volume_id=volume['id']) @args('--currenthost', required=True, help='Existing volume host name') @args('--newhost', required=True, help='New volume host name') def update_host(self, currenthost, newhost): """Modify the host name associated with a volume. Particularly to recover from cases where one has moved their Cinder Volume node, or modified their backend_name in a multi-backend config. """ ctxt = context.get_admin_context() volumes = db.volume_get_all_by_host(ctxt, currenthost) for v in volumes: db.volume_update(ctxt, v['id'], {'host': newhost}) class ConfigCommands(object): """Class for exposing the flags defined by flag_file(s).""" def __init__(self): pass @args('param', nargs='?', default=None, help='Configuration parameter to display (default: %(default)s)') def list(self, param=None): """List parameters configured for cinder. Lists all parameters configured for cinder unless an optional argument is specified. 
If the parameter is specified we only print the requested parameter. If the parameter is not found an appropriate error is produced by .get*(). """ param = param and param.strip() if param: print('%s = %s' % (param, CONF.get(param))) else: for key, value in CONF.items(): print('%s = %s' % (key, value)) class GetLogCommands(object): """Get logging information.""" def errors(self): """Get all of the errors from the log files.""" error_found = 0 if CONF.log_dir: logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')] for file in logs: log_file = os.path.join(CONF.log_dir, file) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print_name = 0 for index, line in enumerate(lines): if line.find(" ERROR ") > 0: error_found += 1 if print_name == 0: print(log_file + ":-") print_name = 1 print(_("Line %(dis)d : %(line)s") % {'dis': len(lines) - index, 'line': line}) if error_found == 0: print(_("No errors in logfiles!")) @args('num_entries', nargs='?', type=int, default=10, help='Number of entries to list (default: %(default)d)') def syslog(self, num_entries=10): """Get <num_entries> of the cinder syslog events.""" entries = int(num_entries) count = 0 log_file = '' if os.path.exists('/var/log/syslog'): log_file = '/var/log/syslog' elif os.path.exists('/var/log/messages'): log_file = '/var/log/messages' else: print(_("Unable to find system log file!")) sys.exit(1) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print(_("Last %s cinder syslog entries:-") % (entries)) for line in lines: if line.find("cinder") > 0: count += 1 print(_("%s") % (line)) if count == entries: break if count == 0: print(_("No cinder entries in syslog!")) class BackupCommands(object): """Methods for managing backups.""" def list(self): """List all backups. List all backups (including ones in progress) and the host on which the backup operation is running. """ ctxt = context.get_admin_context() backups = objects.BackupList.get_all(ctxt) hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s" print(hdr % (_('ID'), _('User ID'), _('Project ID'), _('Host'), _('Name'), _('Container'), _('Status'), _('Size'), _('Object Count'))) res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d" for backup in backups: object_count = 0 if backup['object_count'] is not None: object_count = backup['object_count'] print(res % (backup['id'], backup['user_id'], backup['project_id'], backup['host'], backup['display_name'], backup['container'], backup['status'], backup['size'], object_count)) class ServiceCommands(object): """Methods for managing services.""" def list(self): """Show a list of all cinder services.""" ctxt = context.get_admin_context() services = db.service_get_all(ctxt) print_format = "%-16s %-36s %-16s %-10s %-5s %-10s" print(print_format % (_('Binary'), _('Host'), _('Zone'), _('Status'), _('State'), _('Updated At'))) for svc in services: alive = utils.service_is_up(svc) art = ":-)" if alive else "XXX" status = 'enabled' if svc['disabled']: status = 'disabled' print(print_format % (svc['binary'], svc['host'].partition('.')[0], svc['availability_zone'], status, art, svc['updated_at'])) CATEGORIES = { 'backup': BackupCommands, 'config': ConfigCommands, 'db': DbCommands, 'host': HostCommands, 'logs': GetLogCommands, 'service': ServiceCommands, 'shell': ShellCommands, 'version': VersionCommands, 'volume': VolumeCommands, } def methods_of(obj): """Return non-private methods from an object. 
Get all callable methods of an object that don't start with underscore :return: a list of tuples of the form (method_name, method) """ result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def add_command_parsers(subparsers): for category in CATEGORIES: command_object = CATEGORIES[category]() parser = subparsers.add_parser(category) parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) category_opt = cfg.SubCommandOpt('category', title='Command categories', handler=add_command_parsers) def get_arg_string(args): arg = None if args[0] == '-': # (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars # is optional args. Notice that cfg module takes care of # actual ArgParser so prefix_chars is always '-'. if args[1] == '-': # This is long optional arg arg = args[2:] else: arg = args[1:] else: arg = args return arg def fetch_func_args(func): fn_args = [] for args, kwargs in getattr(func, 'args', []): arg = get_arg_string(args[0]) fn_args.append(getattr(CONF.category, arg)) return fn_args def main(): objects.register_all() """Parse options and call the appropriate class/method.""" CONF.register_cli_opt(category_opt) script_name = sys.argv[0] if len(sys.argv) < 2: print(_("\nOpenStack Cinder version: %(version)s\n") % {'version': version.version_string()}) print(script_name + " category action [<args>]") print(_("Available categories:")) for category in CATEGORIES: print(_("\t%s") % category) sys.exit(2) try: CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") except cfg.ConfigDirNotFoundError as details: print(_("Invalid directory: %s") % details) sys.exit(2) except cfg.ConfigFilesNotFoundError: cfgfile = CONF.config_file[-1] if CONF.config_file else None if cfgfile and not os.access(cfgfile, os.R_OK): st = os.stat(cfgfile) print(_("Could not read %s. Re-running with sudo") % cfgfile) try: os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) except Exception: print(_('sudo failed, continuing as if nothing happened')) print(_('Please re-run cinder-manage as root.')) sys.exit(2) fn = CONF.category.action_fn fn_args = fetch_func_args(fn) fn(*fn_args)
ChristianKniep/QNIB
refs/heads/master
serverfiles/usr/local/lib/networkx-1.6/build/lib/networkx/external/decorator/__init__.py
13
""" Hack for including decorator-3.3.1 in NetworkX. """ import sys if sys.version >= '3': from ._decorator3 import * _decorator = _decorator3 else: from ._decorator import *
mbayon/TFG-MachineLearning
refs/heads/master
vbig/lib/python2.7/site-packages/pandas/tests/indexes/period/test_asfreq.py
15
import pytest import numpy as np import pandas as pd from pandas.util import testing as tm from pandas import PeriodIndex, Series, DataFrame class TestPeriodIndex(object): def setup_method(self, method): pass def test_asfreq(self): pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001') pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001') pi3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001') pi4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001') pi5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00') pi6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00') pi7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00') assert pi1.asfreq('Q', 'S') == pi2 assert pi1.asfreq('Q', 's') == pi2 assert pi1.asfreq('M', 'start') == pi3 assert pi1.asfreq('D', 'StarT') == pi4 assert pi1.asfreq('H', 'beGIN') == pi5 assert pi1.asfreq('Min', 'S') == pi6 assert pi1.asfreq('S', 'S') == pi7 assert pi2.asfreq('A', 'S') == pi1 assert pi2.asfreq('M', 'S') == pi3 assert pi2.asfreq('D', 'S') == pi4 assert pi2.asfreq('H', 'S') == pi5 assert pi2.asfreq('Min', 'S') == pi6 assert pi2.asfreq('S', 'S') == pi7 assert pi3.asfreq('A', 'S') == pi1 assert pi3.asfreq('Q', 'S') == pi2 assert pi3.asfreq('D', 'S') == pi4 assert pi3.asfreq('H', 'S') == pi5 assert pi3.asfreq('Min', 'S') == pi6 assert pi3.asfreq('S', 'S') == pi7 assert pi4.asfreq('A', 'S') == pi1 assert pi4.asfreq('Q', 'S') == pi2 assert pi4.asfreq('M', 'S') == pi3 assert pi4.asfreq('H', 'S') == pi5 assert pi4.asfreq('Min', 'S') == pi6 assert pi4.asfreq('S', 'S') == pi7 assert pi5.asfreq('A', 'S') == pi1 assert pi5.asfreq('Q', 'S') == pi2 assert pi5.asfreq('M', 'S') == pi3 assert pi5.asfreq('D', 'S') == pi4 assert pi5.asfreq('Min', 'S') == pi6 assert pi5.asfreq('S', 'S') == pi7 assert pi6.asfreq('A', 'S') == pi1 assert pi6.asfreq('Q', 'S') == pi2 assert pi6.asfreq('M', 'S') == pi3 assert pi6.asfreq('D', 'S') == pi4 assert pi6.asfreq('H', 'S') == pi5 assert pi6.asfreq('S', 'S') == pi7 assert pi7.asfreq('A', 'S') == pi1 assert pi7.asfreq('Q', 'S') == pi2 assert pi7.asfreq('M', 'S') == pi3 assert pi7.asfreq('D', 'S') == pi4 assert pi7.asfreq('H', 'S') == pi5 assert pi7.asfreq('Min', 'S') == pi6 pytest.raises(ValueError, pi7.asfreq, 'T', 'foo') result1 = pi1.asfreq('3M') result2 = pi1.asfreq('M') expected = PeriodIndex(freq='M', start='2001-12', end='2001-12') tm.assert_numpy_array_equal(result1.asi8, expected.asi8) assert result1.freqstr == '3M' tm.assert_numpy_array_equal(result2.asi8, expected.asi8) assert result2.freqstr == 'M' def test_asfreq_nat(self): idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M') result = idx.asfreq(freq='Q') expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q') tm.assert_index_equal(result, expected) def test_asfreq_mult_pi(self): pi = PeriodIndex(['2001-01', '2001-02', 'NaT', '2001-03'], freq='2M') for freq in ['D', '3D']: result = pi.asfreq(freq) exp = PeriodIndex(['2001-02-28', '2001-03-31', 'NaT', '2001-04-30'], freq=freq) tm.assert_index_equal(result, exp) assert result.freq == exp.freq result = pi.asfreq(freq, how='S') exp = PeriodIndex(['2001-01-01', '2001-02-01', 'NaT', '2001-03-01'], freq=freq) tm.assert_index_equal(result, exp) assert result.freq == exp.freq def test_asfreq_combined_pi(self): pi = pd.PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'], freq='H') exp = PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'], freq='25H') for freq, how in zip(['1D1H', '1H1D'], ['S', 'E']): result = pi.asfreq(freq, 
how=how) tm.assert_index_equal(result, exp) assert result.freq == exp.freq for freq in ['1D1H', '1H1D']: pi = pd.PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'], freq=freq) result = pi.asfreq('H') exp = PeriodIndex(['2001-01-02 00:00', '2001-01-03 02:00', 'NaT'], freq='H') tm.assert_index_equal(result, exp) assert result.freq == exp.freq pi = pd.PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'], freq=freq) result = pi.asfreq('H', how='S') exp = PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'], freq='H') tm.assert_index_equal(result, exp) assert result.freq == exp.freq def test_asfreq_ts(self): index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010') ts = Series(np.random.randn(len(index)), index=index) df = DataFrame(np.random.randn(len(index), 3), index=index) result = ts.asfreq('D', how='end') df_result = df.asfreq('D', how='end') exp_index = index.asfreq('D', how='end') assert len(result) == len(ts) tm.assert_index_equal(result.index, exp_index) tm.assert_index_equal(df_result.index, exp_index) result = ts.asfreq('D', how='start') assert len(result) == len(ts) tm.assert_index_equal(result.index, index.asfreq('D', how='start')) def test_astype_asfreq(self): pi1 = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'], freq='D') exp = PeriodIndex(['2011-01', '2011-02', '2011-03'], freq='M') tm.assert_index_equal(pi1.asfreq('M'), exp) tm.assert_index_equal(pi1.astype('period[M]'), exp) exp = PeriodIndex(['2011-01', '2011-02', '2011-03'], freq='3M') tm.assert_index_equal(pi1.asfreq('3M'), exp) tm.assert_index_equal(pi1.astype('period[3M]'), exp)
Lujeni/ansible
refs/heads/devel
lib/ansible/modules/cloud/google/gcp_bigquery_table_info.py
7
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_bigquery_table_info description: - Gather info for GCP Table short_description: Gather info for GCP Table version_added: '2.8' author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: dataset: description: - Name of the dataset. required: false type: str project: description: - The Google Cloud Platform project to use. type: str auth_kind: description: - The type of credential used. type: str required: true choices: - application - machineaccount - serviceaccount service_account_contents: description: - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it. type: jsonarg service_account_file: description: - The path of a Service Account JSON file if serviceaccount is selected as type. type: path service_account_email: description: - An optional service account email address if machineaccount is selected and the user does not wish to use the default email. type: str scopes: description: - Array of scopes to be used type: list env_type: description: - Specifies which Ansible environment you're running this module within. - This should not be set unless you know what you're doing. - This only alters the User Agent string for any API requests. type: str notes: - for authentication, you can set service_account_file using the C(gcp_service_account_file) env variable. - for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable. - For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL) env variable. - For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable. - For authentication, you can set scopes using the C(GCP_SCOPES) env variable. - Environment variables values will only be used if the playbook values are not set. - The I(service_account_email) and I(service_account_file) options are mutually exclusive. ''' EXAMPLES = ''' - name: get info on a table gcp_bigquery_table_info: dataset: example_dataset project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" ''' RETURN = ''' resources: description: List of resources returned: always type: complex contains: tableReference: description: - Reference describing the ID of this table. returned: success type: complex contains: datasetId: description: - The ID of the dataset containing this table. 
returned: success type: str projectId: description: - The ID of the project containing this table. returned: success type: str tableId: description: - The ID of the the table. returned: success type: str clustering: description: - One or more fields on which data should be clustered. Only top-level, non-repeated, simple-type fields are supported. When you cluster a table using multiple columns, the order of columns you specify is important. The order of the specified columns determines the sort order of the data. returned: success type: list creationTime: description: - The time when this dataset was created, in milliseconds since the epoch. returned: success type: int description: description: - A user-friendly description of the dataset. returned: success type: str friendlyName: description: - A descriptive name for this table. returned: success type: str id: description: - An opaque ID uniquely identifying the table. returned: success type: str labels: description: - The labels associated with this dataset. You can use these to organize and group your datasets . returned: success type: dict lastModifiedTime: description: - The time when this table was last modified, in milliseconds since the epoch. returned: success type: int location: description: - The geographic location where the table resides. This value is inherited from the dataset. returned: success type: str name: description: - Name of the table. returned: success type: str numBytes: description: - The size of this table in bytes, excluding any data in the streaming buffer. returned: success type: int numLongTermBytes: description: - The number of bytes in the table that are considered "long-term storage". returned: success type: int numRows: description: - The number of rows of data in this table, excluding any data in the streaming buffer. returned: success type: int requirePartitionFilter: description: - If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. returned: success type: bool type: description: - Describes the table type. returned: success type: str view: description: - The view definition. returned: success type: complex contains: useLegacySql: description: - Specifies whether to use BigQuery's legacy SQL for this view . returned: success type: bool userDefinedFunctionResources: description: - Describes user-defined function resources used in the query. returned: success type: complex contains: inlineCode: description: - An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code. returned: success type: str resourceUri: description: - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). returned: success type: str timePartitioning: description: - If specified, configures time-based partitioning for this table. returned: success type: complex contains: expirationMs: description: - Number of milliseconds for which to keep the storage for a partition. returned: success type: int field: description: - If not set, the table is partitioned by pseudo column, referenced via either '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If field is specified, the table is instead partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. 
returned: success type: str type: description: - The only type supported is DAY, which will generate one partition per day. returned: success type: str streamingBuffer: description: - Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer. returned: success type: complex contains: estimatedBytes: description: - A lower-bound estimate of the number of bytes currently in the streaming buffer. returned: success type: int estimatedRows: description: - A lower-bound estimate of the number of rows currently in the streaming buffer. returned: success type: int oldestEntryTime: description: - Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. returned: success type: int schema: description: - Describes the schema of this table. returned: success type: complex contains: fields: description: - Describes the fields in a table. returned: success type: complex contains: description: description: - The field description. The maximum length is 1,024 characters. returned: success type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD. returned: success type: list mode: description: - The field mode. returned: success type: str name: description: - The field name. returned: success type: str type: description: - The field data type. returned: success type: str encryptionConfiguration: description: - Custom encryption configuration. returned: success type: complex contains: kmsKeyName: description: - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. returned: success type: str expirationTime: description: - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. returned: success type: int externalDataConfiguration: description: - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. returned: success type: complex contains: autodetect: description: - Try to detect schema and format options automatically. Any option specified explicitly will be honored. returned: success type: bool compression: description: - The compression type of the data source. returned: success type: str ignoreUnknownValues: description: - Indicates if BigQuery should allow extra values that are not represented in the table schema . returned: success type: bool maxBadRecords: description: - The maximum number of bad records that BigQuery can ignore when reading data . returned: success type: int sourceFormat: description: - The data format. returned: success type: str sourceUris: description: - The fully-qualified URIs that point to your data in Google Cloud. - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character and it must come after the ''bucket'' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. 
Also, the ''*'' wildcard character is not allowed.' returned: success type: list schema: description: - The schema for the data. Schema is required for CSV and JSON formats. returned: success type: complex contains: fields: description: - Describes the fields in a table. returned: success type: complex contains: description: description: - The field description. returned: success type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD . returned: success type: list mode: description: - Field mode. returned: success type: str name: description: - Field name. returned: success type: str type: description: - Field data type. returned: success type: str googleSheetsOptions: description: - Additional options if sourceFormat is set to GOOGLE_SHEETS. returned: success type: complex contains: skipLeadingRows: description: - The number of rows at the top of a Google Sheet that BigQuery will skip when reading the data. returned: success type: int csvOptions: description: - Additional properties to set if sourceFormat is set to CSV. returned: success type: complex contains: allowJaggedRows: description: - Indicates if BigQuery should accept rows that are missing trailing optional columns . returned: success type: bool allowQuotedNewlines: description: - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file . returned: success type: bool encoding: description: - The character encoding of the data. returned: success type: str fieldDelimiter: description: - The separator for fields in a CSV file. returned: success type: str quote: description: - The value that is used to quote data sections in a CSV file. returned: success type: str skipLeadingRows: description: - The number of rows at the top of a CSV file that BigQuery will skip when reading the data. returned: success type: int bigtableOptions: description: - Additional options if sourceFormat is set to BIGTABLE. returned: success type: complex contains: ignoreUnspecifiedColumnFamilies: description: - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema . returned: success type: bool readRowkeyAsString: description: - If field is true, then the rowkey column families will be read and converted to string. returned: success type: bool columnFamilies: description: - List of column families to expose in the table schema along with their types. returned: success type: complex contains: columns: description: - Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. returned: success type: complex contains: encoding: description: - The encoding of the values when the type is not STRING. returned: success type: str fieldName: description: - If the qualifier is not a valid BigQuery field identifier, a valid identifier must be provided as the column field name and is used as field name in queries. returned: success type: str onlyReadLatest: description: - If this is set, only the latest version of value in this column are exposed . returned: success type: bool qualifierString: description: - Qualifier of the column. returned: success type: str type: description: - The type to convert the value in cells of this column. returned: success type: str encoding: description: - The encoding of the values when the type is not STRING. returned: success type: str familyId: description: - Identifier of the column family. 
returned: success type: str onlyReadLatest: description: - If this is set only the latest version of value are exposed for all columns in this column family . returned: success type: bool type: description: - The type to convert the value in cells of this column family. returned: success type: str dataset: description: - Name of the dataset. returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest import json ################################################################################ # Main ################################################################################ def main(): module = GcpModule(argument_spec=dict(dataset=dict(type='str'))) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery'] return_value = {'resources': fetch_list(module, collection(module))} module.exit_json(**return_value) def collection(module): return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params) def fetch_list(module, link): auth = GcpSession(module, 'bigquery') return auth.list(link, return_if_object, array_name='tables') def return_if_object(module, response): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result if __name__ == "__main__": main()
sorig/shogun
refs/heads/develop
examples/undocumented/python/graphical/lda.py
10
from pylab import figure,pcolor,scatter,contour,colorbar,show,subplot,plot,connect

from shogun import *
import util

util.set_title('LDA')
util.DISTANCE=0.5
gamma=0.1

# positive examples
pos=util.get_realdata(True)
plot(pos[0,:], pos[1,:], "r.")

# negative examples
neg=util.get_realdata(False)
plot(neg[0,:], neg[1,:], "b.")

# train lda
labels=util.get_labels()
features=util.get_realfeatures(pos, neg)
lda=LDA(gamma, features, labels)
lda.train()

# compute output plot iso-lines
x, y, z=util.compute_output_plot_isolines(lda)

c=pcolor(x, y, z)
contour(x, y, z, linewidths=1, colors='black', hold=True)
colorbar(c)

connect('key_press_event', util.quit)
show()
hustlzp/eve
refs/heads/develop
docs/_themes/flask_theme_support.py
2228
# flasky extensions. flasky pygments style based on tango style from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal class FlaskyStyle(Style): background_color = "#f8f8f8" default_style = "" styles = { # No corresponding class for the following: #Text: "", # class: '' Whitespace: "underline #f8f8f8", # class: 'w' Error: "#a40000 border:#ef2929", # class: 'err' Other: "#000000", # class 'x' Comment: "italic #8f5902", # class: 'c' Comment.Preproc: "noitalic", # class: 'cp' Keyword: "bold #004461", # class: 'k' Keyword.Constant: "bold #004461", # class: 'kc' Keyword.Declaration: "bold #004461", # class: 'kd' Keyword.Namespace: "bold #004461", # class: 'kn' Keyword.Pseudo: "bold #004461", # class: 'kp' Keyword.Reserved: "bold #004461", # class: 'kr' Keyword.Type: "bold #004461", # class: 'kt' Operator: "#582800", # class: 'o' Operator.Word: "bold #004461", # class: 'ow' - like keywords Punctuation: "bold #000000", # class: 'p' # because special names such as Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. Name: "#000000", # class: 'n' Name.Attribute: "#c4a000", # class: 'na' - to be revised Name.Builtin: "#004461", # class: 'nb' Name.Builtin.Pseudo: "#3465a4", # class: 'bp' Name.Class: "#000000", # class: 'nc' - to be revised Name.Constant: "#000000", # class: 'no' - to be revised Name.Decorator: "#888", # class: 'nd' - to be revised Name.Entity: "#ce5c00", # class: 'ni' Name.Exception: "bold #cc0000", # class: 'ne' Name.Function: "#000000", # class: 'nf' Name.Property: "#000000", # class: 'py' Name.Label: "#f57900", # class: 'nl' Name.Namespace: "#000000", # class: 'nn' - to be revised Name.Other: "#000000", # class: 'nx' Name.Tag: "bold #004461", # class: 'nt' - like a keyword Name.Variable: "#000000", # class: 'nv' - to be revised Name.Variable.Class: "#000000", # class: 'vc' - to be revised Name.Variable.Global: "#000000", # class: 'vg' - to be revised Name.Variable.Instance: "#000000", # class: 'vi' - to be revised Number: "#990000", # class: 'm' Literal: "#000000", # class: 'l' Literal.Date: "#000000", # class: 'ld' String: "#4e9a06", # class: 's' String.Backtick: "#4e9a06", # class: 'sb' String.Char: "#4e9a06", # class: 'sc' String.Doc: "italic #8f5902", # class: 'sd' - like a comment String.Double: "#4e9a06", # class: 's2' String.Escape: "#4e9a06", # class: 'se' String.Heredoc: "#4e9a06", # class: 'sh' String.Interpol: "#4e9a06", # class: 'si' String.Other: "#4e9a06", # class: 'sx' String.Regex: "#4e9a06", # class: 'sr' String.Single: "#4e9a06", # class: 's1' String.Symbol: "#4e9a06", # class: 'ss' Generic: "#000000", # class: 'g' Generic.Deleted: "#a40000", # class: 'gd' Generic.Emph: "italic #000000", # class: 'ge' Generic.Error: "#ef2929", # class: 'gr' Generic.Heading: "bold #000080", # class: 'gh' Generic.Inserted: "#00A000", # class: 'gi' Generic.Output: "#888", # class: 'go' Generic.Prompt: "#745334", # class: 'gp' Generic.Strong: "bold #000000", # class: 'gs' Generic.Subheading: "bold #800080", # class: 'gu' Generic.Traceback: "bold #a40000", # class: 'gt' }
kalyons11/kevin
refs/heads/main
kevin/leet/spiral_matrix.py
1
""" https://leetcode.com/explore/interview/card/top-interview-questions-hard/116/array-and-strings/828/ """ from typing import List class Direction: RIGHT = 0 DOWN = 1 LEFT = 2 UP = 3 # TODO eventually clean this up - DRY up this code for sure class Solution: def spiral_order(self, matrix: List[List[int]]) -> List[int]: m = len(matrix) n = len(matrix[0]) dir = Direction.RIGHT min_row = 0 max_row = m - 1 min_col = 0 max_col = n - 1 i, j = 0, 0 result = [] while True: val = matrix[i][j] result.append(val) if dir == Direction.RIGHT: # try to go right if j == max_col: # start going down i += 1 min_row += 1 dir = Direction.DOWN else: # keep going right j += 1 elif dir == Direction.DOWN: # try to go down if i == max_row: # start going left j -= 1 max_col -= 1 dir = Direction.LEFT else: # keep going down i += 1 elif dir == Direction.LEFT: # try to go left if j == min_col: # start going up i -= 1 max_row -= 1 dir = Direction.UP else: # keep going left j -= 1 elif dir == Direction.UP: # try to go up if i == min_row: # start going right j += 1 min_col += 1 dir = Direction.RIGHT else: # keep going up i -= 1 # check for end case if min_row > max_row or min_col > max_col: break return result
veger/ansible
refs/heads/devel
lib/ansible/modules/cloud/ovirt/ovirt_host_storage_facts.py
59
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2017 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ovirt_host_storage_facts short_description: Retrieve facts about one or more oVirt/RHV HostStorages (applicable only for block storage) author: "Daniel Erez (@derez)" version_added: "2.4" description: - "Retrieve facts about one or more oVirt/RHV HostStorages (applicable only for block storage)." options: host: description: - "Host to get device list from." required: true iscsi: description: - "Dictionary with values for iSCSI storage type:" - "C(address) - Address of the iSCSI storage server." - "C(target) - The target IQN for the storage device." - "C(username) - A CHAP user name for logging into a target." - "C(password) - A CHAP password for logging into a target." fcp: description: - "Dictionary with values for fibre channel storage type:" - "C(address) - Address of the fibre channel storage server." - "C(port) - Port of the fibre channel storage server." - "C(lun_id) - LUN id." extends_documentation_fragment: ovirt_facts ''' EXAMPLES = ''' # Examples don't contain auth parameter for simplicity, # look at ovirt_auth module to see how to reuse authentication: # Gather facts about HostStorages with specified target and address: - ovirt_host_storage_facts: host: myhost iscsi: target: iqn.2016-08-09.domain-01:nickname address: 10.34.63.204 - debug: var: ovirt_host_storages ''' RETURN = ''' ovirt_host_storages: description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys, all HostStorage attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage." returned: On success. 
type: list ''' import traceback try: import ovirtsdk4.types as otypes except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ovirt import ( check_sdk, create_connection, get_dict_of_struct, ovirt_facts_full_argument_spec, get_id_by_name, ) def _login(host_service, iscsi): host_service.iscsi_login( iscsi=otypes.IscsiDetails( username=iscsi.get('username'), password=iscsi.get('password'), address=iscsi.get('address'), target=iscsi.get('target'), ), ) def _get_storage_type(params): for sd_type in ['iscsi', 'fcp']: if params.get(sd_type) is not None: return sd_type def main(): argument_spec = ovirt_facts_full_argument_spec( host=dict(required=True), iscsi=dict(default=None, type='dict'), fcp=dict(default=None, type='dict'), ) module = AnsibleModule(argument_spec) check_sdk(module) try: auth = module.params.pop('auth') connection = create_connection(auth) # Get Host hosts_service = connection.system_service().hosts_service() host_id = get_id_by_name(hosts_service, module.params['host']) storage_type = _get_storage_type(module.params) host_service = hosts_service.host_service(host_id) if storage_type == 'iscsi': # Login iscsi = module.params.get('iscsi') _login(host_service, iscsi) # Get LUNs exposed from the specified target host_storages = host_service.storage_service().list() if storage_type == 'iscsi': filterred_host_storages = [host_storage for host_storage in host_storages if host_storage.type == otypes.StorageType.ISCSI] if 'target' in iscsi: filterred_host_storages = [host_storage for host_storage in filterred_host_storages if iscsi.get('target') == host_storage.logical_units[0].target] elif storage_type == 'fcp': filterred_host_storages = [host_storage for host_storage in host_storages if host_storage.type == otypes.StorageType.FCP] module.exit_json( changed=False, ansible_facts=dict( ovirt_host_storages=[ get_dict_of_struct( struct=c, connection=connection, fetch_nested=module.params.get('fetch_nested'), attributes=module.params.get('nested_attributes'), ) for c in filterred_host_storages ], ), ) except Exception as e: module.fail_json(msg=str(e), exception=traceback.format_exc()) finally: connection.close(logout=auth.get('token') is None) if __name__ == '__main__': main()
NKUCodingCat/jolla
refs/heads/master
jolla/HTTPerror.py
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-


class HTTPError(Exception):
    error_code = None

    def __init__(self, info=None):
        self.info = info
        if self.info:
            print '<' + self.info + '>'


class HTTP404Error(HTTPError):
    error_code = 404

    def __str__(self):
        return "<404 NOT FOUND>"


class HTTP502Error(HTTPError):
    error_code = 502

    def __str__(self):
        return "<502 SERVER ERROR>"


class HTTP403Error(HTTPError):
    error_code = 403

    def __str__(self):
        return "<403 FORBIDDEN>"


class HTTP500Error(HTTPError):
    error_code = 500

    def __str__(self):
        return "<server error>"
GhostThrone/django
refs/heads/master
django/conf/locale/sk/formats.py
504
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y',     # '25.10.2006', '25.10.06'
    '%y-%m-%d',                 # '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',        # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',     # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',           # '25.10.2006 14:30'
    '%d.%m.%Y',                 # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
gangadharkadam/v4_frappe
refs/heads/develop
frappe/core/doctype/communication/communication.py
2
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe import json import urllib from email.utils import formataddr from frappe.website.utils import is_signup_enabled from frappe.utils import get_url, cstr from frappe.utils.email_lib.email_body import get_email from frappe.utils.email_lib.smtp import send from frappe.utils import scrub_urls, cint from frappe import _ from frappe.model.document import Document class Communication(Document): def validate(self): if not self.parentfield: self.parentfield = "communications" def get_parent_doc(self): return frappe.get_doc(self.parenttype, self.parent) def update_parent(self): """update status of parent Lead or Contact based on who is replying""" if self.parenttype and self.parent: parent_doc = self.get_parent_doc() parent_doc.run_method("on_communication") def on_update(self): self.update_parent() @frappe.whitelist() def make(doctype=None, name=None, content=None, subject=None, sent_or_received = "Sent", sender=None, recipients=None, communication_medium="Email", send_email=False, print_html=None, print_format=None, attachments='[]', send_me_a_copy=False, set_lead=True, date=None): if doctype and name and not frappe.has_permission(doctype, "email", name): raise frappe.PermissionError("You are not allowed to send emails related to: {doctype} {name}".format( doctype=doctype, name=name)) _make(doctype=doctype, name=name, content=content, subject=subject, sent_or_received=sent_or_received, sender=sender, recipients=recipients, communication_medium=communication_medium, send_email=send_email, print_html=print_html, print_format=print_format, attachments=attachments, send_me_a_copy=send_me_a_copy, set_lead=set_lead, date=date) def _make(doctype=None, name=None, content=None, subject=None, sent_or_received = "Sent", sender=None, recipients=None, communication_medium="Email", send_email=False, print_html=None, print_format=None, attachments='[]', send_me_a_copy=False, set_lead=True, date=None): # add to Communication sent_via = None # since we are using fullname and email, # if the fullname has any incompatible characters,formataddr can deal with it try: sender = json.loads(sender) except ValueError: pass if isinstance(sender, (tuple, list)) and len(sender)==2: sender = formataddr(sender) comm = frappe.new_doc('Communication') d = comm d.subject = subject d.content = content d.sent_or_received = sent_or_received d.sender = sender or frappe.db.get_value("User", frappe.session.user, "email") d.recipients = recipients # add as child sent_via = frappe.get_doc(doctype, name) d.parent = name d.parenttype = doctype d.parentfield = "communications" if date: d.communication_date = date d.communication_medium = communication_medium d.idx = cint(frappe.db.sql("""select max(idx) from `tabCommunication` where parenttype=%s and parent=%s""", (doctype, name))[0][0]) + 1 comm.ignore_permissions = True comm.insert() if send_email: d = comm send_comm_email(d, name, sent_via, print_html, print_format, attachments, send_me_a_copy) @frappe.whitelist() def get_customer_supplier(args=None): """ Get Customer/Supplier, given a contact, if a unique match exists """ if not args: args = frappe.local.form_dict if not args.get('contact'): raise Exception, "Please specify a contact to fetch Customer/Supplier" result = frappe.db.sql("""\ select customer, supplier from `tabContact` where name = %s""", args.get('contact'), as_dict=1) if result and len(result)==1 and 
(result[0]['customer'] or result[0]['supplier']): return { 'fieldname': result[0]['customer'] and 'customer' or 'supplier', 'value': result[0]['customer'] or result[0]['supplier'] } return {} def send_comm_email(d, name, sent_via=None, print_html=None, print_format=None, attachments='[]', send_me_a_copy=False): footer = None if sent_via: if hasattr(sent_via, "get_sender"): d.sender = sent_via.get_sender(d) or d.sender if hasattr(sent_via, "get_subject"): d.subject = sent_via.get_subject(d) if hasattr(sent_via, "get_content"): d.content = sent_via.get_content(d) footer = "<hr>" + set_portal_link(sent_via, d) mail = get_email(d.recipients, sender=d.sender, subject=d.subject, msg=d.content, footer=footer) if send_me_a_copy: mail.cc.append(frappe.db.get_value("User", frappe.session.user, "email")) if print_html or print_format: attach_print(mail, sent_via, print_html, print_format) for a in json.loads(attachments): try: mail.attach_file(a) except IOError: frappe.throw(_("Unable to find attachment {0}").format(a)) send(mail) def attach_print(mail, sent_via, print_html, print_format): name = sent_via.name if not print_html and print_format: print_html = frappe.get_print_format(sent_via.doctype, sent_via.name, print_format) print_settings = frappe.db.get_singles_dict("Print Settings") send_print_as_pdf = cint(print_settings.send_print_as_pdf) if send_print_as_pdf: try: mail.add_pdf_attachment(name.replace(' ','').replace('/','-') + '.pdf', print_html) except Exception: frappe.msgprint(_("Error generating PDF, attachment sent as HTML")) frappe.errprint(frappe.get_traceback()) send_print_as_pdf = 0 if not send_print_as_pdf: print_html = scrub_urls(print_html) mail.add_attachment(name.replace(' ','').replace('/','-') + '.html', print_html, 'text/html') def set_portal_link(sent_via, comm): """set portal link in footer""" footer = "" if is_signup_enabled(): is_valid_recipient = cstr(sent_via.get("email") or sent_via.get("email_id") or sent_via.get("contact_email")) in comm.recipients if is_valid_recipient: url = "%s/%s/%s" % (get_url(), urllib.quote(sent_via.doctype), urllib.quote(sent_via.name)) footer = """<!-- Portal Link --> <p><a href="%s" target="_blank">View this on our website</a></p>""" % url return footer
surligas/cs436-gnuradio
refs/heads/master
gr-filter/examples/synth_to_chan.py
40
#!/usr/bin/env python # # Copyright 2010,2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr from gnuradio import blocks from gnuradio import filter import sys try: from gnuradio import analog except ImportError: sys.stderr.write("Error: Program requires gr-analog.\n") sys.exit(1) try: import scipy except ImportError: sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n") sys.exit(1) try: import pylab except ImportError: sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n") sys.exit(1) def main(): N = 1000000 fs = 8000 freqs = [100, 200, 300, 400, 500] nchans = 7 sigs = list() fmtx = list() for fi in freqs: s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1) fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6) sigs.append(s) fmtx.append(fm) syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100) print "Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps), len(syntaps)/nchans) chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100) print "Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps), len(chtaps)/nchans) filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps) channelizer = filter.pfb.channelizer_ccf(nchans, chtaps) noise_level = 0.01 head = blocks.head(gr.sizeof_gr_complex, N) noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level) addnoise = blocks.add_cc() snk_synth = blocks.vector_sink_c() tb = gr.top_block() tb.connect(noise, (addnoise,0)) tb.connect(filtbank, head, (addnoise, 1)) tb.connect(addnoise, channelizer) tb.connect(addnoise, snk_synth) snk = list() for i,si in enumerate(sigs): tb.connect(si, fmtx[i], (filtbank, i)) for i in xrange(nchans): snk.append(blocks.vector_sink_c()) tb.connect((channelizer, i), snk[i]) tb.run() if 1: channel = 1 data = snk[channel].data()[1000:] f1 = pylab.figure(1) s1 = f1.add_subplot(1,1,1) s1.plot(data[10000:10200] ) s1.set_title(("Output Signal from Channel %d" % channel)) fftlen = 2048 winfunc = scipy.blackman #winfunc = scipy.hamming f2 = pylab.figure(2) s2 = f2.add_subplot(1,1,1) s2.psd(data, NFFT=fftlen, Fs = nchans*fs, noverlap=fftlen/4, window = lambda d: d*winfunc(fftlen)) s2.set_title(("Output PSD from Channel %d" % channel)) f3 = pylab.figure(3) s3 = f3.add_subplot(1,1,1) s3.psd(snk_synth.data()[1000:], NFFT=fftlen, Fs = nchans*fs, noverlap=fftlen/4, window = lambda d: d*winfunc(fftlen)) s3.set_title("Output of Synthesis Filter") pylab.show() if __name__ == "__main__": main()
certik/cos-10-poster
refs/heads/master
graphs/hydrogen_pfem_uniform_init.py
2
R_x = {
    0: [9, 11, 13, 15, 17, 19, 23, 27, 33, 39],
}
R_y = {
    (1, 0): [0.34629090548783104, 0.20598776759902926, 0.083803156392419087,
        0.018816315103317549, 0.0019781945906690024, 0.00010426021658926921,
        3.1495432171735338e-06, 5.9057941981244255e-08, 7.2221517655179923e-10,
        3.6831648841939568e-12],
    (2, 0): [0.076676108804106441, 0.039076899021997666, 0.010344886772196957,
        0.00083252851133155947, 1.8233569149303519e-05, 2.3435890916800872e-07,
        8.604972220882523e-09, 2.5426401262240717e-10, 2.7971375216040428e-12,
        -2.0808355039036996e-13],
    (3, 0): [0.031235451682185878, 0.014914274017031902, 0.0032454118580515501,
        0.0002766525185903812, 0.00015125490932969832, 0.0001502425046919359,
        1.9901834519900352e-06, 1.7979640554166565e-09, 6.3702168540125115e-11,
        3.0753871671507227e-13],
}
malayaleecoder/servo
refs/heads/master
tests/wpt/web-platform-tests/old-tests/webdriver/base_test.py
142
import ConfigParser
import json
import os
import sys
import unittest

from network import get_lan_ip

repo_root = os.path.abspath(os.path.join(__file__, "../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
sys.path.insert(1, os.path.join(repo_root, "tools", "wptserve"))
from wptserve import server
from selenium import webdriver


class WebDriverBaseTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.driver = create_driver()

        cls.webserver = server.WebTestHttpd(host=get_lan_ip())
        cls.webserver.start()
        cls.webserver.where_is = cls.webserver.get_url

    @classmethod
    def tearDownClass(cls):
        cls.webserver.stop()
        if cls.driver:
            cls.driver.quit()


def create_driver():
    config = ConfigParser.ConfigParser()
    config.read('webdriver.cfg')
    section = os.environ.get("WD_BROWSER", 'firefox')

    if config.has_option(section, 'url'):
        url = config.get(section, "url")
    else:
        url = 'http://127.0.0.1:4444/wd/hub'

    capabilities = None
    if config.has_option(section, 'capabilities'):
        try:
            capabilities = json.loads(config.get(section, "capabilities"))
        except:
            pass

    mode = 'compatibility'
    if config.has_option(section, 'mode'):
        mode = config.get(section, 'mode')

    if section == 'firefox':
        driver = webdriver.Firefox()
    elif section == 'chrome':
        driver = webdriver.Chrome()
    elif section == 'edge':
        driver = webdriver.Remote()
    elif section == 'ie':
        driver = webdriver.Ie()
    elif section == 'selendroid':
        driver = webdriver.Android()

    return driver
tmm1/graphite
refs/heads/master
webapp/graphite/__init__.py
161
# Two wrongs don't make a right, but three lefts do.
servo/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/third_party/aioquic/tests/test_h0.py
17
from unittest import TestCase from aioquic.h0.connection import H0_ALPN, H0Connection from aioquic.h3.events import DataReceived, HeadersReceived from .test_connection import client_and_server, transfer def h0_client_and_server(): return client_and_server( client_options={"alpn_protocols": H0_ALPN}, server_options={"alpn_protocols": H0_ALPN}, ) def h0_transfer(quic_sender, h0_receiver): quic_receiver = h0_receiver._quic transfer(quic_sender, quic_receiver) # process QUIC events http_events = [] event = quic_receiver.next_event() while event is not None: http_events.extend(h0_receiver.handle_event(event)) event = quic_receiver.next_event() return http_events class H0ConnectionTest(TestCase): def test_connect(self): with h0_client_and_server() as (quic_client, quic_server): h0_client = H0Connection(quic_client) h0_server = H0Connection(quic_server) # send request stream_id = quic_client.get_next_available_stream_id() h0_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"GET"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], ) h0_client.send_data(stream_id=stream_id, data=b"", end_stream=True) # receive request events = h0_transfer(quic_client, h0_server) self.assertEqual(len(events), 2) self.assertTrue(isinstance(events[0], HeadersReceived)) self.assertEqual( events[0].headers, [(b":method", b"GET"), (b":path", b"/")] ) self.assertEqual(events[0].stream_id, stream_id) self.assertEqual(events[0].stream_ended, False) self.assertTrue(isinstance(events[1], DataReceived)) self.assertEqual(events[1].data, b"") self.assertEqual(events[1].stream_id, stream_id) self.assertEqual(events[1].stream_ended, True) # send response h0_server.send_headers( stream_id=stream_id, headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), ], ) h0_server.send_data( stream_id=stream_id, data=b"<html><body>hello</body></html>", end_stream=True, ) # receive response events = h0_transfer(quic_server, h0_client) self.assertEqual(len(events), 2) self.assertTrue(isinstance(events[0], HeadersReceived)) self.assertEqual(events[0].headers, []) self.assertEqual(events[0].stream_id, stream_id) self.assertEqual(events[0].stream_ended, False) self.assertTrue(isinstance(events[1], DataReceived)) self.assertEqual(events[1].data, b"<html><body>hello</body></html>") self.assertEqual(events[1].stream_id, stream_id) self.assertEqual(events[1].stream_ended, True) def test_headers_only(self): with h0_client_and_server() as (quic_client, quic_server): h0_client = H0Connection(quic_client) h0_server = H0Connection(quic_server) # send request stream_id = quic_client.get_next_available_stream_id() h0_client.send_headers( stream_id=stream_id, headers=[ (b":method", b"HEAD"), (b":scheme", b"https"), (b":authority", b"localhost"), (b":path", b"/"), ], end_stream=True, ) # receive request events = h0_transfer(quic_client, h0_server) self.assertEqual(len(events), 2) self.assertTrue(isinstance(events[0], HeadersReceived)) self.assertEqual( events[0].headers, [(b":method", b"HEAD"), (b":path", b"/")] ) self.assertEqual(events[0].stream_id, stream_id) self.assertEqual(events[0].stream_ended, False) self.assertTrue(isinstance(events[1], DataReceived)) self.assertEqual(events[1].data, b"") self.assertEqual(events[1].stream_id, stream_id) self.assertEqual(events[1].stream_ended, True) # send response h0_server.send_headers( stream_id=stream_id, headers=[ (b":status", b"200"), (b"content-type", b"text/html; charset=utf-8"), ], end_stream=True, ) # receive response events = 
h0_transfer(quic_server, h0_client) self.assertEqual(len(events), 2) self.assertTrue(isinstance(events[0], HeadersReceived)) self.assertEqual(events[0].headers, []) self.assertEqual(events[0].stream_id, stream_id) self.assertEqual(events[0].stream_ended, False) self.assertTrue(isinstance(events[1], DataReceived)) self.assertEqual(events[1].data, b"") self.assertEqual(events[1].stream_id, stream_id) self.assertEqual(events[1].stream_ended, True)
ENjOyAbLE1991/scrapy
refs/heads/master
scrapy/utils/markup.py
211
""" Transitional module for moving to the w3lib library. For new code, always import from w3lib.html instead of this module """ from w3lib.html import *
sivaramakrishnansr/ryu
refs/heads/master
ryu/tests/switch/run_mininet.py
19
#!/usr/bin/env python

import sys

from mininet.cli import CLI
from mininet.link import Link
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import OVSSwitch
from mininet.node import UserSwitch
from mininet.term import makeTerm
from oslo_config import cfg

from ryu import version


if '__main__' == __name__:
    opts = [
        cfg.StrOpt('switch', default='ovs',
                   help='test switch (ovs|ovs13|ovs14|cpqd)')
    ]
    conf = cfg.ConfigOpts()
    conf.register_cli_opts(opts)
    conf(project='ryu', version='run_mininet.py %s' % version)
    conf(sys.argv[1:])

    switch_type = {'ovs': OVSSwitch, 'ovs13': OVSSwitch,
                   'ovs14': OVSSwitch, 'cpqd': UserSwitch}
    switch = switch_type.get(conf.switch)
    if switch is None:
        raise ValueError('Invalid switch type. [%s]', conf.switch)

    net = Mininet(switch=switch, controller=RemoteController)

    c0 = net.addController('c0')

    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')

    Link(s1, s2)
    Link(s1, s2)
    Link(s1, s2)

    net.build()
    c0.start()
    s1.start([c0])
    s2.start([c0])

    if conf.switch in ['ovs', 'ovs13']:
        s1.cmd('ovs-vsctl set Bridge s1 protocols=OpenFlow13')
        s2.cmd('ovs-vsctl set Bridge s2 protocols=OpenFlow13')
    elif conf.switch == 'ovs14':
        s1.cmd('ovs-vsctl set Bridge s1 protocols=OpenFlow14')
        s2.cmd('ovs-vsctl set Bridge s2 protocols=OpenFlow14')

    CLI(net)

    net.stop()
blooparksystems/odoo
refs/heads/9.0
addons/account/tests/test_bank_stmt_reconciliation_widget_ui.py
47
from openerp.tests import HttpCase


class TestUi(HttpCase):

    post_install = True
    at_install = False

    def test_01_admin_bank_statement_reconciliation(self):
        self.phantom_js("/",
                        "odoo.__DEBUG__.services['web.Tour'].run('bank_statement_reconciliation', 'test')",
                        "odoo.__DEBUG__.services['web.Tour'].tours.bank_statement_reconciliation",
                        login="admin")
skynjupt/net_dev
refs/heads/master
src/libxml2/python/setup.py
1
#!/usr/bin/python -u # # Setup script for libxml2 and libxslt if found # import sys, os from distutils.core import setup, Extension # Below ROOT, we expect to find include, include/libxml2, lib and bin. # On *nix, it is not needed (but should not harm), # on Windows, it is set by configure.js. ROOT = r'/usr/local' # Thread-enabled libxml2 with_threads = 1 # If this flag is set (windows only), # a private copy of the dlls are included in the package. # If this flag is not set, the libxml2 and libxslt # dlls must be found somewhere in the PATH at runtime. WITHDLLS = 1 and sys.platform.startswith('win') def missing(file): if os.access(file, os.R_OK) == 0: return 1 return 0 try: HOME = os.environ['HOME'] except: HOME="C:" if WITHDLLS: # libxml dlls (expected in ROOT/bin) dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ] dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls) # create __init__.py for the libxmlmods package if not os.path.exists("libxmlmods"): os.mkdir("libxmlmods") open("libxmlmods/__init__.py","w").close() def altImport(s): s = s.replace("import libxml2mod","from libxmlmods import libxml2mod") s = s.replace("import libxsltmod","from libxmlmods import libxsltmod") return s if sys.platform.startswith('win'): libraryPrefix = 'lib' platformLibs = [] else: libraryPrefix = '' platformLibs = ["m","z"] # those are examined to find # - libxml2/libxml/tree.h # - iconv.h # - libxslt/xsltconfig.h includes_dir = [ "/usr/include", "/usr/local/include", "/opt/include", os.path.join(ROOT,'include'), HOME ]; xml_includes="" for dir in includes_dir: if not missing(dir + "/libxml2/libxml/tree.h"): xml_includes=dir + "/libxml2" break; if xml_includes == "": print "failed to find headers for libxml2: update includes_dir" sys.exit(1) iconv_includes="" for dir in includes_dir: if not missing(dir + "/iconv.h"): iconv_includes=dir break; if iconv_includes == "": print "failed to find headers for libiconv: update includes_dir" sys.exit(1) # those are added in the linker search path for libraries libdirs = [ os.path.join(ROOT,'lib'), ] xml_files = ["libxml2-api.xml", "libxml2-python-api.xml", "libxml.c", "libxml.py", "libxml_wrap.h", "types.c", "xmlgenerator.py", "README", "TODO", "drv_libxml2.py"] xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml", "libxslt.c", "libxsl.py", "libxslt_wrap.h", "xsltgenerator.py"] if missing("libxml2-py.c") or missing("libxml2.py"): try: try: import xmlgenerator except: import generator except: print "failed to find and generate stubs for libxml2, aborting ..." print sys.exc_type, sys.exc_value sys.exit(1) head = open("libxml.py", "r") generated = open("libxml2class.py", "r") result = open("libxml2.py", "w") for line in head.readlines(): if WITHDLLS: result.write(altImport(line)) else: result.write(line) for line in generated.readlines(): result.write(line) head.close() generated.close() result.close() with_xslt=0 if missing("libxslt-py.c") or missing("libxslt.py"): if missing("xsltgenerator.py") or missing("libxslt-api.xml"): print "libxslt stub generator not found, libxslt not built" else: try: import xsltgenerator except: print "failed to generate stubs for libxslt, aborting ..." 
print sys.exc_type, sys.exc_value else: head = open("libxsl.py", "r") generated = open("libxsltclass.py", "r") result = open("libxslt.py", "w") for line in head.readlines(): if WITHDLLS: result.write(altImport(line)) else: result.write(line) for line in generated.readlines(): result.write(line) head.close() generated.close() result.close() with_xslt=1 else: with_xslt=1 if with_xslt == 1: xslt_includes="" for dir in includes_dir: if not missing(dir + "/libxslt/xsltconfig.h"): xslt_includes=dir + "/libxslt" break; if xslt_includes == "": print "failed to find headers for libxslt: update includes_dir" with_xslt = 0 descr = "libxml2 package" modules = [ 'libxml2', 'drv_libxml2' ] if WITHDLLS: modules.append('libxmlmods.__init__') c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ] includes= [xml_includes, iconv_includes] libs = [libraryPrefix + "xml2"] + platformLibs macros = [] if with_threads: macros.append(('_REENTRANT','1')) if with_xslt == 1: descr = "libxml2 and libxslt package" if not sys.platform.startswith('win'): # # We are gonna build 2 identical shared libs with merge initializing # both libxml2mod and libxsltmod # c_files = c_files + ['libxslt-py.c', 'libxslt.c'] xslt_c_files = c_files macros.append(('MERGED_MODULES', '1')) else: # # On windows the MERGED_MODULE option is not needed # (and does not work) # xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c'] libs.insert(0, libraryPrefix + 'exslt') libs.insert(0, libraryPrefix + 'xslt') includes.append(xslt_includes) modules.append('libxslt') extens=[Extension('libxml2mod', c_files, include_dirs=includes, library_dirs=libdirs, libraries=libs, define_macros=macros)] if with_xslt == 1: extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes, library_dirs=libdirs, libraries=libs, define_macros=macros)) if missing("MANIFEST"): manifest = open("MANIFEST", "w") manifest.write("setup.py\n") for file in xml_files: manifest.write(file + "\n") if with_xslt == 1: for file in xslt_files: manifest.write(file + "\n") manifest.close() if WITHDLLS: ext_package = "libxmlmods" if sys.version >= "2.2": base = "lib/site-packages/" else: base = "" data_files = [(base+"libxmlmods",dlls)] else: ext_package = None data_files = [] setup (name = "libxml2-python", # On *nix, the version number is created from setup.py.in # On windows, it is set by configure.js version = "2.7.6", description = descr, author = "Daniel Veillard", author_email = "[email protected]", url = "http://xmlsoft.org/python.html", licence="MIT Licence", py_modules=modules, ext_modules=extens, ext_package=ext_package, data_files=data_files, ) sys.exit(0)
pombredanne/mitmproxy
refs/heads/master
test/test_protocol_http.py
8
from io import BytesIO

from netlib.exceptions import HttpSyntaxException
from netlib.http import http1
from netlib.tutils import treq, raises

import tutils
import tservers


class TestHTTPResponse:

    def test_read_from_stringio(self):
        s = (
            b"HTTP/1.1 200 OK\r\n"
            b"Content-Length: 7\r\n"
            b"\r\n"
            b"content\r\n"
            b"HTTP/1.1 204 OK\r\n"
            b"\r\n"
        )
        rfile = BytesIO(s)
        r = http1.read_response(rfile, treq())
        assert r.status_code == 200
        assert r.content == b"content"
        assert http1.read_response(rfile, treq()).status_code == 204

        rfile = BytesIO(s)
        # HEAD must not have content by spec. We should leave it on the pipe.
        r = http1.read_response(rfile, treq(method=b"HEAD"))
        assert r.status_code == 200
        assert r.content == b""

        with raises(HttpSyntaxException):
            http1.read_response(rfile, treq())


class TestHTTPFlow(object):

    def test_repr(self):
        f = tutils.tflow(resp=True, err=True)
        assert repr(f)


class TestInvalidRequests(tservers.HTTPProxTest):
    ssl = True

    def test_double_connect(self):
        p = self.pathoc()
        r = p.request("connect:'%s:%s'" % ("127.0.0.1", self.server2.port))
        assert r.status_code == 400
        assert "Invalid HTTP request form" in r.body

    def test_relative_request(self):
        p = self.pathoc_raw()
        p.connect()
        r = p.request("get:/p/200")
        assert r.status_code == 400
        assert "Invalid HTTP request form" in r.body
rex-xxx/mt6572_x201
refs/heads/master
cts/tools/utils/rerun.py
6
#!/usr/bin/env python
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import sys
from xml.dom import Node
from xml.dom import minidom


def getChildrenWithTag(parent, tagName):
    children = []
    for child in parent.childNodes:
        if (child.nodeType == Node.ELEMENT_NODE) and (child.tagName == tagName):
            #print "parent " + parent.getAttribute("name") + " " + tagName +\
            #    " " + child.getAttribute("name")
            children.append(child)
    return children


def parseSuite(suite, parentName):
    if parentName != "":
        parentName += '.'
    failedCases = []
    childSuites = getChildrenWithTag(suite, "TestSuite")
    for child in childSuites:
        for failure in parseSuite(child, parentName + child.getAttribute("name")):
            failedCases.append(failure)
    childTestCases = getChildrenWithTag(suite, "TestCase")
    for child in childTestCases:
        className = parentName + child.getAttribute("name")
        for test in getChildrenWithTag(child, "Test"):
            if test.getAttribute("result") != "pass":
                failureName = className + "#" + test.getAttribute("name")
                failedCases.append(failureName)
    #if len(failedCases) > 0:
    #    print failedCases
    return failedCases


def getFailedCases(resultXml):
    failedCases = []
    doc = minidom.parse(resultXml)
    testResult = doc.getElementsByTagName("TestResult")[0]
    packages = getChildrenWithTag(testResult, "TestPackage")
    for package in packages:
        casesFromChild = parseSuite(package, "")
        for case in casesFromChild:
            if case not in failedCases:
                failedCases.append(case)
    return failedCases


def main(argv):
    if len(argv) < 3:
        print "rerun.py cts_path result_xml [-s serial]"
        print " cts_path should end with android-cts"
        sys.exit(1)
    ctsPath = os.path.abspath(argv[1])
    resultXml = os.path.abspath(argv[2])
    deviceSerial = ""
    if len(argv) > 3:
        if argv[3] == "-s":
            deviceSerial = argv[4]

    failedCases = getFailedCases(resultXml)
    print "Re-run following cases:"
    for failure in failedCases:
        print " " + failure
    for failure in failedCases:
        [className, methodName] = failure.split('#')
        command = ctsPath + "/tools/cts-tradefed run singleCommand cts"
        if deviceSerial != "":
            command += " --serial " + deviceSerial
        command += " --class " + className + " --method " + methodName
        print command
        os.system(command)

if __name__ == '__main__':
    main(sys.argv)
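The XML layout that getFailedCases() expects is easiest to see from a small synthetic result file. The structure below is an illustrative assumption inferred only from the tag and attribute names the parser reads, not a verbatim CTS report, and it assumes the functions above are in scope:

# Illustrative only: a minimal, hand-written result XML exercising getFailedCases().
sample = """<?xml version="1.0"?>
<TestResult>
  <TestPackage name="android.sample">
    <TestSuite name="android">
      <TestSuite name="sample">
        <TestCase name="SampleTest">
          <Test name="testPasses" result="pass"/>
          <Test name="testFails" result="fail"/>
        </TestCase>
      </TestSuite>
    </TestSuite>
  </TestPackage>
</TestResult>
"""
# getFailedCases() calls minidom.parse(), so write the sample to a file first.
with open("sample_result.xml", "w") as f:
    f.write(sample)

failed = getFailedCases("sample_result.xml")
# failed == ['android.sample.SampleTest#testFails']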
epa/sqlalchemy
refs/heads/master
test/sql/test_insert.py
8
#! coding:utf-8 from sqlalchemy import Column, Integer, MetaData, String, Table,\ bindparam, exc, func, insert, select, column, text from sqlalchemy.dialects import mysql, postgresql from sqlalchemy.engine import default from sqlalchemy.testing import AssertsCompiledSQL,\ assert_raises_message, fixtures, eq_ from sqlalchemy.sql import crud class _InsertTestBase(object): @classmethod def define_tables(cls, metadata): Table('mytable', metadata, Column('myid', Integer), Column('name', String(30)), Column('description', String(30))) Table('myothertable', metadata, Column('otherid', Integer, primary_key=True), Column('othername', String(30))) Table('table_w_defaults', metadata, Column('id', Integer, primary_key=True), Column('x', Integer, default=10), Column('y', Integer, server_default=text('5')), Column('z', Integer, default=lambda: 10) ) class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_generic_insert_bind_params_all_columns(self): table1 = self.tables.mytable self.assert_compile(insert(table1), 'INSERT INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)') def test_insert_with_values_dict(self): table1 = self.tables.mytable checkparams = { 'myid': 3, 'name': 'jack' } self.assert_compile( insert( table1, dict( myid=3, name='jack')), 'INSERT INTO mytable (myid, name) VALUES (:myid, :name)', checkparams=checkparams) def test_insert_with_values_tuple(self): table1 = self.tables.mytable checkparams = { 'myid': 3, 'name': 'jack', 'description': 'mydescription' } self.assert_compile(insert(table1, (3, 'jack', 'mydescription')), 'INSERT INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)', checkparams=checkparams) def test_insert_with_values_func(self): table1 = self.tables.mytable self.assert_compile(insert(table1, values=dict(myid=func.lala())), 'INSERT INTO mytable (myid) VALUES (lala())') def test_insert_with_user_supplied_bind_params(self): table1 = self.tables.mytable values = { table1.c.myid: bindparam('userid'), table1.c.name: bindparam('username') } self.assert_compile( insert( table1, values), 'INSERT INTO mytable (myid, name) VALUES (:userid, :username)') def test_insert_values(self): table1 = self.tables.mytable values1 = {table1.c.myid: bindparam('userid')} values2 = {table1.c.name: bindparam('username')} self.assert_compile( insert( table1, values=values1).values(values2), 'INSERT INTO mytable (myid, name) VALUES (:userid, :username)') def test_prefix_with(self): table1 = self.tables.mytable stmt = table1.insert().\ prefix_with('A', 'B', dialect='mysql').\ prefix_with('C', 'D') self.assert_compile( stmt, 'INSERT C D INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)') self.assert_compile( stmt, 'INSERT A B C D INTO mytable (myid, name, description) ' 'VALUES (%s, %s, %s)', dialect=mysql.dialect()) def test_inline_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=func.foobar())) self.assert_compile(table.insert(values={}, inline=True), 'INSERT INTO sometable (foo) VALUES (foobar())') self.assert_compile( table.insert( inline=True), 'INSERT INTO sometable (foo) VALUES (foobar())', params={}) def test_insert_returning_not_in_default(self): table1 = self.tables.mytable stmt = table1.insert().returning(table1.c.myid) assert_raises_message( exc.CompileError, "RETURNING is not supported by this dialect's statement compiler.", stmt.compile, 
dialect=default.DefaultDialect() ) def test_insert_from_select_returning(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel).returning( self.tables.myothertable.c.otherid ) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = %(name_1)s RETURNING myothertable.otherid", checkparams={"name_1": "foo"}, dialect="postgresql" ) def test_insert_from_select_select(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_cte_one(self): table1 = self.tables.mytable cte = select([table1.c.name]).where(table1.c.name == 'bar').cte() sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == cte.c.name) ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) WITH anon_1 AS " "(SELECT mytable.name AS name FROM mytable " "WHERE mytable.name = :name_1) " "SELECT mytable.myid, mytable.name FROM mytable, anon_1 " "WHERE mytable.name = anon_1.name", checkparams={"name_1": "bar"} ) def test_insert_from_select_cte_two(self): table1 = self.tables.mytable cte = table1.select().cte("c") stmt = cte.select() ins = table1.insert().from_select(table1.c, stmt) self.assert_compile( ins, "INSERT INTO mytable (myid, name, description) " "WITH c AS (SELECT mytable.myid AS myid, mytable.name AS name, " "mytable.description AS description FROM mytable) " "SELECT c.myid, c.name, c.description FROM c" ) def test_insert_from_select_select_alt_ordering(self): table1 = self.tables.mytable sel = select([table1.c.name, table1.c.myid]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("othername", "otherid"), sel) self.assert_compile( ins, "INSERT INTO myothertable (othername, otherid) " "SELECT mytable.name, mytable.myid FROM mytable " "WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_no_defaults(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=func.foobar())) table1 = self.tables.mytable sel = select([table1.c.myid]).where(table1.c.name == 'foo') ins = table.insert().\ from_select(["id"], sel, include_defaults=False) self.assert_compile( ins, "INSERT INTO sometable (id) SELECT mytable.myid " "FROM mytable WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_with_sql_defaults(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=func.foobar())) table1 = self.tables.mytable sel = select([table1.c.myid]).where(table1.c.name == 'foo') ins = table.insert().\ from_select(["id"], sel) self.assert_compile( ins, "INSERT INTO sometable (id, foo) SELECT " "mytable.myid, foobar() AS foobar_1 " "FROM mytable WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_with_python_defaults(self): metadata = MetaData() table = 
Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=12)) table1 = self.tables.mytable sel = select([table1.c.myid]).where(table1.c.name == 'foo') ins = table.insert().\ from_select(["id"], sel) self.assert_compile( ins, "INSERT INTO sometable (id, foo) SELECT " "mytable.myid, :foo AS anon_1 " "FROM mytable WHERE mytable.name = :name_1", # value filled in at execution time checkparams={"name_1": "foo", "foo": None} ) def test_insert_from_select_override_defaults(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=12)) table1 = self.tables.mytable sel = select( [table1.c.myid, table1.c.myid.label('q')]).where( table1.c.name == 'foo') ins = table.insert().\ from_select(["id", "foo"], sel) self.assert_compile( ins, "INSERT INTO sometable (id, foo) SELECT " "mytable.myid, mytable.myid AS q " "FROM mytable WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_from_select_fn_defaults(self): metadata = MetaData() def foo(ctx): return 12 table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=foo)) table1 = self.tables.mytable sel = select( [table1.c.myid]).where( table1.c.name == 'foo') ins = table.insert().\ from_select(["id"], sel) self.assert_compile( ins, "INSERT INTO sometable (id, foo) SELECT " "mytable.myid, :foo AS anon_1 " "FROM mytable WHERE mytable.name = :name_1", # value filled in at execution time checkparams={"name_1": "foo", "foo": None} ) def test_insert_mix_select_values_exception(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) assert_raises_message( exc.InvalidRequestError, "This construct already inserts from a SELECT", ins.values, othername="5" ) def test_insert_mix_values_select_exception(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().values(othername="5") assert_raises_message( exc.InvalidRequestError, "This construct already inserts value expressions", ins.from_select, ("otherid", "othername"), sel ) def test_insert_from_select_table(self): table1 = self.tables.mytable ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), table1) # note we aren't checking the number of columns right now self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable", checkparams={} ) def test_insert_from_select_union(self): mytable = self.tables.mytable name = column('name') description = column('desc') sel = select( [name, mytable.c.description], ).union( select([name, description]) ) ins = mytable.insert().\ from_select( [mytable.c.name, mytable.c.description], sel) self.assert_compile( ins, "INSERT INTO mytable (name, description) " "SELECT name, mytable.description FROM mytable " 'UNION SELECT name, "desc"' ) def test_insert_from_select_col_values(self): table1 = self.tables.mytable table2 = self.tables.myothertable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = table2.insert().\ from_select((table2.c.otherid, table2.c.othername), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = 
:name_1", checkparams={"name_1": "foo"} ) class InsertImplicitReturningTest( _InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = postgresql.dialect(implicit_returning=True) def test_insert_select(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = %(name_1)s", checkparams={"name_1": "foo"} ) def test_insert_select_return_defaults(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where( table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel).\ return_defaults(self.tables.myothertable.c.otherid) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = %(name_1)s", checkparams={"name_1": "foo"} ) def test_insert_multiple_values(self): ins = self.tables.myothertable.insert().values([ {"othername": "foo"}, {"othername": "bar"}, ]) self.assert_compile( ins, "INSERT INTO myothertable (othername) " "VALUES (%(othername_0)s), " "(%(othername_1)s)", checkparams={ 'othername_1': 'bar', 'othername_0': 'foo'} ) def test_insert_multiple_values_return_defaults(self): # TODO: not sure if this should raise an # error or what ins = self.tables.myothertable.insert().values([ {"othername": "foo"}, {"othername": "bar"}, ]).return_defaults(self.tables.myothertable.c.otherid) self.assert_compile( ins, "INSERT INTO myothertable (othername) " "VALUES (%(othername_0)s), " "(%(othername_1)s)", checkparams={ 'othername_1': 'bar', 'othername_0': 'foo'} ) def test_insert_single_list_values(self): ins = self.tables.myothertable.insert().values([ {"othername": "foo"}, ]) self.assert_compile( ins, "INSERT INTO myothertable (othername) " "VALUES (%(othername_0)s)", checkparams={'othername_0': 'foo'} ) def test_insert_single_element_values(self): ins = self.tables.myothertable.insert().values( {"othername": "foo"}, ) self.assert_compile( ins, "INSERT INTO myothertable (othername) " "VALUES (%(othername)s) RETURNING myothertable.otherid", checkparams={'othername': 'foo'} ) class EmptyTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_empty_insert_default(self): table1 = self.tables.mytable stmt = table1.insert().values({}) # hide from 2to3 self.assert_compile(stmt, 'INSERT INTO mytable () VALUES ()') def test_supports_empty_insert_true(self): table1 = self.tables.mytable dialect = default.DefaultDialect() dialect.supports_empty_insert = dialect.supports_default_values = True stmt = table1.insert().values({}) # hide from 2to3 self.assert_compile(stmt, 'INSERT INTO mytable DEFAULT VALUES', dialect=dialect) def test_supports_empty_insert_false(self): table1 = self.tables.mytable dialect = default.DefaultDialect() dialect.supports_empty_insert = dialect.supports_default_values = False stmt = table1.insert().values({}) # hide from 2to3 assert_raises_message( exc.CompileError, "The 'default' dialect with current database version " "settings does not support empty inserts.", stmt.compile, dialect=dialect) def _test_insert_with_empty_collection_values(self, collection): table1 = self.tables.mytable ins = table1.insert().values(collection) self.assert_compile(ins, 'INSERT INTO mytable () VALUES ()', 
checkparams={}) # empty dict populates on next values call self.assert_compile(ins.values(myid=3), 'INSERT INTO mytable (myid) VALUES (:myid)', checkparams={'myid': 3}) def test_insert_with_empty_list_values(self): self._test_insert_with_empty_collection_values([]) def test_insert_with_empty_dict_values(self): self._test_insert_with_empty_collection_values({}) def test_insert_with_empty_tuple_values(self): self._test_insert_with_empty_collection_values(()) class MultirowTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_not_supported(self): table1 = self.tables.mytable dialect = default.DefaultDialect() stmt = table1.insert().values([{'myid': 1}, {'myid': 2}]) assert_raises_message( exc.CompileError, "The 'default' dialect with current database version settings " "does not support in-place multirow inserts.", stmt.compile, dialect=dialect) def test_named(self): table1 = self.tables.mytable values = [ {'myid': 1, 'name': 'a', 'description': 'b'}, {'myid': 2, 'name': 'c', 'description': 'd'}, {'myid': 3, 'name': 'e', 'description': 'f'} ] checkparams = { 'myid_0': 1, 'myid_1': 2, 'myid_2': 3, 'name_0': 'a', 'name_1': 'c', 'name_2': 'e', 'description_0': 'b', 'description_1': 'd', 'description_2': 'f', } dialect = default.DefaultDialect() dialect.supports_multivalues_insert = True self.assert_compile( table1.insert().values(values), 'INSERT INTO mytable (myid, name, description) VALUES ' '(:myid_0, :name_0, :description_0), ' '(:myid_1, :name_1, :description_1), ' '(:myid_2, :name_2, :description_2)', checkparams=checkparams, dialect=dialect) def test_positional(self): table1 = self.tables.mytable values = [ {'myid': 1, 'name': 'a', 'description': 'b'}, {'myid': 2, 'name': 'c', 'description': 'd'}, {'myid': 3, 'name': 'e', 'description': 'f'} ] checkpositional = (1, 'a', 'b', 2, 'c', 'd', 3, 'e', 'f') dialect = default.DefaultDialect() dialect.supports_multivalues_insert = True dialect.paramstyle = 'format' dialect.positional = True self.assert_compile( table1.insert().values(values), 'INSERT INTO mytable (myid, name, description) VALUES ' '(%s, %s, %s), (%s, %s, %s), (%s, %s, %s)', checkpositional=checkpositional, dialect=dialect) def test_positional_w_defaults(self): table1 = self.tables.table_w_defaults values = [ {'id': 1}, {'id': 2}, {'id': 3} ] checkpositional = (1, None, None, 2, None, None, 3, None, None) dialect = default.DefaultDialect() dialect.supports_multivalues_insert = True dialect.paramstyle = 'format' dialect.positional = True self.assert_compile( table1.insert().values(values), "INSERT INTO table_w_defaults (id, x, z) VALUES " "(%s, %s, %s), (%s, %s, %s), (%s, %s, %s)", checkpositional=checkpositional, check_prefetch=[ table1.c.x, table1.c.z, crud._multiparam_column(table1.c.x, 0), crud._multiparam_column(table1.c.z, 0), crud._multiparam_column(table1.c.x, 1), crud._multiparam_column(table1.c.z, 1) ], dialect=dialect) def test_inline_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, default=func.foobar())) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 'plainfoo'}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', 'foo_1': 'plainfoo', } self.assert_compile( table.insert().values(values), 'INSERT INTO sometable (id, data, foo) VALUES ' '(%(id_0)s, %(data_0)s, foobar()), ' '(%(id_1)s, %(data_1)s, %(foo_1)s), ' 
'(%(id_2)s, %(data_2)s, foobar())', checkparams=checkparams, dialect=postgresql.dialect()) def test_python_scalar_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, default=10)) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 15}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', 'foo': None, # evaluated later 'foo_1': 15, 'foo_2': None # evaluated later } stmt = table.insert().values(values) eq_( dict([ (k, v.type._type_affinity) for (k, v) in stmt.compile(dialect=postgresql.dialect()).binds.items()]), { 'foo': Integer, 'data_2': String, 'id_0': Integer, 'id_2': Integer, 'foo_1': Integer, 'data_1': String, 'id_1': Integer, 'foo_2': Integer, 'data_0': String} ) self.assert_compile( stmt, 'INSERT INTO sometable (id, data, foo) VALUES ' '(%(id_0)s, %(data_0)s, %(foo)s), ' '(%(id_1)s, %(data_1)s, %(foo_1)s), ' '(%(id_2)s, %(data_2)s, %(foo_2)s)', checkparams=checkparams, dialect=postgresql.dialect()) def test_python_fn_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, default=lambda: 10)) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 15}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', 'foo': None, # evaluated later 'foo_1': 15, 'foo_2': None, # evaluated later } stmt = table.insert().values(values) eq_( dict([ (k, v.type._type_affinity) for (k, v) in stmt.compile(dialect=postgresql.dialect()).binds.items()]), { 'foo': Integer, 'data_2': String, 'id_0': Integer, 'id_2': Integer, 'foo_1': Integer, 'data_1': String, 'id_1': Integer, 'foo_2': Integer, 'data_0': String} ) self.assert_compile( stmt, "INSERT INTO sometable (id, data, foo) VALUES " "(%(id_0)s, %(data_0)s, %(foo)s), " "(%(id_1)s, %(data_1)s, %(foo_1)s), " "(%(id_2)s, %(data_2)s, %(foo_2)s)", checkparams=checkparams, dialect=postgresql.dialect()) def test_sql_functions(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer)) values = [ {"id": 1, "data": "foo", "foo": func.foob()}, {"id": 2, "data": "bar", "foo": func.foob()}, {"id": 3, "data": "bar", "foo": func.bar()}, {"id": 4, "data": "bar", "foo": 15}, {"id": 5, "data": "bar", "foo": func.foob()}, ] checkparams = { 'id_0': 1, 'data_0': 'foo', 'id_1': 2, 'data_1': 'bar', 'id_2': 3, 'data_2': 'bar', 'id_3': 4, 'data_3': 'bar', 'foo_3': 15, 'id_4': 5, 'data_4': 'bar' } self.assert_compile( table.insert().values(values), "INSERT INTO sometable (id, data, foo) VALUES " "(%(id_0)s, %(data_0)s, foob()), " "(%(id_1)s, %(data_1)s, foob()), " "(%(id_2)s, %(data_2)s, bar()), " "(%(id_3)s, %(data_3)s, %(foo_3)s), " "(%(id_4)s, %(data_4)s, foob())", checkparams=checkparams, dialect=postgresql.dialect()) def test_server_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, server_default=func.foobar())) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 'plainfoo'}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', } self.assert_compile( 
table.insert().values(values), 'INSERT INTO sometable (id, data) VALUES ' '(%(id_0)s, %(data_0)s), ' '(%(id_1)s, %(data_1)s), ' '(%(id_2)s, %(data_2)s)', checkparams=checkparams, dialect=postgresql.dialect()) def test_server_default_absent_value(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, server_default=func.foobar())) values = [ {'id': 1, 'data': 'data1', 'foo': 'plainfoo'}, {'id': 2, 'data': 'data2'}, {'id': 3, 'data': 'data3', 'foo': 'otherfoo'}, ] assert_raises_message( exc.CompileError, "INSERT value for column sometable.foo is explicitly rendered " "as a boundparameter in the VALUES clause; a Python-side value or " "SQL expression is required", table.insert().values(values).compile )
CiscoSystems/project-config-third-party
refs/heads/master
tools/jenkins-projects-checks.py
1
#! /usr/bin/env python

# Copyright 2014 SUSE Linux Products GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys


def normalize(s):
    "Normalize string for comparison."
    return s.lower().replace("_", "-")


def check_sections():
    """Check that the projects are in alphabetical order per section
    and that indenting looks correct"""

    # Note that the file has different sections and we need to check
    # entries within these sections only
    errors = False
    last = ""
    count = 1
    for line in open('jenkins/jobs/projects.yaml', 'r'):
        if line.startswith('# Section:'):
            last = ""
            section = line[10:].strip()
            print("Checking section '%s'" % section)
        if line.startswith(' name: '):
            i = line.find(' name: ')
            current = line[i + 7:].strip()
            if normalize(last) > normalize(current):
                print(" Wrong alphabetical order: %(last)s, %(current)s" %
                      {"last": last, "current": current})
                errors = True
            last = current
        if (len(line) - len(line.lstrip(' '))) % 2 != 0:
            print("Line %(count)s not indented by multiple of 2:\n\t%(line)s" %
                  {"count": count, "line": line})
            errors = True
        count = count + 1
    return errors


def check_all():
    errors = check_sections()

    if errors:
        print("Found errors in jenkins/jobs/projects.yaml!")
    else:
        print("No errors found in jenkins/jobs/projects.yaml!")
    return errors


if __name__ == "__main__":
    sys.exit(check_all())
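A short sketch of why normalize() matters for the ordering check above: in raw ASCII an underscore sorts after a hyphen, so hyphenated and underscored project names would otherwise be flagged inconsistently. The project names here are hypothetical and only the script's own normalize() is assumed to be in scope:

# Hypothetical names: raw comparison and normalized comparison disagree.
a, b = "openstack_health", "openstack-infra"

raw_order = a < b                               # False: '_' (0x5f) sorts after '-' (0x2d)
normalized_order = normalize(a) < normalize(b)  # True: compared as lower-case, hyphenated names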
rven/odoo
refs/heads/14.0-fix-partner-merge-mail-activity
addons/account_debit_note/wizard/__init__.py
11
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from . import account_debit_note
axbaretto/beam
refs/heads/master
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/unittest_checker_typecheck.py
1
# -*- coding: utf-8 -*- # Copyright (c) 2014 Holger Peters <[email protected]> # Copyright (c) 2014 Google, Inc. # Copyright (c) 2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2015-2016 Claudiu Popa <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2015 Dmitry Pribysh <[email protected]> # Copyright (c) 2016 Filipe Brandenburger <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/COPYING """Unittest for the type checker.""" import unittest from astroid import test_utils from pylint.checkers import typecheck from pylint.testutils import CheckerTestCase, Message, set_config class TypeCheckerTest(CheckerTestCase): "Tests for pylint.checkers.typecheck" CHECKER_CLASS = typecheck.TypeChecker def test_no_member_in_getattr(self): """Make sure that a module attribute access is checked by pylint. """ node = test_utils.extract_node(""" import optparse optparse.THIS_does_not_EXIST """) with self.assertAddsMessages( Message( 'no-member', node=node, args=('Module', 'optparse', 'THIS_does_not_EXIST'))): self.checker.visit_attribute(node) @set_config(ignored_modules=('argparse',)) def test_no_member_in_getattr_ignored(self): """Make sure that a module attribute access check is omitted with a module that is configured to be ignored. """ node = test_utils.extract_node(""" import argparse argparse.THIS_does_not_EXIST """) with self.assertNoMessages(): self.checker.visit_attribute(node) @set_config(ignored_classes=('xml.etree.', )) def test_ignored_modules_invalid_pattern(self): node = test_utils.extract_node(''' import xml xml.etree.Lala ''') message = Message('no-member', node=node, args=('Module', 'xml.etree', 'Lala')) with self.assertAddsMessages(message): self.checker.visit_attribute(node) @set_config(ignored_modules=('xml.etree*', )) def test_ignored_modules_patterns(self): node = test_utils.extract_node(''' import xml xml.etree.portocola #@ ''') with self.assertNoMessages(): self.checker.visit_attribute(node) @set_config(ignored_classes=('xml.*', )) def test_ignored_classes_no_recursive_pattern(self): node = test_utils.extract_node(''' import xml xml.etree.ElementTree.Test ''') message = Message('no-member', node=node, args=('Module', 'xml.etree.ElementTree', 'Test')) with self.assertAddsMessages(message): self.checker.visit_attribute(node) @set_config(ignored_classes=('optparse.Values', )) def test_ignored_classes_qualified_name(self): """Test that ignored-classes supports qualified name for ignoring.""" node = test_utils.extract_node(''' import optparse optparse.Values.lala ''') with self.assertNoMessages(): self.checker.visit_attribute(node) @set_config(ignored_classes=('Values', )) def test_ignored_classes_only_name(self): """Test that ignored_classes works with the name only.""" node = test_utils.extract_node(''' import optparse optparse.Values.lala ''') with self.assertNoMessages(): self.checker.visit_attribute(node) @set_config(contextmanager_decorators=('contextlib.contextmanager', '.custom_contextmanager')) def test_custom_context_manager(self): """Test that @custom_contextmanager is recognized as configured.""" node = test_utils.extract_node(''' from contextlib import contextmanager def custom_contextmanager(f): return contextmanager(f) @custom_contextmanager def dec(): yield with dec(): pass ''') with self.assertNoMessages(): self.checker.visit_with(node) if __name__ == '__main__': unittest.main()
longmazhanfeng/interface_web
refs/heads/master
interface_platform/resource_manage.py
1
# -*- coding: UTF-8 -*- from models import * from account.models import Account from django.contrib.auth.models import User from django.shortcuts import render, get_object_or_404, redirect from django.http import HttpResponse from base_message import get_its from django.views.generic import TemplateView from model.typemap import VAR_TYPE import json class ResourcesView(TemplateView): """ 资源配置相关处理 """ template_name = "resource_config.html" def get_context_data(self, **kwargs): context = super(ResourcesView, self).get_context_data(**kwargs) context["selected_project"] = get_object_or_404(Project, pk=int(kwargs["project_id"])) context["project_table"] = Project.objects.all() context["variable_table"] = Variable.objects.filter(project=context["selected_project"]) return context @staticmethod def create_variable(request, project_id): """ 新建变量 :param request: HTTP请求 :param project_id: 当前项目ID :return: 新建变量页面 """ project = get_object_or_404(Project, pk=int(project_id)) its = get_its(project) user_table = User.objects.all()[1:] context = {"selected_project": project, "user_table": user_table, "its": its, "project_table": Project.objects.all()} return render(request, "new_variable.html", context) @staticmethod def variable_detail(request, project_id, var_id): """ 变量详情 :param request: HTTP请求 :param project_id: 当前项目ID :param var_id: 当前变量ID :return: 变量详情页面 """ project_id = int(project_id) project = get_object_or_404(Project, pk=project_id) var_id = int(var_id) var = get_object_or_404(Variable, pk=var_id) user_table = User.objects.all()[1:] its = get_its(project) context = dict() context["project"] = project context["var"] = var context["user_table"] = user_table context["its"] = its context["selected_project"] = project context["var_types"] = VAR_TYPE context["project_table"] = Project.objects.all() return render(request, "variable_detail.html", context) @staticmethod def save_variable(request, project_id): """ 保存新建变量 :param request: HTTP请求 :param project_id: 当前项目ID :return: 保存结果 """ project = get_object_or_404(Project, pk=int(project_id)) if request.method == "POST": # print request.POST name = request.POST["variable_name"].strip() var_type = request.POST["variable_type"] value = request.POST["variable_value"].strip() desc = request.POST["variable_desc"] creator_name = request.POST['variable_creator'] user = User.objects.get(username=creator_name) creator = Account.objects.get(user=user) responsible = Account.objects.get(user=user) if var_type == "1": # 创建host变量 val_type = "HOST" elif var_type == "3": val_type = "普通变量" elif var_type == "2": val_type = "接口返回值" if name and len(name) <= 100: var = Variable(name=name, value=value, type=val_type, desc=desc, project=project, creator=creator, responsible=responsible) var.save() if var.type == "接口返回值": # 当变量是“接口返回值”类型时要记录该变量所关联的接口,以及该接口的某一具体参数 rep_para_id = int(request.POST["rep_para_id"]) rep_para_type = request.POST["rep_para_type"] assoc_it_id = int(request.POST["assoc_it"]) it = get_object_or_404(ITStatement, pk=assoc_it_id) if rep_para_type == "body": assoc_type = "6" VariableIT.objects.create(var=var, it=it, assoc_id=rep_para_id, assoc_type=assoc_type) elif rep_para_type == "head": assoc_type = "5" VariableIT.objects.create(var=var, it=it, assoc_id=rep_para_id, assoc_type=assoc_type) return redirect("resource_configuration", project_id) return redirect("resource_configuration", project.id) # 变量详情页更改后保存 @staticmethod def update_variable(request, var_id): meta = {"code": 0, "message": "fail"} if request.is_ajax() and request.method == "GET": var_id = 
int(var_id) var = get_object_or_404(Variable, pk=var_id) # 对变量进行修改,点击保存 update_flag = False name = request.GET["name"] value = request.GET["value"] if name == "variable_name" and value != var.name: var.name = value update_flag = True elif name == "variable_type" and value != var.type: if var.type == u"接口返回值": # 如果之前时接口返回值类型的变量,更改变量类型时要去除变量和接口的关联关系 VariableIT.objects.get(var=var).delete() # 删除var和接口某一响应参数的关联关系 var.type = value update_flag = True elif name == "variable_value" and value != var.value: if "flag" in request.GET and request.GET["flag"] == "it_return": it_id = int(request.GET["it_id"]) it = get_object_or_404(ITStatement, pk=it_id) assoc_id = int(request.GET["assoc_id"]) assoc_type = request.GET["assoc_type"] if assoc_type == "body": VariableIT.objects.create(var=var, it=it, assoc_id=assoc_id, assoc_type="6") elif assoc_type == "head": VariableIT.objects.create(var=var, it=it, assoc_id=assoc_id, assoc_type="5") var.value = value update_flag = True elif name == "variable_desc" and value != var.desc: var.desc = value update_flag = True if update_flag and request.user.is_authenticated(): user = request.user var.responsible = Account.objects.get(user=user) var.save() meta["code"] = 201 meta["message"] = "变量修改成功" meta["responsible"] = request.user.username return HttpResponse(json.dumps(meta)) return HttpResponse(json.dumps(meta)) @staticmethod def delete_variable(request): """ 删除变量 :param request:HTTP请求 :return:删除结果 """ if request.is_ajax() and request.method == "GET": var_id = int(request.GET["var_id"]) if Variable.objects.filter(id=var_id).exists(): Variable.objects.get(pk=var_id).delete() return HttpResponse("success") else: return HttpResponse("不存在的变量ID: %s" % var_id)
facebook/FBSimulatorControl
refs/heads/master
idb/common/tar.py
1
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import asyncio import os import sys import tempfile import uuid from typing import AsyncContextManager, AsyncIterator, List, Optional from idb.utils.contextlib import asynccontextmanager from idb.utils.typing import none_throws class TarException(BaseException): pass def _has_executable(exe: str) -> bool: return any((os.path.exists(os.path.join(path, exe)) for path in os.get_exec_path())) COMPRESSION_COMMAND = "pigz -c" if _has_executable("pigz") else "gzip -4" READ_CHUNK_SIZE: int = 1024 * 1024 * 4 # 4Mb, the default max read for gRPC @asynccontextmanager # noqa T484 async def _create_tar_command( paths: List[str], additional_tar_args: Optional[List[str]], place_in_subfolders: bool, verbose: bool = False, ) -> AsyncContextManager[asyncio.subprocess.Process]: with tempfile.TemporaryDirectory(prefix="tar_link_") as temp_dir: tar_args = additional_tar_args or [] if place_in_subfolders: for path in paths: sub_dir_name = str(uuid.uuid4()) temp_subdir = os.path.join(temp_dir, sub_dir_name) os.symlink(os.path.dirname(path), temp_subdir) path_to_file = os.path.join(sub_dir_name, os.path.basename(path)) tar_args.append(f"-C '{temp_dir}' '{path_to_file}'") else: tar_args.extend( [ f"-C '{os.path.dirname(path)}' '{os.path.basename(path)}'" for path in paths ] ) process = await asyncio.create_subprocess_shell( ( f'tar cf{"v" if verbose else ""} - ' + f'{" ".join(tar_args)} | {COMPRESSION_COMMAND}' ), stderr=sys.stderr, stdout=asyncio.subprocess.PIPE, ) yield process @asynccontextmanager # noqa T484 async def _create_untar_command( output_path: str, verbose: bool = False ) -> AsyncContextManager[asyncio.subprocess.Process]: process = await asyncio.create_subprocess_shell( f"tar -C '{output_path}' -xzpf{'v' if verbose else ''} -", stdin=asyncio.subprocess.PIPE, stderr=sys.stderr, stdout=sys.stderr, ) yield process async def _generator_from_data(data: bytes) -> AsyncIterator[bytes]: yield data async def create_tar( paths: List[str], additional_tar_args: Optional[List[str]] = None, place_in_subfolders: bool = False, verbose: bool = False, ) -> bytes: async with _create_tar_command( paths=paths, additional_tar_args=additional_tar_args, place_in_subfolders=place_in_subfolders, verbose=verbose, ) as process: tar_contents = (await process.communicate())[0] if process.returncode != 0: raise TarException( "Failed to create tar file, " "tar command exited with non-zero exit code {process.returncode}" ) return tar_contents async def generate_tar( paths: List[str], additional_tar_args: Optional[List[str]] = None, place_in_subfolders: bool = False, verbose: bool = False, ) -> AsyncIterator[bytes]: async with _create_tar_command( paths=paths, additional_tar_args=additional_tar_args, place_in_subfolders=place_in_subfolders, verbose=verbose, ) as process: reader = none_throws(process.stdout) while not reader.at_eof(): data = await reader.read(READ_CHUNK_SIZE) if not data: break yield data returncode = await process.wait() if returncode != 0: raise TarException( "Failed to generate tar file, " f"tar command exited with non-zero exit code {returncode}" ) async def drain_untar( generator: AsyncIterator[bytes], output_path: str, verbose: bool = False ) -> None: try: os.mkdir(output_path) except FileExistsError: pass async with _create_untar_command( output_path=output_path, verbose=verbose ) as process: writer = none_throws(process.stdin) async for data in generator: writer.write(data) await writer.drain() writer.write_eof() 
await writer.drain() await process.wait() async def untar(data: bytes, output_path: str, verbose: bool = False) -> None: await drain_untar( generator=_generator_from_data(data=data), output_path=output_path, verbose=verbose, )
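A minimal round-trip sketch for the create_tar()/untar() coroutines above, assuming idb.common.tar is importable, the tar and gzip (or pigz) binaries it shells out to are on PATH, and Python 3.7+ for asyncio.run; the file name is hypothetical:

# Round-trip: archive one file to gzip'd tar bytes, then extract it again.
import asyncio
import tempfile

from idb.common.tar import create_tar, untar


async def roundtrip() -> None:
    with tempfile.TemporaryDirectory() as src, tempfile.TemporaryDirectory() as dst:
        with open(f"{src}/hello.txt", "w") as f:
            f.write("hello")
        data = await create_tar(paths=[f"{src}/hello.txt"])  # compressed tar as bytes
        await untar(data=data, output_path=dst)              # extracted under dst/
        with open(f"{dst}/hello.txt") as f:
            assert f.read() == "hello"


asyncio.run(roundtrip())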
dana-i2cat/felix
refs/heads/master
ofam/src/setup.py
3
from distutils.core import setup import os.path import os import pprint import glob import fnmatch def generatePluginFiles (): pfiles = [] foamroot = "/opt/ofelia/ofam/local" pdirs = os.listdir(os.getcwd() + "/plugins") for pd in pdirs: dl = [] for plgfile in os.listdir(os.getcwd() + "/plugins/%s/" % (pd)): dl.append("plugins/%s/%s" % (pd, plgfile)) pfiles.append(("%s/plugins/%s" % (foamroot, pd), dl)) return pfiles def generateDataDir (cur_dir_loc): files = [] for f in os.listdir(os.getcwd() + "/%s" % (cur_dir_loc)): files.append("%s/%s" % (cur_dir_loc, f)) return files def opj (*args): # Handy utility from Robin Dunn path = os.path.join(*args) return os.path.normpath(path) # distutils should include this function... def generateDataFiles (srcdir, *wildcards, **kw): def walkHelper (arg, dirname, files): #print dirname names = [] lst, wildcards, newroot, srclen = arg for wc in wildcards: wc_name = opj(dirname, wc) for f in files: filename = opj(dirname, f) if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename): names.append(filename) if names: lst.append(("%s/%s" % (newroot, dirname[srclen:]), names)) file_list = [] recursive = kw.get('recursive', True) newroot = kw.get('newroot', srcdir) if recursive: os.path.walk(srcdir, walkHelper, (file_list, wildcards, newroot, len(srcdir))) else: walkHelper((file_list, wildcards, newroot, len(srcdir)), srcdir, [os.path.basename(f) for f in glob.glob(opj(srcdir, '*'))]) return file_list def main (): data_files=[('/opt/ofelia/ofam/local/sbin', ['src/scripts/foam.fcgi']), ('/opt/ofelia/ofam/local/bin', ['src/scripts/expire', 'src/scripts/expire-emails', 'src/scripts/daily-queue']), ('/opt/ofelia/ofam/local/schemas', ['schemas/ad.xsd', 'schemas/request.xsd', 'schemas/of-resv-3.xsd', 'schemas/any-extension-schema.xsd', 'schemas/request-common.xsd', 'schemas/of-resv-4.xsd']), ('/etc/nginx/sites-available/', ['src/foam.conf']), ('/etc/nginx/', ['src/trusted'])] data_files.extend(generatePluginFiles()) data_files.extend(generateDataFiles('src/foamext/', "*.py", newroot='/opt/ofelia/ofam/local/lib/foamext')) data_files.extend(generateDataFiles('src/ext/', "*.py", newroot='/opt/ofelia/ofam/local/lib')) #data_files.extend(generateDataFiles('src/foam/', "*.py", newroot='/opt/ofelia/ofam/local/lib/foam')) data_files.extend(generateDataFiles('src/foam/', "*.*", newroot='/opt/ofelia/ofam/local/lib/foam')) data_files.extend(generateDataFiles('templates/', "*.txt", newroot='/opt/ofelia/ofam/local/etc/templates/default')) # pprint.pprint(data_files) setup(name='foam', version='foam_0.8.2|ofelia_0.1', description='Flowvisor Openflow Aggregate Manager', author='Nick Bastin and Vasileios Kotronis', author_email='[email protected]', url='https://openflow.stanford.edu/display/FOAM/Home', packages=[], package_dir={'foam': 'src/foam', 'jsonrpc' : 'src/ext/jsonrpc', 'sfa' : 'src/ext/sfa', 'geni' : 'src/ext/geni'}, scripts=['src/scripts/foamctl', 'src/scripts/foam-db-convert.py'], data_files = data_files ) if __name__ == '__main__': main()
andrenatal/DeepSpeech
refs/heads/master
util/text.py
1
import numpy as np
import tensorflow as tf

# Constants
SPACE_TOKEN = '<space>'
SPACE_INDEX = 0
FIRST_INDEX = ord('a') - 1  # 0 is reserved to space


def text_to_char_array(original):
    # Create list of sentence's words w/spaces replaced by ''
    result = original.replace(" '", "")  # TODO: Deal with this properly
    result = result.replace("'", "")     # TODO: Deal with this properly
    result = result.replace(' ', '  ')
    result = result.split(' ')

    # Tokenize words into letters adding in SPACE_TOKEN where required
    result = np.hstack([SPACE_TOKEN if xt == '' else list(xt) for xt in result])

    # Map characters into indicies
    result = np.asarray([SPACE_INDEX if xt == SPACE_TOKEN else ord(xt) - FIRST_INDEX for xt in result])

    # Add result to results
    return result


def sparse_tuple_from(sequences, dtype=np.int32):
    """Create a sparse representation of x.

    Args:
        sequences: a list of lists of type dtype where each element is a sequence
    Returns:
        A tuple with (indices, values, shape)
    """
    indices = []
    values = []

    for n, seq in enumerate(sequences):
        indices.extend(zip([n]*len(seq), xrange(len(seq))))
        values.extend(seq)

    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), indices.max(0)[1]+1], dtype=np.int64)

    return tf.SparseTensor(indices=indices, values=values, shape=shape)


def sparse_tensor_value_to_texts(value):
    return sparse_tuple_to_texts((value.indices, value.values, value.shape))


def sparse_tuple_to_texts(tuple):
    indices = tuple[0]
    values = tuple[1]
    results = [''] * tuple[2][0]
    for i in range(len(indices)):
        index = indices[i][0]
        c = values[i]
        c = ' ' if c == SPACE_INDEX else chr(c + FIRST_INDEX)
        results[index] = results[index] + c
    # List of strings
    return results


def ndarray_to_text(value):
    results = ''
    for i in range(len(value)):
        results += chr(value[i] + FIRST_INDEX)
    return results.replace('`', ' ')


def wer(original, result):
    """
    The WER is defined as the editing/Levenshtein distance on word level
    divided by the amount of words in the original text.
    In case of the original having more words (N) than the result and both
    being totally different (all N words resulting in 1 edit operation each),
    the WER will always be 1 (N / N = 1).
    """
    # The WER is calculated on word (and NOT on character) level.
    # Therefore we split the strings into words first:
    original = original.split()
    result = result.split()
    return levenshtein(original, result) / float(len(original))


def wers(originals, results):
    count = len(originals)
    rates = []
    mean = 0.0
    assert count == len(results)
    for i in range(count):
        rate = wer(originals[i], results[i])
        mean = mean + rate
        rates.append(rate)
    return rates, mean / float(count)


# The following code is from: http://hetland.org/coding/python/levenshtein.py
# This is a straightforward implementation of a well-known algorithm, and thus
# probably shouldn't be covered by copyright to begin with. But in case it is,
# the author (Magnus Lie Hetland) has, to the extent possible under law,
# dedicated all copyright and related and neighboring rights to this software
# to the public domain worldwide, by distributing it under the CC0 license,
# version 1.0. This software is distributed without any warranty. For more
# information, see <http://creativecommons.org/publicdomain/zero/1.0>
def levenshtein(a,b):
    "Calculates the Levenshtein distance between a and b."
    n, m = len(a), len(b)
    if n > m:
        # Make sure n <= m, to use O(min(n,m)) space
        a,b = b,a
        n,m = m,n

    current = range(n+1)
    for i in range(1,m+1):
        previous, current = current, [i]+[0]*n
        for j in range(1,n+1):
            add, delete = previous[j]+1, current[j-1]+1
            change = previous[j-1]
            if a[j-1] != b[i-1]:
                change = change + 1
            current[j] = min(add, delete, change)

    return current[n]


# gather_nd is taken from https://github.com/tensorflow/tensorflow/issues/206#issuecomment-229678962
#
# Unfortunately we can't just use tf.gather_nd because it does not have gradients
# implemented yet, so we need this workaround.
#
def gather_nd(params, indices, shape):
    rank = len(shape)
    flat_params = tf.reshape(params, [-1])
    multipliers = [reduce(lambda x, y: x*y, shape[i+1:], 1) for i in range(0, rank)]
    indices_unpacked = tf.unpack(tf.transpose(indices, [rank - 1] + range(0, rank - 1)))
    flat_indices = sum([a*b for a,b in zip(multipliers, indices_unpacked)])
    return tf.gather(flat_params, flat_indices)


# ctc_label_dense_to_sparse is taken from https://github.com/tensorflow/tensorflow/issues/1742#issuecomment-205291527
#
# The CTC implementation in TensorFlow needs labels in a sparse representation,
# but sparse data and queues don't mix well, so we store padded tensors in the
# queue and convert to a sparse representation after dequeuing a batch.
#
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.pack([label_shape[0]])
    max_num_labels_tns = tf.pack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [True])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat(0, [batch_ind, label_ind]), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
tgummerer/buck
refs/heads/master
scripts/verify-javadoc.py
24
#!/usr/bin/env python
#
# Examines the output from running Javadoc via Ant and checks to see if any
# warnings were emitted. If so, fail the build unless the warning is in the
# whitelist. When run in a CI build, Ant may not be able to reach external
# URLs, so warnings about errors fetching expected URLs should be ignored.

import sets
import sys


WARNING_WHITELIST = sets.ImmutableSet(map(
    lambda url: ' [javadoc] javadoc: warning - Error fetching URL: ' + url,
    [
        'http://docs.oracle.com/javase/7/docs/api/package-list',
        'https://junit-team.github.io/junit/javadoc/latest/package-list',
    ]) + [' [javadoc] 2 warnings'])


def main(log_file):
    """Exit with a non-zero return code if line is not in the warning whitelist."""
    errors = []
    with open(log_file) as f:
        for line in f.xreadlines():
            line = line.rstrip()
            # If there is a warning from `javadoc`, check whether it is in the whitelist.
            if 'warning' in line.lower() and line not in WARNING_WHITELIST:
                errors.append(line)
    if len(errors):
        print 'Unexpected Javadoc errors (%d):' % len(errors)
        for error in errors:
            print error
        sys.exit(1)


if __name__ == '__main__':
    main(sys.argv[1])
Yelp/pyes
refs/heads/master
pyes/fakettypes.py
5
# -*- coding: utf-8 -*-
from __future__ import absolute_import
#
# Fake ttypes to use in http protocol to simulate thrift ones
#


class Method(object):
    GET = 0
    PUT = 1
    POST = 2
    DELETE = 3
    HEAD = 4
    OPTIONS = 5

    _VALUES_TO_NAMES = {
        0: "GET",
        1: "PUT",
        2: "POST",
        3: "DELETE",
        4: "HEAD",
        5: "OPTIONS",
    }

    _NAMES_TO_VALUES = {
        "GET": 0,
        "PUT": 1,
        "POST": 2,
        "DELETE": 3,
        "HEAD": 4,
        "OPTIONS": 5,
    }


class Status(object):
    CONTINUE = 100
    SWITCHING_PROTOCOLS = 101
    OK = 200
    CREATED = 201
    ACCEPTED = 202
    NON_AUTHORITATIVE_INFORMATION = 203
    NO_CONTENT = 204
    RESET_CONTENT = 205
    PARTIAL_CONTENT = 206
    MULTI_STATUS = 207
    MULTIPLE_CHOICES = 300
    MOVED_PERMANENTLY = 301
    FOUND = 302
    SEE_OTHER = 303
    NOT_MODIFIED = 304
    USE_PROXY = 305
    TEMPORARY_REDIRECT = 307
    BAD_REQUEST = 400
    UNAUTHORIZED = 401
    PAYMENT_REQUIRED = 402
    FORBIDDEN = 403
    NOT_FOUND = 404
    METHOD_NOT_ALLOWED = 405
    NOT_ACCEPTABLE = 406
    PROXY_AUTHENTICATION = 407
    REQUEST_TIMEOUT = 408
    CONFLICT = 409
    GONE = 410
    LENGTH_REQUIRED = 411
    PRECONDITION_FAILED = 412
    REQUEST_ENTITY_TOO_LARGE = 413
    REQUEST_URI_TOO_LONG = 414
    UNSUPPORTED_MEDIA_TYPE = 415
    REQUESTED_RANGE_NOT_SATISFIED = 416
    EXPECTATION_FAILED = 417
    UNPROCESSABLE_ENTITY = 422
    LOCKED = 423
    FAILED_DEPENDENCY = 424
    INTERNAL_SERVER_ERROR = 500
    NOT_IMPLEMENTED = 501
    BAD_GATEWAY = 502
    SERVICE_UNAVAILABLE = 503
    GATEWAY_TIMEOUT = 504
    INSUFFICIENT_STORAGE = 506

    _VALUES_TO_NAMES = {
        100: "CONTINUE", 101: "SWITCHING_PROTOCOLS", 200: "OK", 201: "CREATED",
        202: "ACCEPTED", 203: "NON_AUTHORITATIVE_INFORMATION", 204: "NO_CONTENT",
        205: "RESET_CONTENT", 206: "PARTIAL_CONTENT", 207: "MULTI_STATUS",
        300: "MULTIPLE_CHOICES", 301: "MOVED_PERMANENTLY", 302: "FOUND",
        303: "SEE_OTHER", 304: "NOT_MODIFIED", 305: "USE_PROXY",
        307: "TEMPORARY_REDIRECT", 400: "BAD_REQUEST", 401: "UNAUTHORIZED",
        402: "PAYMENT_REQUIRED", 403: "FORBIDDEN", 404: "NOT_FOUND",
        405: "METHOD_NOT_ALLOWED", 406: "NOT_ACCEPTABLE", 407: "PROXY_AUTHENTICATION",
        408: "REQUEST_TIMEOUT", 409: "CONFLICT", 410: "GONE", 411: "LENGTH_REQUIRED",
        412: "PRECONDITION_FAILED", 413: "REQUEST_ENTITY_TOO_LARGE",
        414: "REQUEST_URI_TOO_LONG", 415: "UNSUPPORTED_MEDIA_TYPE",
        416: "REQUESTED_RANGE_NOT_SATISFIED", 417: "EXPECTATION_FAILED",
        422: "UNPROCESSABLE_ENTITY", 423: "LOCKED", 424: "FAILED_DEPENDENCY",
        500: "INTERNAL_SERVER_ERROR", 501: "NOT_IMPLEMENTED", 502: "BAD_GATEWAY",
        503: "SERVICE_UNAVAILABLE", 504: "GATEWAY_TIMEOUT", 506: "INSUFFICIENT_STORAGE",
    }

    _NAMES_TO_VALUES = {
        "CONTINUE": 100, "SWITCHING_PROTOCOLS": 101, "OK": 200, "CREATED": 201,
        "ACCEPTED": 202, "NON_AUTHORITATIVE_INFORMATION": 203, "NO_CONTENT": 204,
        "RESET_CONTENT": 205, "PARTIAL_CONTENT": 206, "MULTI_STATUS": 207,
        "MULTIPLE_CHOICES": 300, "MOVED_PERMANENTLY": 301, "FOUND": 302,
        "SEE_OTHER": 303, "NOT_MODIFIED": 304, "USE_PROXY": 305,
        "TEMPORARY_REDIRECT": 307, "BAD_REQUEST": 400, "UNAUTHORIZED": 401,
        "PAYMENT_REQUIRED": 402, "FORBIDDEN": 403, "NOT_FOUND": 404,
        "METHOD_NOT_ALLOWED": 405, "NOT_ACCEPTABLE": 406, "PROXY_AUTHENTICATION": 407,
        "REQUEST_TIMEOUT": 408, "CONFLICT": 409, "GONE": 410, "LENGTH_REQUIRED": 411,
        "PRECONDITION_FAILED": 412, "REQUEST_ENTITY_TOO_LARGE": 413,
        "REQUEST_URI_TOO_LONG": 414, "UNSUPPORTED_MEDIA_TYPE": 415,
        "REQUESTED_RANGE_NOT_SATISFIED": 416, "EXPECTATION_FAILED": 417,
        "UNPROCESSABLE_ENTITY": 422, "LOCKED": 423, "FAILED_DEPENDENCY": 424,
        "INTERNAL_SERVER_ERROR": 500, "NOT_IMPLEMENTED": 501, "BAD_GATEWAY": 502,
        "SERVICE_UNAVAILABLE": 503, "GATEWAY_TIMEOUT": 504, "INSUFFICIENT_STORAGE": 506,
    }


class RestRequest(object):
    """
    Attributes:
     - method
     - uri
     - parameters
     - headers
     - body
    """

    def __init__(self, method=None, uri=None, parameters=None, headers=None, body=None):
        self.method = method
        self.uri = uri
        self.parameters = parameters
        self.headers = headers
        self.body = body

    def __repr__(self):
        full_url = 'http://localhost:9200' + self.uri
        if len(self.parameters) > 0:
            full_url += '?'
            for k, v in self.parameters:
                full_url += k + '&' + v
        return "curl -X%s %s -d '%s'" % (
            Method._VALUES_TO_NAMES[self.method],
            full_url,
            self.body,
        )


class RestResponse(object):
    """
    Attributes:
     - status
     - headers
     - body
    """

    def __init__(self, status=None, headers=None, body=None):
        self.status = status
        self.headers = headers
        self.body = body
dongweiming/web_develop
refs/heads/master
chapter12/section4/config.py
1
# coding=utf-8
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'mysql://web:web@localhost:3306/r'
UPLOAD_FOLDER = 'permdir'
SQLALCHEMY_TRACK_MODIFICATIONS = False
cstamm/PythonNetworking
refs/heads/master
python_course/base_module1.py
1
eq_break = "===================="
PeterWangIntel/chromium-crosswalk
refs/heads/master
build/android/pylib/device/commands/__init__.py
1201
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
mdietrichc2c/OCB
refs/heads/8.0
addons/hr_contract/__openerp__.py
52
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Employee Contracts',
    'version': '1.0',
    'category': 'Human Resources',
    'description': """
Add all information on the employee form to manage contracts.
=============================================================

    * Contract
    * Place of Birth,
    * Medical Examination Date
    * Company Vehicle

You can assign several contracts per employee.
    """,
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/employees',
    'images': ['images/hr_contract.jpeg'],
    'depends': ['base_action_rule', 'hr'],
    'data': [
        'security/ir.model.access.csv',
        'hr_contract_view.xml',
        'hr_contract_data.xml',
        'base_action_rule_view.xml',
    ],
    'demo': [],
    'test': ['test/test_hr_contract.yml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Tesi-Luca-Davide/ryu
refs/heads/master
ryu/tests/unit/cmd/dummy_app.py
56
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ryu.base import app_manager


class DummyApp(app_manager.RyuApp):
    pass