repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
irwinlove/django
tests/template_tests/filter_tests/test_linebreaks.py
310
1920
from django.template.defaultfilters import linebreaks_filter from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import setup class LinebreaksTests(SimpleTestCase): """ The contents in "linebreaks" are escaped according to the current autoescape setting. """ @setup({'linebreaks01': '{{ a|linebreaks }} {{ b|linebreaks }}'}) def test_linebreaks01(self): output = self.engine.render_to_string('linebreaks01', {"a": "x&\ny", "b": mark_safe("x&\ny")}) self.assertEqual(output, "<p>x&amp;<br />y</p> <p>x&<br />y</p>") @setup({'linebreaks02': '{% autoescape off %}{{ a|linebreaks }} {{ b|linebreaks }}{% endautoescape %}'}) def test_linebreaks02(self): output = self.engine.render_to_string('linebreaks02', {"a": "x&\ny", "b": mark_safe("x&\ny")}) self.assertEqual(output, "<p>x&<br />y</p> <p>x&<br />y</p>") class FunctionTests(SimpleTestCase): def test_line(self): self.assertEqual(linebreaks_filter('line 1'), '<p>line 1</p>') def test_newline(self): self.assertEqual(linebreaks_filter('line 1\nline 2'), '<p>line 1<br />line 2</p>') def test_carriage(self): self.assertEqual(linebreaks_filter('line 1\rline 2'), '<p>line 1<br />line 2</p>') def test_carriage_newline(self): self.assertEqual(linebreaks_filter('line 1\r\nline 2'), '<p>line 1<br />line 2</p>') def test_non_string_input(self): self.assertEqual(linebreaks_filter(123), '<p>123</p>') def test_autoescape(self): self.assertEqual( linebreaks_filter('foo\n<a>bar</a>\nbuz'), '<p>foo<br />&lt;a&gt;bar&lt;/a&gt;<br />buz</p>', ) def test_autoescape_off(self): self.assertEqual( linebreaks_filter('foo\n<a>bar</a>\nbuz', autoescape=False), '<p>foo<br /><a>bar</a><br />buz</p>', )
bsd-3-clause
kevthehermit/SpearPhisher
testdata.py
2
3851
# This file will generate random test data and write it to a database. import os import sys import string import django import random # Fake Factory from faker import Faker fake = Faker() # Connect to the Django Database sys.path.insert(1,'/home/spearphisher/spearphisher') script_path = os.path.dirname(__file__) sys.path.insert(1,'/home/spearphisher/spearphisher') os.environ['DJANGO_SETTINGS_MODULE']='spearphisher.settings' django.setup() # Load the Database Classes from panel.models import Campaign, Recipient, Template, Configuration, Logging # Edit these entries for each Run company_domain = 'companyname.com' # This forms the email address company_size = 452 os_list = ['Android', 'BlackBerry OS', 'IOS', 'Windows XP', 'Windows 7', 'Windows 8.1'] browser_list = ['Chrome 27.0.1453', 'Chrome 43.0.2357', 'Chrome 32.0.1489', 'IE 6.0.0', 'IE 7.0.0', 'IE 8.0.0', 'IE 9.0.0', 'IE 10.0.0', 'IE 11.0.0'] reader_list = ['null', '', '10.1.13', '11.0.8', '9.3.0', '9.2.3', '11.0.4', '10.1.2', '9.0.0'] flash_list = ['null', '', '18.0.0.161', '13.0.0.292', '15.0.0.189', '16.0.0.257'] java_list = ['null', '', '1.7.0.60', '1.7.0.13', '1.6.0.22', '1.6.0.37', '1.7.0.15', '1.8.0.22'] silver_list = ['null', '', '', '3.0', '3.5', '4.5.1', '', ''] shock_list = ['null', '', '', '12.0.0.112', '12.0.6.147', '8.0.205', '9.0.432', '10.1.1.016'] doc_list = ['Office 2003', 'Office 2007', 'Office 2010', 'Office 2013'] email_list = ['Outlook 2003', 'Outlook 2007', 'Outlook 2010', 'Outlook 2013'] # Create a Campaign campaign = Campaign() campaign.name = 'This is another test campaign' campaign.description = 'This is another test description' campaign.template_id = '2' campaign.created = fake.date_time_between(start_date="-3d", end_date="-2d") campaign.start_data = fake.date_time_between(start_date="-3d", end_date="-2d") campaign.save() # Create Recipients for i in range(company_size): full_name = fake.name() email_add = '{0}@{1}'.format(full_name.replace(' ', '.'), company_domain) uid = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(8)]) # Create a recipient recipient = Recipient() recipient.campaign = campaign recipient.real_name = full_name recipient.email_address = email_add recipient.uid = uid # Choose to enter details or not portal_choice = random.choice([True, False]) document_choice = random.choice([True, False]) webbug_choice = random.choice([True, False]) # Fill as per choices if portal_choice: recipient.portal_open = fake.date_time_between(start_date="-2d", end_date="now") recipient.os_system = random.choice(os_list) if recipient.os_system == 'Android': recipient.web_client = 'Chrome Mobile' elif recipient.os_system == 'BlackBerry OS': recipient.web_client = 'BlackBerry WebKit' else: recipient.web_client = random.choice(browser_list) recipient.reader_version = random.choice(reader_list) recipient.flash_version = random.choice(flash_list) recipient.java_version = random.choice(java_list) recipient.silverlight_version = random.choice(silver_list) recipient.shockwave_version = random.choice(shock_list) recipient.ip_address = '144.76.87.236' if document_choice: recipient.document_open = fake.date_time_between(start_date="-2d", end_date="now") recipient.document_client = random.choice(doc_list) recipient.ip_address = '144.76.87.236' if webbug_choice: recipient.email_open = fake.date_time_between(start_date="-2d", end_date="now") recipient.email_client = random.choice(email_list) recipient.ip_address = '144.76.87.236' # Save the Recipient recipient.save()
gpl-3.0
yanheven/cinder
cinder/volume/drivers/block_device.py
3
8381
# Copyright (c) 2013 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from cinder import context from cinder.db.sqlalchemy import api from cinder import exception from cinder.i18n import _, _LI from cinder.image import image_utils from cinder.volume import driver from cinder.volume import utils as volutils LOG = logging.getLogger(__name__) volume_opts = [ cfg.ListOpt('available_devices', default=[], help='List of all available devices'), ] CONF = cfg.CONF CONF.register_opts(volume_opts) class BlockDeviceDriver(driver.VolumeDriver): VERSION = '2.0.0' def __init__(self, *args, **kwargs): super(BlockDeviceDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) self.backend_name = \ self.configuration.safe_get('volume_backend_name') or "BlockDev" target_driver =\ self.target_mapping[self.configuration.safe_get('iscsi_helper')] self.target_driver = importutils.import_object( target_driver, configuration=self.configuration, db=self.db, executor=self._execute) def check_for_setup_error(self): pass def create_volume(self, volume): device = self.find_appropriate_size_device(volume['size']) LOG.info("Create %s on %s" % (volume['name'], device)) return { 'provider_location': device, } def delete_volume(self, volume): """Deletes a logical volume.""" dev_path = self.local_path(volume) if not dev_path or dev_path not in \ self.configuration.available_devices: return if os.path.exists(dev_path) and \ self.configuration.volume_clear != 'none': volutils.clear_volume( self._get_device_size(dev_path), dev_path, volume_clear=self.configuration.volume_clear, volume_clear_size=self.configuration.volume_clear_size) def local_path(self, volume): if volume['provider_location']: path = volume['provider_location'].rsplit(" ", 1) return path[-1] else: return None def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume['size']) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" image_utils.upload_volume(context, image_service, image_meta, self.local_path(volume)) def create_cloned_volume(self, volume, src_vref): LOG.info(_LI('Creating clone of volume: %s') % src_vref['id']) device = self.find_appropriate_size_device(src_vref['size']) volutils.copy_volume( self.local_path(src_vref), device, self._get_device_size(device) * 2048, self.configuration.volume_dd_blocksize, execute=self._execute) return { 'provider_location': device, } def get_volume_stats(self, refresh=False): if refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" dict_of_devices_sizes = self._devices_sizes() used_devices = 
self._get_used_devices() total_size = 0 free_size = 0 for device, size in dict_of_devices_sizes.iteritems(): if device not in used_devices: free_size += size total_size += size LOG.debug("Updating volume stats") backend_name = self.configuration.safe_get('volume_backend_name') data = {'total_capacity_gb': total_size / 1024, 'free_capacity_gb': free_size / 1024, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'volume_backend_name': backend_name or self.__class__.__name__, 'vendor_name': "Open Source", 'driver_version': self.VERSION, 'storage_protocol': 'unknown'} self._stats = data def _get_used_devices(self): lst = api.volume_get_all_by_host(context.get_admin_context(), self.host) used_devices = set() for volume in lst: local_path = self.local_path(volume) if local_path: used_devices.add(local_path) return used_devices def _get_device_size(self, dev_path): out, _err = self._execute('blockdev', '--getsz', dev_path, run_as_root=True) size_in_m = int(out) return size_in_m / 2048 def _devices_sizes(self): available_devices = self.configuration.available_devices dict_of_devices_sizes = {} for device in available_devices: dict_of_devices_sizes[device] = self._get_device_size(device) return dict_of_devices_sizes def find_appropriate_size_device(self, size): dict_of_devices_sizes = self._devices_sizes() free_devices = (set(self.configuration.available_devices) - self._get_used_devices()) if not free_devices: raise exception.CinderException(_("No free disk")) possible_device = None possible_device_size = None for device in free_devices: dev_size = dict_of_devices_sizes[device] if size * 1024 <= dev_size and (possible_device is None or dev_size < possible_device_size): possible_device = device possible_device_size = dev_size if possible_device: return possible_device else: raise exception.CinderException(_("No big enough free disk")) # ####### Interface methods for DataPath (Target Driver) ######## def ensure_export(self, context, volume): volume_path = "/dev/%s/%s" % (self.configuration.volume_group, volume['name']) model_update = \ self.target_driver.ensure_export( context, volume, volume_path) return model_update def create_export(self, context, volume): volume_path = "/dev/%s/%s" % (self.configuration.volume_group, volume['name']) export_info = self.target_driver.create_export(context, volume, volume_path) return {'provider_location': export_info['location'], 'provider_auth': export_info['auth'], } def remove_export(self, context, volume): self.target_driver.remove_export(context, volume) def initialize_connection(self, volume, connector): return self.target_driver.initialize_connection(volume, connector) def validate_connector(self, connector): return self.target_driver.validate_connector(connector) def terminate_connection(self, volume, connector, **kwargs): pass
apache-2.0
YosaiProject/yosai_account_alchemy
yosai_alchemystore/accountstore/accountstore.py
2
10153
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import functools from sqlalchemy import case, cast, func, Text from sqlalchemy.sql import Alias, ColumnElement from sqlalchemy.ext.compiler import compiles from yosai_alchemystore import ( init_session ) from yosai_alchemystore.models.models import ( Credential, CredentialType, User, Domain, Action, Resource, Permission, Role, role_membership as role_membership_table, role_permission as role_permission_table, ) from yosai.core import ( account_abcs, ) # ------------------------------------------------------- # Following is a recipe used to address postgres-json related shortcomings # in sqlalchemy v1.1.4. This recipe will eventually be deprecated # ---------------------------------------------------------- class as_row(ColumnElement): def __init__(self, expr): assert isinstance(expr, Alias) self.expr = expr @compiles(as_row) def _gen_as_row(element, compiler, **kw): return compiler.visit_alias(element.expr, ashint=True, **kw) # ------------------------------------------------------- # ------------------------------------------------------- def session_context(fn): """ Handles session setup and teardown """ @functools.wraps(fn) def wrap(*args, **kwargs): session = args[0].Session() # obtain from self result = fn(*args, session=session, **kwargs) session.close() return result return wrap class AlchemyAccountStore(account_abcs.CredentialsAccountStore, account_abcs.AuthorizationAccountStore, account_abcs.LockingAccountStore): """ AccountStore provides the realm-facing API to the relational database that is managed through the SQLAlchemy ORM. 
step 1: generate an orm query step 2: execute the query step 3: return results """ def __init__(self, db_url=None, session=None, settings=None): """ :param db_url: engine configuration that is in the 'Database URL' format as supported by SQLAlchemy: http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls :type db_url: string """ if session is None: self.Session = init_session(db_url=db_url, settings=settings) else: self.Session = session def _get_user_query(self, session, identifier): return session.query(User).filter(User.identifier == identifier) def _get_permissions_query(self, session, identifier): """ select domain, json_agg(parts) as permissions from (select domain, row_to_json(r) as parts from (select domain, action, array_agg(distinct target) as target from (select (case when domain is null then '*' else domain end) as domain, (case when target is null then '*' else target end) as target, array_agg(distinct (case when action is null then '*' else action end)) as action from permission group by domain, target ) x group by domain, action) r) parts group by domain; """ thedomain = case([(Domain.name == None, '*')], else_=Domain.name) theaction = case([(Action.name == None, '*')], else_=Action.name) theresource = case([(Resource.name == None, '*')], else_=Resource.name) action_agg = func.array_agg(theaction.distinct()) stmt1 = ( session.query(Permission.domain_id, thedomain.label('domain'), Permission.resource_id, theresource.label('resource'), action_agg.label('action')). select_from(User). join(role_membership_table, User.pk_id == role_membership_table.c.user_id). join(role_permission_table, role_membership_table.c.role_id == role_permission_table.c.role_id). join(Permission, role_permission_table.c.permission_id == Permission.pk_id). outerjoin(Domain, Permission.domain_id == Domain.pk_id). outerjoin(Action, Permission.action_id == Action.pk_id). outerjoin(Resource, Permission.resource_id == Resource.pk_id). filter(User.identifier == identifier). group_by(Permission.domain_id, Domain.name, Permission.resource_id, Resource.name)).subquery() stmt2 = (session.query(stmt1.c.domain, stmt1.c.action, func.array_agg(stmt1.c.resource.distinct()).label('resource')). select_from(stmt1). group_by(stmt1.c.domain, stmt1.c.action)).subquery() stmt3 = (session.query(stmt2.c.domain, func.row_to_json(as_row(stmt2)).label('parts')). select_from(stmt2)).subquery() final = (session.query(stmt3.c.domain, cast(func.json_agg(stmt3.c.parts), Text)). select_from(stmt3). group_by(stmt3.c.domain)) return final def _get_roles_query(self, session, identifier): """ :type identifier: string """ return (session.query(Role). join(role_membership_table, Role.pk_id == role_membership_table.c.role_id). join(User, role_membership_table.c.user_id == User.pk_id). filter(User.identifier == identifier)) def _get_credential_query(self, session, identifier): return (session.query(CredentialType.title, Credential.credential). join(Credential, CredentialType.pk_id == Credential.credential_type_id). join(User, Credential.user_id == User.pk_id). filter(User.identifier == identifier)) @session_context def get_authc_info(self, identifier, session=None): """ If an Account requires credentials from multiple data stores, this AccountStore is responsible for aggregating them (composite) and returning the results in a single account object. 
:returns: a dict of account attributes """ user = self._get_user_query(session, identifier).first() creds = self._get_credential_query(session, identifier).all() if not creds: return None authc_info = {cred_type: {'credential': cred_value, 'failed_attempts': []} for cred_type, cred_value in creds} if 'totp_key' in authc_info: authc_info['totp_key']['2fa_info'] = {'phone_number': user.phone_number} return dict(account_locked=user.account_lock_millis, authc_info=authc_info) @session_context def get_authz_permissions(self, identifier, session=None): try: return dict(self._get_permissions_query(session, identifier).all()) except (AttributeError, TypeError): return None @session_context def get_authz_roles(self, identifier, session=None): try: return [r.title for r in self._get_roles_query(session, identifier).all()] except (AttributeError, TypeError): return None @session_context def lock_account(self, identifier, locked_time, session=None): session.query(User).\ filter(User.identifier == identifier).\ update({User.account_lock_millis: locked_time}) session.commit() @session_context def unlock_account(self, identifier, session=None): session.query(User).\ filter(User.identifier == identifier).\ update({User.account_lock_millis: None}) session.commit() # @session_context # def get_account(self, identifier, session=None): # """ # get_account performs the most comprehensive collection of information # from the database, including credentials AND authorization information # # :param identifier: the request object's identifier # :returns: dict # # CAUTION # -------- # This method was initially created as part of shiro porting but is # not intended for v0.1.0 use due to lack of support for get_or_create_multi # dogpile locking. If you would like to use get_account, you *should* # implement an appropriate get_or_create_multi caching process (and submit # the changes as pull requests to yosai!). Without dogpile protection, # you run the risk of concurrently calling the most expensive creational # process # # """ # cred = self.get_credential_query(session, identifier).scalar() # credential = self.credential(cred) # # roles = {self.role(r.title) # for r in self.get_roles_query(session, identifier).all()} # # perms = self.get_permissions_query(session, identifier).all() # permissions = {self.permission(permission=p.perm) # for p in perms} # # authz_info = self.authz_info(roles=roles, # permissions=permissions) # # account = dict(account_id=identifier, # credentials=credential, # authz_info=authz_info) # # return account
apache-2.0
matthewoliver/swift
test/unit/common/test_utils.py
2
275026
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for swift.common.utils""" from __future__ import print_function from test.unit import temptree, debug_logger, make_timestamp_iter, with_tempdir import ctypes import contextlib import errno import eventlet import eventlet.debug import eventlet.event import eventlet.patcher import functools import grp import logging import platform import os import mock import pwd import random import re import socket import string import sys import json import math import inspect import six from six import BytesIO, StringIO from six.moves.queue import Queue, Empty from six.moves import http_client from six.moves import range from textwrap import dedent import tempfile import time import unittest import fcntl import shutil from getpass import getuser from shutil import rmtree from functools import partial from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp from netifaces import AF_INET6 from mock import MagicMock, patch from six.moves.configparser import NoSectionError, NoOptionError from uuid import uuid4 from swift.common.exceptions import Timeout, MessageTimeout, \ ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \ MimeInvalid from swift.common import utils from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6, \ set_swift_dir from swift.common.container_sync_realms import ContainerSyncRealms from swift.common.header_key_dict import HeaderKeyDict from swift.common.storage_policy import POLICIES, reload_storage_policies from swift.common.swob import Request, Response from test.unit import FakeLogger, requires_o_tmpfile_support, \ requires_o_tmpfile_support_in_tmp, quiet_eventlet_exceptions threading = eventlet.patcher.original('threading') class MockOs(object): def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None): if pass_funcs is None: pass_funcs = [] if called_funcs is None: called_funcs = [] if raise_funcs is None: raise_funcs = [] self.closed_fds = [] for func in pass_funcs: setattr(self, func, self.pass_func) self.called_funcs = {} for func in called_funcs: c_func = partial(self.called_func, func) setattr(self, func, c_func) for func in raise_funcs: r_func = partial(self.raise_func, func) setattr(self, func, r_func) def pass_func(self, *args, **kwargs): pass setgroups = chdir = setsid = setgid = setuid = umask = pass_func def called_func(self, name, *args, **kwargs): self.called_funcs[name] = args def raise_func(self, name, *args, **kwargs): self.called_funcs[name] = args raise OSError() def dup2(self, source, target): self.closed_fds.append(target) def geteuid(self): '''Pretend we are running as root.''' return 0 def __getattr__(self, name): # I only over-ride portions of the os module try: return object.__getattr__(self, name) except AttributeError: return getattr(os, name) class MockUdpSocket(object): def __init__(self, sendto_errno=None): self.sent = [] self.sendto_errno = sendto_errno def sendto(self, data, target): if self.sendto_errno: raise 
socket.error(self.sendto_errno, 'test errno %s' % self.sendto_errno) self.sent.append((data, target)) def close(self): pass class MockSys(object): def __init__(self): self.stdin = TemporaryFile('w') self.stdout = TemporaryFile('r') self.stderr = TemporaryFile('r') self.__stderr__ = self.stderr self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()] def reset_loggers(): if hasattr(utils.get_logger, 'handler4logger'): for logger, handler in utils.get_logger.handler4logger.items(): logger.removeHandler(handler) delattr(utils.get_logger, 'handler4logger') if hasattr(utils.get_logger, 'console_handler4logger'): for logger, h in utils.get_logger.console_handler4logger.items(): logger.removeHandler(h) delattr(utils.get_logger, 'console_handler4logger') # Reset the LogAdapter class thread local state. Use get_logger() here # to fetch a LogAdapter instance because the items from # get_logger.handler4logger above are the underlying logger instances, # not the LogAdapter. utils.get_logger(None).thread_locals = (None, None) def reset_logger_state(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): reset_loggers() try: return f(self, *args, **kwargs) finally: reset_loggers() return wrapper class TestTimestamp(unittest.TestCase): """Tests for swift.common.utils.Timestamp""" def test_invalid_input(self): self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1) self.assertRaises(ValueError, utils.Timestamp, '123.456_78_90') def test_invalid_string_conversion(self): t = utils.Timestamp.now() self.assertRaises(TypeError, str, t) def test_offset_limit(self): t = 1417462430.78693 # can't have a offset above MAX_OFFSET self.assertRaises(ValueError, utils.Timestamp, t, offset=utils.MAX_OFFSET + 1) # exactly max offset is fine ts = utils.Timestamp(t, offset=utils.MAX_OFFSET) self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff') # but you can't offset it further self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1) # unless you start below it ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1) self.assertEqual(utils.Timestamp(ts.internal, offset=1), '1417462430.78693_ffffffffffffffff') def test_normal_format_no_offset(self): expected = '1402436408.91203' test_values = ( '1402436408.91203', '1402436408.91203_00000000', '1402436408.912030000', '1402436408.912030000_0000000000000', '000001402436408.912030000', '000001402436408.912030000_0000000000', 1402436408.91203, 1402436408.912029, 1402436408.9120300000000000, 1402436408.91202999999999999, utils.Timestamp(1402436408.91203), utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.912029), utils.Timestamp(1402436408.912029, offset=0), utils.Timestamp('1402436408.91203'), utils.Timestamp('1402436408.91203', offset=0), utils.Timestamp('1402436408.91203_00000000'), utils.Timestamp('1402436408.91203_00000000', offset=0), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(timestamp.normal, expected) # timestamp instance can also compare to string or float self.assertEqual(timestamp, expected) self.assertEqual(timestamp, float(expected)) self.assertEqual(timestamp, utils.normalize_timestamp(expected)) def test_isoformat(self): expected = '2014-06-10T22:47:32.054580' test_values = ( '1402440452.05458', '1402440452.054579', '1402440452.05458_00000000', '1402440452.054579_00000000', '1402440452.054580000', '1402440452.054579999', '1402440452.054580000_0000000000000', '1402440452.054579999_0000ff00', '000001402440452.054580000', 
'000001402440452.0545799', '000001402440452.054580000_0000000000', '000001402440452.054579999999_00000fffff', 1402440452.05458, 1402440452.054579, 1402440452.0545800000000000, 1402440452.054579999, utils.Timestamp(1402440452.05458), utils.Timestamp(1402440452.0545799), utils.Timestamp(1402440452.05458, offset=0), utils.Timestamp(1402440452.05457999999, offset=0), utils.Timestamp(1402440452.05458, offset=100), utils.Timestamp(1402440452.054579, offset=100), utils.Timestamp('1402440452.05458'), utils.Timestamp('1402440452.054579999'), utils.Timestamp('1402440452.05458', offset=0), utils.Timestamp('1402440452.054579', offset=0), utils.Timestamp('1402440452.05458', offset=300), utils.Timestamp('1402440452.05457999', offset=300), utils.Timestamp('1402440452.05458_00000000'), utils.Timestamp('1402440452.05457999_00000000'), utils.Timestamp('1402440452.05458_00000000', offset=0), utils.Timestamp('1402440452.05457999_00000aaa', offset=0), utils.Timestamp('1402440452.05458_00000000', offset=400), utils.Timestamp('1402440452.054579_0a', offset=400), ) for value in test_values: self.assertEqual(utils.Timestamp(value).isoformat, expected) expected = '1970-01-01T00:00:00.000000' test_values = ( '0', '0000000000.00000', '0000000000.00000_ffffffffffff', 0, 0.0, ) for value in test_values: self.assertEqual(utils.Timestamp(value).isoformat, expected) def test_not_equal(self): ts = '1402436408.91203_0000000000000001' test_values = ( utils.Timestamp('1402436408.91203_0000000000000002'), utils.Timestamp('1402436408.91203'), utils.Timestamp(1402436408.91203), utils.Timestamp(1402436408.91204), utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.91203, offset=2), ) for value in test_values: self.assertTrue(value != ts) self.assertIs(True, utils.Timestamp(ts) == ts) # sanity self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts)) self.assertIs(False, utils.Timestamp(ts) != ts) self.assertIs(False, utils.Timestamp(ts) is None) self.assertIs(True, utils.Timestamp(ts) is not None) def test_no_force_internal_no_offset(self): """Test that internal is the same as normal with no offset""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000') self.assertEqual(utils.Timestamp(1402437380.58186).internal, '1402437380.58186') self.assertEqual(utils.Timestamp(1402437380.581859).internal, '1402437380.58186') self.assertEqual(utils.Timestamp(0).internal, utils.normalize_timestamp(0)) def test_no_force_internal_with_offset(self): """Test that internal always includes the offset if significant""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual(utils.Timestamp(0, offset=1).internal, '0000000000.00000_0000000000000001') self.assertEqual( utils.Timestamp(1402437380.58186, offset=16).internal, '1402437380.58186_0000000000000010') self.assertEqual( utils.Timestamp(1402437380.581859, offset=240).internal, '1402437380.58186_00000000000000f0') self.assertEqual( utils.Timestamp('1402437380.581859_00000001', offset=240).internal, '1402437380.58186_00000000000000f1') def test_force_internal(self): """Test that internal always includes the offset if forced""" with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True): self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000_0000000000000000') self.assertEqual(utils.Timestamp(1402437380.58186).internal, '1402437380.58186_0000000000000000') self.assertEqual(utils.Timestamp(1402437380.581859).internal, '1402437380.58186_0000000000000000') 
self.assertEqual(utils.Timestamp(0, offset=1).internal, '0000000000.00000_0000000000000001') self.assertEqual( utils.Timestamp(1402437380.58186, offset=16).internal, '1402437380.58186_0000000000000010') self.assertEqual( utils.Timestamp(1402437380.581859, offset=16).internal, '1402437380.58186_0000000000000010') def test_internal_format_no_offset(self): expected = '1402436408.91203_0000000000000000' test_values = ( '1402436408.91203', '1402436408.91203_00000000', '1402436408.912030000', '1402436408.912030000_0000000000000', '000001402436408.912030000', '000001402436408.912030000_0000000000', 1402436408.91203, 1402436408.9120300000000000, 1402436408.912029, 1402436408.912029999999999999, utils.Timestamp(1402436408.91203), utils.Timestamp(1402436408.91203, offset=0), utils.Timestamp(1402436408.912029), utils.Timestamp(1402436408.91202999999999999, offset=0), utils.Timestamp('1402436408.91203'), utils.Timestamp('1402436408.91203', offset=0), utils.Timestamp('1402436408.912029'), utils.Timestamp('1402436408.912029', offset=0), utils.Timestamp('1402436408.912029999999999'), utils.Timestamp('1402436408.912029999999999', offset=0), ) for value in test_values: # timestamp instance is always equivalent self.assertEqual(utils.Timestamp(value), expected) if utils.FORCE_INTERNAL: # the FORCE_INTERNAL flag makes the internal format always # include the offset portion of the timestamp even when it's # not significant and would be bad during upgrades self.assertEqual(utils.Timestamp(value).internal, expected) else: # unless we FORCE_INTERNAL, when there's no offset the # internal format is equivalent to the normalized format self.assertEqual(utils.Timestamp(value).internal, '1402436408.91203') def test_internal_format_with_offset(self): expected = '1402436408.91203_00000000000000f0' test_values = ( '1402436408.91203_000000f0', u'1402436408.91203_000000f0', b'1402436408.91203_000000f0', '1402436408.912030000_0000000000f0', '1402436408.912029_000000f0', '1402436408.91202999999_0000000000f0', '000001402436408.912030000_000000000f0', '000001402436408.9120299999_000000000f0', utils.Timestamp(1402436408.91203, offset=240), utils.Timestamp(1402436408.912029, offset=240), utils.Timestamp('1402436408.91203', offset=240), utils.Timestamp('1402436408.91203_00000000', offset=240), utils.Timestamp('1402436408.91203_0000000f', offset=225), utils.Timestamp('1402436408.9120299999', offset=240), utils.Timestamp('1402436408.9120299999_00000000', offset=240), utils.Timestamp('1402436408.9120299999_00000010', offset=224), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(timestamp.internal, expected) # can compare with offset if the string is internalized self.assertEqual(timestamp, expected) # if comparison value only includes the normalized portion and the # timestamp includes an offset, it is considered greater normal = utils.Timestamp(expected).normal self.assertTrue(timestamp > normal, '%r is not bigger than %r given %r' % ( timestamp, normal, value)) self.assertTrue(timestamp > float(normal), '%r is not bigger than %f given %r' % ( timestamp, float(normal), value)) def test_short_format_with_offset(self): expected = '1402436408.91203_f0' timestamp = utils.Timestamp(1402436408.91203, 0xf0) self.assertEqual(expected, timestamp.short) expected = '1402436408.91203' timestamp = utils.Timestamp(1402436408.91203) self.assertEqual(expected, timestamp.short) def test_raw(self): expected = 140243640891203 timestamp = utils.Timestamp(1402436408.91203) self.assertEqual(expected, timestamp.raw) # 
'raw' does not include offset timestamp = utils.Timestamp(1402436408.91203, 0xf0) self.assertEqual(expected, timestamp.raw) def test_delta(self): def _assertWithinBounds(expected, timestamp): tolerance = 0.00001 minimum = expected - tolerance maximum = expected + tolerance self.assertTrue(float(timestamp) > minimum) self.assertTrue(float(timestamp) < maximum) timestamp = utils.Timestamp(1402436408.91203, delta=100) _assertWithinBounds(1402436408.91303, timestamp) self.assertEqual(140243640891303, timestamp.raw) timestamp = utils.Timestamp(1402436408.91203, delta=-100) _assertWithinBounds(1402436408.91103, timestamp) self.assertEqual(140243640891103, timestamp.raw) timestamp = utils.Timestamp(1402436408.91203, delta=0) _assertWithinBounds(1402436408.91203, timestamp) self.assertEqual(140243640891203, timestamp.raw) # delta is independent of offset timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100) self.assertEqual(140243640891303, timestamp.raw) self.assertEqual(42, timestamp.offset) # cannot go negative self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203, delta=-140243640891203) def test_int(self): expected = 1402437965 test_values = ( '1402437965.91203', '1402437965.91203_00000000', '1402437965.912030000', '1402437965.912030000_0000000000000', '000001402437965.912030000', '000001402437965.912030000_0000000000', 1402437965.91203, 1402437965.9120300000000000, 1402437965.912029, 1402437965.912029999999999999, utils.Timestamp(1402437965.91203), utils.Timestamp(1402437965.91203, offset=0), utils.Timestamp(1402437965.91203, offset=500), utils.Timestamp(1402437965.912029), utils.Timestamp(1402437965.91202999999999999, offset=0), utils.Timestamp(1402437965.91202999999999999, offset=300), utils.Timestamp('1402437965.91203'), utils.Timestamp('1402437965.91203', offset=0), utils.Timestamp('1402437965.91203', offset=400), utils.Timestamp('1402437965.912029'), utils.Timestamp('1402437965.912029', offset=0), utils.Timestamp('1402437965.912029', offset=200), utils.Timestamp('1402437965.912029999999999'), utils.Timestamp('1402437965.912029999999999', offset=0), utils.Timestamp('1402437965.912029999999999', offset=100), ) for value in test_values: timestamp = utils.Timestamp(value) self.assertEqual(int(timestamp), expected) self.assertTrue(timestamp > expected) def test_float(self): expected = 1402438115.91203 test_values = ( '1402438115.91203', '1402438115.91203_00000000', '1402438115.912030000', '1402438115.912030000_0000000000000', '000001402438115.912030000', '000001402438115.912030000_0000000000', 1402438115.91203, 1402438115.9120300000000000, 1402438115.912029, 1402438115.912029999999999999, utils.Timestamp(1402438115.91203), utils.Timestamp(1402438115.91203, offset=0), utils.Timestamp(1402438115.91203, offset=500), utils.Timestamp(1402438115.912029), utils.Timestamp(1402438115.91202999999999999, offset=0), utils.Timestamp(1402438115.91202999999999999, offset=300), utils.Timestamp('1402438115.91203'), utils.Timestamp('1402438115.91203', offset=0), utils.Timestamp('1402438115.91203', offset=400), utils.Timestamp('1402438115.912029'), utils.Timestamp('1402438115.912029', offset=0), utils.Timestamp('1402438115.912029', offset=200), utils.Timestamp('1402438115.912029999999999'), utils.Timestamp('1402438115.912029999999999', offset=0), utils.Timestamp('1402438115.912029999999999', offset=100), ) tolerance = 0.00001 minimum = expected - tolerance maximum = expected + tolerance for value in test_values: timestamp = utils.Timestamp(value) self.assertTrue(float(timestamp) > 
minimum, '%f is not bigger than %f given %r' % ( timestamp, minimum, value)) self.assertTrue(float(timestamp) < maximum, '%f is not smaller than %f given %r' % ( timestamp, maximum, value)) # direct comparison of timestamp works too self.assertTrue(timestamp > minimum, '%s is not bigger than %f given %r' % ( timestamp.normal, minimum, value)) self.assertTrue(timestamp < maximum, '%s is not smaller than %f given %r' % ( timestamp.normal, maximum, value)) # ... even against strings self.assertTrue(timestamp > '%f' % minimum, '%s is not bigger than %s given %r' % ( timestamp.normal, minimum, value)) self.assertTrue(timestamp < '%f' % maximum, '%s is not smaller than %s given %r' % ( timestamp.normal, maximum, value)) def test_false(self): self.assertFalse(utils.Timestamp(0)) self.assertFalse(utils.Timestamp(0, offset=0)) self.assertFalse(utils.Timestamp('0')) self.assertFalse(utils.Timestamp('0', offset=0)) self.assertFalse(utils.Timestamp(0.0)) self.assertFalse(utils.Timestamp(0.0, offset=0)) self.assertFalse(utils.Timestamp('0.0')) self.assertFalse(utils.Timestamp('0.0', offset=0)) self.assertFalse(utils.Timestamp(00000000.00000000)) self.assertFalse(utils.Timestamp(00000000.00000000, offset=0)) self.assertFalse(utils.Timestamp('00000000.00000000')) self.assertFalse(utils.Timestamp('00000000.00000000', offset=0)) def test_true(self): self.assertTrue(utils.Timestamp(1)) self.assertTrue(utils.Timestamp(1, offset=1)) self.assertTrue(utils.Timestamp(0, offset=1)) self.assertTrue(utils.Timestamp('1')) self.assertTrue(utils.Timestamp('1', offset=1)) self.assertTrue(utils.Timestamp('0', offset=1)) self.assertTrue(utils.Timestamp(1.1)) self.assertTrue(utils.Timestamp(1.1, offset=1)) self.assertTrue(utils.Timestamp(0.0, offset=1)) self.assertTrue(utils.Timestamp('1.1')) self.assertTrue(utils.Timestamp('1.1', offset=1)) self.assertTrue(utils.Timestamp('0.0', offset=1)) self.assertTrue(utils.Timestamp(11111111.11111111)) self.assertTrue(utils.Timestamp(11111111.11111111, offset=1)) self.assertTrue(utils.Timestamp(00000000.00000000, offset=1)) self.assertTrue(utils.Timestamp('11111111.11111111')) self.assertTrue(utils.Timestamp('11111111.11111111', offset=1)) self.assertTrue(utils.Timestamp('00000000.00000000', offset=1)) def test_greater_no_offset(self): now = time.time() older = now - 1 timestamp = utils.Timestamp(now) test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', 1402443112.213252, '1402443112.213252', '1402443112.213252_ffff', older, '%f' % older, '%f_0000ffff' % older, ) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp > value, '%r is not greater than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp > other, '%r is not greater than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp > other.normal, '%r is not greater than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp > other.internal, '%r is not greater than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp > float(other), '%r is not greater than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp > int(other), '%r is not greater than %r given %r' % ( timestamp, int(other), value)) def _test_greater_with_offset(self, now, test_values): for offset in range(1, 1000, 100): timestamp = utils.Timestamp(now, offset=offset) for value in test_values: other = utils.Timestamp(value) 
self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp > value, '%r is not greater than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp > other, '%r is not greater than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp > other.normal, '%r is not greater than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp > other.internal, '%r is not greater than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp > float(other), '%r is not greater than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp > int(other), '%r is not greater than %r given %r' % ( timestamp, int(other), value)) def test_greater_with_offset(self): # Part 1: use the natural time of the Python. This is deliciously # unpredictable, but completely legitimate and realistic. Finds bugs! now = time.time() older = now - 1 test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff', older, now, ) self._test_greater_with_offset(now, test_values) # Part 2: Same as above, but with fixed time values that reproduce # specific corner cases. now = 1519830570.6949348 older = now - 1 test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff', older, now, ) self._test_greater_with_offset(now, test_values) # Part 3: The '%f' problem. Timestamps cannot be converted to %f # strings, then back to timestamps, then compared with originals. # You can only "import" a floating point representation once. now = 1519830570.6949348 now = float('%f' % now) older = now - 1 test_values = ( 0, '0', 0.0, '0.0', '0000.0000', '000.000_000', 1, '1', 1.1, '1.1', '1111.1111', '111.111_111', older, '%f' % older, '%f_0000ffff' % older, now, '%f' % now, '%s_00000000' % now, ) self._test_greater_with_offset(now, test_values) def test_smaller_no_offset(self): now = time.time() newer = now + 1 timestamp = utils.Timestamp(now) test_values = ( 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff', newer, '%f' % newer, '%f_0000ffff' % newer, ) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp < value, '%r is not smaller than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp < other, '%r is not smaller than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp < other.normal, '%r is not smaller than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp < other.internal, '%r is not smaller than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp < float(other), '%r is not smaller than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp < int(other), '%r is not smaller than %r given %r' % ( timestamp, int(other), value)) def test_smaller_with_offset(self): now = time.time() newer = now + 1 test_values = ( 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff', newer, '%f' % newer, '%f_0000ffff' % newer, ) for offset in range(1, 1000, 100): timestamp = utils.Timestamp(now, offset=offset) for value in test_values: other = utils.Timestamp(value) self.assertNotEqual(timestamp, other) # sanity self.assertTrue(timestamp < value, '%r is not smaller than %r given %r' % ( timestamp, value, value)) self.assertTrue(timestamp < other, 
'%r is not smaller than %r given %r' % ( timestamp, other, value)) self.assertTrue(timestamp < other.normal, '%r is not smaller than %r given %r' % ( timestamp, other.normal, value)) self.assertTrue(timestamp < other.internal, '%r is not smaller than %r given %r' % ( timestamp, other.internal, value)) self.assertTrue(timestamp < float(other), '%r is not smaller than %r given %r' % ( timestamp, float(other), value)) self.assertTrue(timestamp < int(other), '%r is not smaller than %r given %r' % ( timestamp, int(other), value)) def test_cmp_with_none(self): self.assertGreater(utils.Timestamp(0), None) self.assertGreater(utils.Timestamp(1.0), None) self.assertGreater(utils.Timestamp(1.0, 42), None) def test_ordering(self): given = [ '1402444820.62590_000000000000000a', '1402444820.62589_0000000000000001', '1402444821.52589_0000000000000004', '1402444920.62589_0000000000000004', '1402444821.62589_000000000000000a', '1402444821.72589_000000000000000a', '1402444920.62589_0000000000000002', '1402444820.62589_0000000000000002', '1402444820.62589_000000000000000a', '1402444820.62590_0000000000000004', '1402444920.62589_000000000000000a', '1402444820.62590_0000000000000002', '1402444821.52589_0000000000000002', '1402444821.52589_0000000000000000', '1402444920.62589', '1402444821.62589_0000000000000004', '1402444821.72589_0000000000000001', '1402444820.62590', '1402444820.62590_0000000000000001', '1402444820.62589_0000000000000004', '1402444821.72589_0000000000000000', '1402444821.52589_000000000000000a', '1402444821.72589_0000000000000004', '1402444821.62589', '1402444821.52589_0000000000000001', '1402444821.62589_0000000000000001', '1402444821.62589_0000000000000002', '1402444821.72589_0000000000000002', '1402444820.62589', '1402444920.62589_0000000000000001'] expected = [ '1402444820.62589', '1402444820.62589_0000000000000001', '1402444820.62589_0000000000000002', '1402444820.62589_0000000000000004', '1402444820.62589_000000000000000a', '1402444820.62590', '1402444820.62590_0000000000000001', '1402444820.62590_0000000000000002', '1402444820.62590_0000000000000004', '1402444820.62590_000000000000000a', '1402444821.52589', '1402444821.52589_0000000000000001', '1402444821.52589_0000000000000002', '1402444821.52589_0000000000000004', '1402444821.52589_000000000000000a', '1402444821.62589', '1402444821.62589_0000000000000001', '1402444821.62589_0000000000000002', '1402444821.62589_0000000000000004', '1402444821.62589_000000000000000a', '1402444821.72589', '1402444821.72589_0000000000000001', '1402444821.72589_0000000000000002', '1402444821.72589_0000000000000004', '1402444821.72589_000000000000000a', '1402444920.62589', '1402444920.62589_0000000000000001', '1402444920.62589_0000000000000002', '1402444920.62589_0000000000000004', '1402444920.62589_000000000000000a', ] # less visual version """ now = time.time() given = [ utils.Timestamp(now + i, offset=offset).internal for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0) for offset in (0, 1, 2, 4, 10) ] expected = [t for t in given] random.shuffle(given) """ self.assertEqual(len(given), len(expected)) # sanity timestamps = [utils.Timestamp(t) for t in given] # our expected values don't include insignificant offsets with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False): self.assertEqual( [t.internal for t in sorted(timestamps)], expected) # string sorting works as well self.assertEqual( sorted([t.internal for t in timestamps]), expected) def test_hashable(self): ts_0 = utils.Timestamp('1402444821.72589') ts_0_also = utils.Timestamp('1402444821.72589') 
self.assertEqual(ts_0, ts_0_also) # sanity self.assertEqual(hash(ts_0), hash(ts_0_also)) d = {ts_0: 'whatever'} self.assertIn(ts_0, d) # sanity self.assertIn(ts_0_also, d) class TestTimestampEncoding(unittest.TestCase): def setUp(self): t0 = utils.Timestamp(0.0) t1 = utils.Timestamp(997.9996) t2 = utils.Timestamp(999) t3 = utils.Timestamp(1000, 24) t4 = utils.Timestamp(1001) t5 = utils.Timestamp(1002.00040) # encodings that are expected when explicit = False self.non_explicit_encodings = ( ('0000001000.00000_18', (t3, t3, t3)), ('0000001000.00000_18', (t3, t3, None)), ) # mappings that are expected when explicit = True self.explicit_encodings = ( ('0000001000.00000_18+0+0', (t3, t3, t3)), ('0000001000.00000_18+0', (t3, t3, None)), ) # mappings that are expected when explicit = True or False self.encodings = ( ('0000001000.00000_18+0+186a0', (t3, t3, t4)), ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)), ('0000001000.00000_18-186a0+0', (t3, t2, t2)), ('0000001000.00000_18+0-186a0', (t3, t3, t2)), ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)), ('0000001000.00000_18', (t3, None, None)), ('0000001000.00000_18+186a0', (t3, t4, None)), ('0000001000.00000_18-186a0', (t3, t2, None)), ('0000001000.00000_18', (t3, None, t1)), ('0000001000.00000_18-5f5e100', (t3, t0, None)), ('0000001000.00000_18+0-5f5e100', (t3, t3, t0)), ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)), ) # decodings that are expected when explicit = False self.non_explicit_decodings = ( ('0000001000.00000_18', (t3, t3, t3)), ('0000001000.00000_18+186a0', (t3, t4, t4)), ('0000001000.00000_18-186a0', (t3, t2, t2)), ('0000001000.00000_18+186a0', (t3, t4, t4)), ('0000001000.00000_18-186a0', (t3, t2, t2)), ('0000001000.00000_18-5f5e100', (t3, t0, t0)), ) # decodings that are expected when explicit = True self.explicit_decodings = ( ('0000001000.00000_18+0+0', (t3, t3, t3)), ('0000001000.00000_18+0', (t3, t3, None)), ('0000001000.00000_18', (t3, None, None)), ('0000001000.00000_18+186a0', (t3, t4, None)), ('0000001000.00000_18-186a0', (t3, t2, None)), ('0000001000.00000_18-5f5e100', (t3, t0, None)), ) # decodings that are expected when explicit = True or False self.decodings = ( ('0000001000.00000_18+0+186a0', (t3, t3, t4)), ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)), ('0000001000.00000_18-186a0+0', (t3, t2, t2)), ('0000001000.00000_18+0-186a0', (t3, t3, t2)), ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)), ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)), ) def _assertEqual(self, expected, actual, test): self.assertEqual(expected, actual, 'Got %s but expected %s for parameters %s' % (actual, expected, test)) def test_encoding(self): for test in self.explicit_encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], test[1][2], True) self._assertEqual(test[0], actual, test[1]) for test in self.non_explicit_encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], test[1][2], False) self._assertEqual(test[0], actual, test[1]) for explicit in (True, False): for test in self.encodings: actual = utils.encode_timestamps(test[1][0], test[1][1], test[1][2], explicit) self._assertEqual(test[0], actual, test[1]) def test_decoding(self): for test in self.explicit_decodings: actual = utils.decode_timestamps(test[0], True) self._assertEqual(test[1], actual, test[0]) for test in self.non_explicit_decodings: actual = utils.decode_timestamps(test[0], False) self._assertEqual(test[1], actual, test[0]) for explicit in (True, False): for test in self.decodings: actual = utils.decode_timestamps(test[0], 
explicit) self._assertEqual(test[1], actual, test[0]) class TestUtils(unittest.TestCase): """Tests for swift.common.utils """ def setUp(self): utils.HASH_PATH_SUFFIX = b'endcap' utils.HASH_PATH_PREFIX = b'startcap' def test_get_zero_indexed_base_string(self): self.assertEqual(utils.get_zero_indexed_base_string('something', 0), 'something') self.assertEqual(utils.get_zero_indexed_base_string('something', None), 'something') self.assertEqual(utils.get_zero_indexed_base_string('something', 1), 'something-1') self.assertRaises(ValueError, utils.get_zero_indexed_base_string, 'something', 'not_integer') @with_tempdir def test_lock_path(self, tmpdir): # 2 locks with limit=1 must fail success = False with utils.lock_path(tmpdir, 0.1): with self.assertRaises(LockTimeout): with utils.lock_path(tmpdir, 0.1): success = True self.assertFalse(success) # 2 locks with limit=2 must succeed success = False with utils.lock_path(tmpdir, 0.1, limit=2): try: with utils.lock_path(tmpdir, 0.1, limit=2): success = True except LockTimeout as exc: self.fail('Unexpected exception %s' % exc) self.assertTrue(success) # 3 locks with limit=2 must fail success = False with utils.lock_path(tmpdir, 0.1, limit=2): with utils.lock_path(tmpdir, 0.1, limit=2): with self.assertRaises(LockTimeout): with utils.lock_path(tmpdir, 0.1): success = True self.assertFalse(success) @with_tempdir def test_lock_path_invalid_limit(self, tmpdir): success = False with self.assertRaises(ValueError): with utils.lock_path(tmpdir, 0.1, limit=0): success = True self.assertFalse(success) with self.assertRaises(ValueError): with utils.lock_path(tmpdir, 0.1, limit=-1): success = True self.assertFalse(success) with self.assertRaises(TypeError): with utils.lock_path(tmpdir, 0.1, limit='1'): success = True self.assertFalse(success) with self.assertRaises(TypeError): with utils.lock_path(tmpdir, 0.1, limit=1.1): success = True self.assertFalse(success) @with_tempdir def test_lock_path_num_sleeps(self, tmpdir): num_short_calls = [0] exception_raised = [False] def my_sleep(to_sleep): if to_sleep == 0.01: num_short_calls[0] += 1 else: raise Exception('sleep time changed: %s' % to_sleep) try: with mock.patch('swift.common.utils.sleep', my_sleep): with utils.lock_path(tmpdir): with utils.lock_path(tmpdir): pass except Exception as e: exception_raised[0] = True self.assertTrue('sleep time changed' in str(e)) self.assertEqual(num_short_calls[0], 11) self.assertTrue(exception_raised[0]) @with_tempdir def test_lock_path_class(self, tmpdir): with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout): exc = None exc2 = None success = False try: with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout): success = True except ReplicationLockTimeout as err: exc = err except LockTimeout as err: exc2 = err self.assertTrue(exc is not None) self.assertTrue(exc2 is None) self.assertTrue(not success) exc = None exc2 = None success = False try: with utils.lock_path(tmpdir, 0.1): success = True except ReplicationLockTimeout as err: exc = err except LockTimeout as err: exc2 = err self.assertTrue(exc is None) self.assertTrue(exc2 is not None) self.assertTrue(not success) def test_normalize_timestamp(self): # Test swift.common.utils.normalize_timestamp self.assertEqual(utils.normalize_timestamp('1253327593.48174'), "1253327593.48174") self.assertEqual(utils.normalize_timestamp(1253327593.48174), "1253327593.48174") self.assertEqual(utils.normalize_timestamp('1253327593.48'), "1253327593.48000") self.assertEqual(utils.normalize_timestamp(1253327593.48), "1253327593.48000") 
self.assertEqual(utils.normalize_timestamp('253327593.48'), "0253327593.48000") self.assertEqual(utils.normalize_timestamp(253327593.48), "0253327593.48000") self.assertEqual(utils.normalize_timestamp('1253327593'), "1253327593.00000") self.assertEqual(utils.normalize_timestamp(1253327593), "1253327593.00000") self.assertRaises(ValueError, utils.normalize_timestamp, '') self.assertRaises(ValueError, utils.normalize_timestamp, 'abc') def test_normalize_delete_at_timestamp(self): self.assertEqual( utils.normalize_delete_at_timestamp(1253327593), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp(1253327593.67890), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp('1253327593'), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp('1253327593.67890'), '1253327593') self.assertEqual( utils.normalize_delete_at_timestamp(-1253327593), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp(-1253327593.67890), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp('-1253327593'), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp('-1253327593.67890'), '0000000000') self.assertEqual( utils.normalize_delete_at_timestamp(71253327593), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp(71253327593.67890), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp('71253327593'), '9999999999') self.assertEqual( utils.normalize_delete_at_timestamp('71253327593.67890'), '9999999999') self.assertRaises(ValueError, utils.normalize_timestamp, '') self.assertRaises(ValueError, utils.normalize_timestamp, 'abc') def test_last_modified_date_to_timestamp(self): expectations = { '1970-01-01T00:00:00.000000': 0.0, '2014-02-28T23:22:36.698390': 1393629756.698390, '2011-03-19T04:03:00.604554': 1300507380.604554, } for last_modified, ts in expectations.items(): real = utils.last_modified_date_to_timestamp(last_modified) self.assertEqual(real, ts, "failed for %s" % last_modified) def test_last_modified_date_to_timestamp_when_system_not_UTC(self): try: old_tz = os.environ.get('TZ') # Western Argentina Summer Time. Found in glibc manual; this # timezone always has a non-zero offset from UTC, so this test is # always meaningful. os.environ['TZ'] = 'WART4WARST,J1/0,J365/25' self.assertEqual(utils.last_modified_date_to_timestamp( '1970-01-01T00:00:00.000000'), 0.0) finally: if old_tz is not None: os.environ['TZ'] = old_tz else: os.environ.pop('TZ') def test_backwards(self): # Test swift.common.utils.backward # The lines are designed so that the function would encounter # all of the boundary conditions and typical conditions. 
# Block boundaries are marked with '<>' characters blocksize = 25 lines = [b'123456789x12345678><123456789\n', # block larger than rest b'123456789x123>\n', # block ends just before \n character b'123423456789\n', b'123456789x\n', # block ends at the end of line b'<123456789x123456789x123\n', b'<6789x123\n', # block ends at the beginning of the line b'6789x1234\n', b'1234><234\n', # block ends typically in the middle of line b'123456789x123456789\n'] with TemporaryFile() as f: for line in lines: f.write(line) count = len(lines) - 1 for line in utils.backward(f, blocksize): self.assertEqual(line, lines[count].split(b'\n')[0]) count -= 1 # Empty file case with TemporaryFile('r') as f: self.assertEqual([], list(utils.backward(f))) def test_mkdirs(self): testdir_base = mkdtemp() testroot = os.path.join(testdir_base, 'mkdirs') try: self.assertTrue(not os.path.exists(testroot)) utils.mkdirs(testroot) self.assertTrue(os.path.exists(testroot)) utils.mkdirs(testroot) self.assertTrue(os.path.exists(testroot)) rmtree(testroot, ignore_errors=1) testdir = os.path.join(testroot, 'one/two/three') self.assertTrue(not os.path.exists(testdir)) utils.mkdirs(testdir) self.assertTrue(os.path.exists(testdir)) utils.mkdirs(testdir) self.assertTrue(os.path.exists(testdir)) rmtree(testroot, ignore_errors=1) open(testroot, 'wb').close() self.assertTrue(not os.path.exists(testdir)) self.assertRaises(OSError, utils.mkdirs, testdir) os.unlink(testroot) finally: rmtree(testdir_base) def test_split_path(self): # Test swift.common.utils.split_account_path self.assertRaises(ValueError, utils.split_path, '') self.assertRaises(ValueError, utils.split_path, '/') self.assertRaises(ValueError, utils.split_path, '//') self.assertEqual(utils.split_path('/a'), ['a']) self.assertRaises(ValueError, utils.split_path, '//a') self.assertEqual(utils.split_path('/a/'), ['a']) self.assertRaises(ValueError, utils.split_path, '/a/c') self.assertRaises(ValueError, utils.split_path, '//c') self.assertRaises(ValueError, utils.split_path, '/a/c/') self.assertRaises(ValueError, utils.split_path, '/a//') self.assertRaises(ValueError, utils.split_path, '/a', 2) self.assertRaises(ValueError, utils.split_path, '/a', 2, 3) self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True) self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c']) self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o']) self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3) self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True), ['a', 'c', 'o/r']) self.assertEqual(utils.split_path('/a/c', 2, 3, True), ['a', 'c', None]) self.assertRaises(ValueError, utils.split_path, '/a', 5, 4) self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c']) self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', '']) try: utils.split_path('o\nn e', 2) except ValueError as err: self.assertEqual(str(err), 'Invalid path: o%0An%20e') try: utils.split_path('o\nn e', 2, 3, True) except ValueError as err: self.assertEqual(str(err), 'Invalid path: o%0An%20e') def test_validate_device_partition(self): # Test swift.common.utils.validate_device_partition utils.validate_device_partition('foo', 'bar') self.assertRaises(ValueError, utils.validate_device_partition, '', '') self.assertRaises(ValueError, utils.validate_device_partition, '', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', '') self.assertRaises(ValueError, utils.validate_device_partition, 'foo/bar', 'foo') self.assertRaises(ValueError, utils.validate_device_partition, 'foo', 'foo/bar') 
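        # '.' and '..' must be rejected as well; as path components they
        # would escape the devices directory: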
        self.assertRaises(ValueError,
                          utils.validate_device_partition, '.', 'foo')
        self.assertRaises(ValueError,
                          utils.validate_device_partition, '..', 'foo')
        self.assertRaises(ValueError,
                          utils.validate_device_partition, 'foo', '.')
        self.assertRaises(ValueError,
                          utils.validate_device_partition, 'foo', '..')
        try:
            utils.validate_device_partition('o\nn e', 'foo')
        except ValueError as err:
            self.assertEqual(str(err), 'Invalid device: o%0An%20e')
        try:
            utils.validate_device_partition('foo', 'o\nn e')
        except ValueError as err:
            self.assertEqual(str(err), 'Invalid partition: o%0An%20e')

    def test_NullLogger(self):
        # Test swift.common.utils.NullLogger
        sio = StringIO()
        nl = utils.NullLogger()
        nl.write('test')
        self.assertEqual(sio.getvalue(), '')

    def test_LoggerFileObject(self):
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr
        sio = StringIO()
        handler = logging.StreamHandler(sio)
        logger = logging.getLogger()
        logger.addHandler(handler)
        lfo_stdout = utils.LoggerFileObject(logger)
        lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
        print('test1')
        self.assertEqual(sio.getvalue(), '')
        sys.stdout = lfo_stdout
        print('test2')
        self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
        sys.stderr = lfo_stderr
        print('test4', file=sys.stderr)
        self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
        sys.stdout = orig_stdout
        print('test5')
        self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
        print('test6', file=sys.stderr)
        self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
                         'STDERR: test6\n')
        sys.stderr = orig_stderr
        print('test8')
        self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
                         'STDERR: test6\n')
        lfo_stdout.writelines(['a', 'b', 'c'])
        self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
                         'STDERR: test6\nSTDOUT: a#012b#012c\n')
        lfo_stdout.close()
        lfo_stderr.close()
        lfo_stdout.write('d')
        self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
                         'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
        lfo_stdout.flush()
        self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
                         'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
        for lfo in (lfo_stdout, lfo_stderr):
            got_exc = False
            try:
                for line in lfo:
                    pass
            except Exception:
                got_exc = True
            self.assertTrue(got_exc)
            got_exc = False
            try:
                for line in lfo:
                    pass
            except Exception:
                got_exc = True
            self.assertTrue(got_exc)
            self.assertRaises(IOError, lfo.read)
            self.assertRaises(IOError, lfo.read, 1024)
            self.assertRaises(IOError, lfo.readline)
            self.assertRaises(IOError, lfo.readline, 1024)
            lfo.tell()

    def test_LoggerFileObject_recursion(self):
        crashy_calls = [0]

        class CrashyLogger(logging.Handler):
            def emit(self, record):
                crashy_calls[0] += 1
                try:
                    # Pretend to be trying to send to syslog, but syslogd is
                    # dead. We need the raise here to set sys.exc_info.
                    raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
                except socket.error:
                    self.handleError(record)

        logger = logging.getLogger()
        logger.addHandler(CrashyLogger())

        # Set up some real file descriptors for stdio. If you run
        # nosetests with "-s", you already have real files there, but
        # otherwise they're StringIO objects.
        #
        # In any case, since capture_stdio() closes sys.stdin and friends,
        # we'd want to set up some sacrificial files so as to not goof up
        # the testrunner.
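        #
        # (Editor's assumption: os.devnull is used because capture_stdio()
        # needs real file descriptors to dup2() over -- plain StringIO
        # objects would not survive the fd-level redirection.)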
        new_stdin = open(os.devnull, 'r+b')
        new_stdout = open(os.devnull, 'w+b')
        new_stderr = open(os.devnull, 'w+b')

        with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
                contextlib.closing(new_stderr):
            # logging.raiseExceptions is set to False in test/__init__.py,
            # but is True in Swift daemons, and the error doesn't manifest
            # without it.
            with mock.patch('sys.stdin', new_stdin), \
                    mock.patch('sys.stdout', new_stdout), \
                    mock.patch('sys.stderr', new_stderr), \
                    mock.patch.object(logging, 'raiseExceptions', True):
                # Note: since stdio is hooked up to /dev/null in here, using
                # pdb is basically impossible. Sorry about that.
                utils.capture_stdio(logger)
                logger.info("I like ham")
                self.assertEqual(crashy_calls[0], 1)

    def test_parse_options(self):
        # Get a file that is definitely on disk
        with NamedTemporaryFile() as f:
            conf_file = f.name
            conf, options = utils.parse_options(test_args=[conf_file])
            self.assertEqual(conf, conf_file)
            # assert defaults
            self.assertEqual(options['verbose'], False)
            self.assertNotIn('once', options)
            # assert verbose as option
            conf, options = utils.parse_options(test_args=[conf_file, '-v'])
            self.assertEqual(options['verbose'], True)
            # check once option
            conf, options = utils.parse_options(test_args=[conf_file],
                                                once=True)
            self.assertEqual(options['once'], False)
            test_args = [conf_file, '--once']
            conf, options = utils.parse_options(test_args=test_args,
                                                once=True)
            self.assertEqual(options['once'], True)
            # check options as arg parsing
            test_args = [conf_file, 'once', 'plugin_name', 'verbose']
            conf, options = utils.parse_options(test_args=test_args,
                                                once=True)
            self.assertEqual(options['verbose'], True)
            self.assertEqual(options['once'], True)
            self.assertEqual(options['extra_args'], ['plugin_name'])

    def test_parse_options_errors(self):
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr
        stdo = StringIO()
        stde = StringIO()
        utils.sys.stdout = stdo
        utils.sys.stderr = stde
        self.assertRaises(SystemExit, utils.parse_options, once=True,
                          test_args=[])
        self.assertTrue('missing config' in stdo.getvalue())

        # verify conf file must exist, context manager will delete temp file
        with NamedTemporaryFile() as f:
            conf_file = f.name
        self.assertRaises(SystemExit, utils.parse_options, once=True,
                          test_args=[conf_file])
        self.assertTrue('unable to locate' in stdo.getvalue())

        # reset stdio
        utils.sys.stdout = orig_stdout
        utils.sys.stderr = orig_stderr

    def test_dump_recon_cache(self):
        testdir_base = mkdtemp()
        testcache_file = os.path.join(testdir_base, 'cache.recon')
        logger = utils.get_logger(None, 'server', log_route='server')
        try:
            submit_dict = {'key0': 99,
                           'key1': {'value1': 1, 'value2': 2}}
            utils.dump_recon_cache(submit_dict, testcache_file, logger)
            with open(testcache_file) as fd:
                file_dict = json.loads(fd.readline())
            self.assertEqual(submit_dict, file_dict)
            # Use a nested entry
            submit_dict = {'key0': 101,
                           'key1': {'key2': {'value1': 1, 'value2': 2}}}
            expect_dict = {'key0': 101,
                           'key1': {'key2': {'value1': 1, 'value2': 2},
                                    'value1': 1, 'value2': 2}}
            utils.dump_recon_cache(submit_dict, testcache_file, logger)
            with open(testcache_file) as fd:
                file_dict = json.loads(fd.readline())
            self.assertEqual(expect_dict, file_dict)
            # cached entries are sticky
            submit_dict = {}
            utils.dump_recon_cache(submit_dict, testcache_file, logger)
            with open(testcache_file) as fd:
                file_dict = json.loads(fd.readline())
            self.assertEqual(expect_dict, file_dict)
            # nested dicts can be erased...
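            # (Demonstrated just below: submitting an empty dict for a key
            # erases that key, so {'key1': {'key2': {}}} drops 'key2' while
            # the sibling values remain.)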
submit_dict = {'key1': {'key2': {}}} expect_dict = {'key0': 101, 'key1': {'value1': 1, 'value2': 2}} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # ... and erasure is idempotent utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # top level dicts can be erased... submit_dict = {'key1': {}} expect_dict = {'key0': 101} utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) # ... and erasure is idempotent utils.dump_recon_cache(submit_dict, testcache_file, logger) with open(testcache_file) as fd: file_dict = json.loads(fd.readline()) self.assertEqual(expect_dict, file_dict) finally: rmtree(testdir_base) def test_dump_recon_cache_set_owner(self): testdir_base = mkdtemp() testcache_file = os.path.join(testdir_base, 'cache.recon') logger = utils.get_logger(None, 'server', log_route='server') try: submit_dict = {'key1': {'value1': 1, 'value2': 2}} _ret = lambda: None _ret.pw_uid = 100 _mock_getpwnam = MagicMock(return_value=_ret) _mock_chown = mock.Mock() with patch('os.chown', _mock_chown), \ patch('pwd.getpwnam', _mock_getpwnam): utils.dump_recon_cache(submit_dict, testcache_file, logger, set_owner="swift") _mock_getpwnam.assert_called_once_with("swift") self.assertEqual(_mock_chown.call_args[0][1], 100) finally: rmtree(testdir_base) def test_dump_recon_cache_permission_denied(self): testdir_base = mkdtemp() testcache_file = os.path.join(testdir_base, 'cache.recon') class MockLogger(object): def __init__(self): self._excs = [] def exception(self, message): _junk, exc, _junk = sys.exc_info() self._excs.append(exc) logger = MockLogger() try: submit_dict = {'key1': {'value1': 1, 'value2': 2}} with mock.patch( 'swift.common.utils.NamedTemporaryFile', side_effect=IOError(13, 'Permission Denied')): utils.dump_recon_cache(submit_dict, testcache_file, logger) self.assertIsInstance(logger._excs[0], IOError) finally: rmtree(testdir_base) def test_load_recon_cache(self): stub_data = {'test': 'foo'} with NamedTemporaryFile() as f: f.write(json.dumps(stub_data).encode("utf-8")) f.flush() self.assertEqual(stub_data, utils.load_recon_cache(f.name)) # missing files are treated as empty self.assertFalse(os.path.exists(f.name)) # sanity self.assertEqual({}, utils.load_recon_cache(f.name)) # Corrupt files are treated as empty. We could crash and make an # operator fix the corrupt file, but they'll "fix" it with "rm -f # /var/cache/swift/*.recon", so let's just do it for them. with NamedTemporaryFile() as f: f.write(b"{not [valid (json") f.flush() self.assertEqual({}, utils.load_recon_cache(f.name)) def test_get_logger(self): sio = StringIO() logger = logging.getLogger('server') logger.addHandler(logging.StreamHandler(sio)) logger = utils.get_logger(None, 'server', log_route='server') logger.warning('test1') self.assertEqual(sio.getvalue(), 'test1\n') logger.debug('test2') self.assertEqual(sio.getvalue(), 'test1\n') logger = utils.get_logger({'log_level': 'DEBUG'}, 'server', log_route='server') logger.debug('test3') self.assertEqual(sio.getvalue(), 'test1\ntest3\n') # Doesn't really test that the log facility is truly being used all the # way to syslog; but exercises the code. 
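        # (Editor's note: 'log_facility' is a syslog-style option; the
        # StreamHandler attached above still receives the records, which
        # is what the assertions below rely on.)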
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server', log_route='server') logger.warning('test4') self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\n') # make sure debug doesn't log by default logger.debug('test5') self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\n') # make sure notice lvl logs by default logger.notice('test6') self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\ntest6\n') def test_get_logger_sysloghandler_plumbing(self): orig_sysloghandler = utils.ThreadSafeSysLogHandler syslog_handler_args = [] def syslog_handler_catcher(*args, **kwargs): syslog_handler_args.append((args, kwargs)) return orig_sysloghandler(*args, **kwargs) syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0 syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3 with mock.patch.object(utils, 'ThreadSafeSysLogHandler', syslog_handler_catcher): utils.get_logger({ 'log_facility': 'LOG_LOCAL3', }, 'server', log_route='server') expected_args = [((), {'address': '/dev/log', 'facility': orig_sysloghandler.LOG_LOCAL3})] if not os.path.exists('/dev/log') or \ os.path.isfile('/dev/log') or \ os.path.isdir('/dev/log'): # Since socket on OSX is in /var/run/syslog, there will be # a fallback to UDP. expected_args.append( ((), {'facility': orig_sysloghandler.LOG_LOCAL3})) self.assertEqual(expected_args, syslog_handler_args) syslog_handler_args = [] utils.get_logger({ 'log_facility': 'LOG_LOCAL3', 'log_address': '/foo/bar', }, 'server', log_route='server') self.assertEqual([ ((), {'address': '/foo/bar', 'facility': orig_sysloghandler.LOG_LOCAL3}), # Second call is because /foo/bar didn't exist (and wasn't a # UNIX domain socket). ((), {'facility': orig_sysloghandler.LOG_LOCAL3})], syslog_handler_args) # Using UDP with default port syslog_handler_args = [] utils.get_logger({ 'log_udp_host': 'syslog.funtimes.com', }, 'server', log_route='server') self.assertEqual([ ((), {'address': ('syslog.funtimes.com', logging.handlers.SYSLOG_UDP_PORT), 'facility': orig_sysloghandler.LOG_LOCAL0})], syslog_handler_args) # Using UDP with non-default port syslog_handler_args = [] utils.get_logger({ 'log_udp_host': 'syslog.funtimes.com', 'log_udp_port': '2123', }, 'server', log_route='server') self.assertEqual([ ((), {'address': ('syslog.funtimes.com', 2123), 'facility': orig_sysloghandler.LOG_LOCAL0})], syslog_handler_args) @reset_logger_state def test_clean_logger_exception(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v def log_exception(exc): try: raise exc except (Exception, Timeout): logger.exception('blah') try: # establish base case self.assertEqual(strip_value(sio), '') logger.info('test') self.assertEqual(strip_value(sio), 'test\n') self.assertEqual(strip_value(sio), '') logger.info('test') logger.info('test') self.assertEqual(strip_value(sio), 'test\ntest\n') self.assertEqual(strip_value(sio), '') # test OSError for en in (errno.EIO, errno.ENOSPC): log_exception(OSError(en, 'my %s error message' % en)) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertIn('my %s error message' % en, log_msg) # unfiltered log_exception(OSError()) self.assertTrue('Traceback' in strip_value(sio)) # test socket.error log_exception(socket.error(errno.ECONNREFUSED, 'my error message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('errno.ECONNREFUSED message 
test', log_msg) self.assertIn('Connection refused', log_msg) log_exception(socket.error(errno.EHOSTUNREACH, 'my error message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('my error message', log_msg) self.assertIn('Host unreachable', log_msg) log_exception(socket.error(errno.ETIMEDOUT, 'my error message')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertNotIn('my error message', log_msg) self.assertIn('Connection timeout', log_msg) # unfiltered log_exception(socket.error(0, 'my error message')) log_msg = strip_value(sio) self.assertIn('Traceback', log_msg) self.assertIn('my error message', log_msg) # test eventlet.Timeout connection_timeout = ConnectionTimeout(42, 'my error message') log_exception(connection_timeout) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertTrue('ConnectionTimeout' in log_msg) self.assertTrue('(42s)' in log_msg) self.assertNotIn('my error message', log_msg) connection_timeout.cancel() message_timeout = MessageTimeout(42, 'my error message') log_exception(message_timeout) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertTrue('MessageTimeout' in log_msg) self.assertTrue('(42s)' in log_msg) self.assertTrue('my error message' in log_msg) message_timeout.cancel() # test BadStatusLine log_exception(http_client.BadStatusLine('')) log_msg = strip_value(sio) self.assertNotIn('Traceback', log_msg) self.assertIn('BadStatusLine', log_msg) self.assertIn("''", log_msg) # test unhandled log_exception(Exception('my error message')) log_msg = strip_value(sio) self.assertTrue('Traceback' in log_msg) self.assertTrue('my error message' in log_msg) finally: logger.logger.removeHandler(handler) @reset_logger_state def test_swift_log_formatter_max_line_length(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) formatter = utils.SwiftLogFormatter(max_line_length=10) handler.setFormatter(formatter) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v try: logger.info('12345') self.assertEqual(strip_value(sio), '12345\n') logger.info('1234567890') self.assertEqual(strip_value(sio), '1234567890\n') logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '12 ... de\n') formatter.max_line_length = 11 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123 ... cde\n') formatter.max_line_length = 0 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234567890abcde\n') formatter.max_line_length = 1 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1\n') formatter.max_line_length = 2 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '12\n') formatter.max_line_length = 3 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123\n') formatter.max_line_length = 4 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234\n') formatter.max_line_length = 5 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '12345\n') formatter.max_line_length = 6 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '123456\n') formatter.max_line_length = 7 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1 ... 
e\n') formatter.max_line_length = -10 logger.info('1234567890abcde') self.assertEqual(strip_value(sio), '1234567890abcde\n') finally: logger.logger.removeHandler(handler) @reset_logger_state def test_swift_log_formatter(self): # setup stream logging sio = StringIO() logger = utils.get_logger(None) handler = logging.StreamHandler(sio) handler.setFormatter(utils.SwiftLogFormatter()) logger.logger.addHandler(handler) def strip_value(sio): sio.seek(0) v = sio.getvalue() sio.truncate(0) return v try: self.assertFalse(logger.txn_id) logger.error('my error message') log_msg = strip_value(sio) self.assertIn('my error message', log_msg) self.assertNotIn('txn', log_msg) logger.txn_id = '12345' logger.error('test') log_msg = strip_value(sio) self.assertIn('txn', log_msg) self.assertIn('12345', log_msg) # test txn in info message self.assertEqual(logger.txn_id, '12345') logger.info('test') log_msg = strip_value(sio) self.assertIn('txn', log_msg) self.assertIn('12345', log_msg) # test txn already in message self.assertEqual(logger.txn_id, '12345') logger.warning('test 12345 test') self.assertEqual(strip_value(sio), 'test 12345 test\n') # Test multi line collapsing logger.error('my\nerror\nmessage') log_msg = strip_value(sio) self.assertIn('my#012error#012message', log_msg) # test client_ip self.assertFalse(logger.client_ip) logger.error('my error message') log_msg = strip_value(sio) self.assertIn('my error message', log_msg) self.assertNotIn('client_ip', log_msg) logger.client_ip = '1.2.3.4' logger.error('test') log_msg = strip_value(sio) self.assertIn('client_ip', log_msg) self.assertIn('1.2.3.4', log_msg) # test no client_ip on info message self.assertEqual(logger.client_ip, '1.2.3.4') logger.info('test') log_msg = strip_value(sio) self.assertNotIn('client_ip', log_msg) self.assertNotIn('1.2.3.4', log_msg) # test client_ip (and txn) already in message self.assertEqual(logger.client_ip, '1.2.3.4') logger.warning('test 1.2.3.4 test 12345') self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n') finally: logger.logger.removeHandler(handler) def test_storage_directory(self): self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'), 'objects/1/DEF/ABCDEF') def test_is_valid_ip(self): self.assertTrue(is_valid_ip("127.0.0.1")) self.assertTrue(is_valid_ip("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "fe80::" self.assertTrue(is_valid_ip(ipv6)) ipv6 = "::1" self.assertTrue(is_valid_ip(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ip(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ip(not_ipv6)) def test_is_valid_ipv4(self): self.assertTrue(is_valid_ipv4("127.0.0.1")) self.assertTrue(is_valid_ipv4("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" 
self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "fe80::" self.assertFalse(is_valid_ipv4(ipv6)) ipv6 = "::1" self.assertFalse(is_valid_ipv4(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ipv4(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ipv4(not_ipv6)) def test_is_valid_ipv6(self): self.assertFalse(is_valid_ipv6("127.0.0.1")) self.assertFalse(is_valid_ipv6("10.0.0.1")) ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::204:61ff:fe9d:f156" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::204:61ff:254.157.241.86" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "fe80::" self.assertTrue(is_valid_ipv6(ipv6)) ipv6 = "::1" self.assertTrue(is_valid_ipv6(ipv6)) not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" self.assertFalse(is_valid_ipv6(not_ipv6)) not_ipv6 = "1:2:3:4:5:6::7:8" self.assertFalse(is_valid_ipv6(not_ipv6)) def test_expand_ipv6(self): expanded_ipv6 = "fe80::204:61ff:fe9d:f156" upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6)) omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6)) less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156" self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6)) def test_whataremyips(self): myips = utils.whataremyips() self.assertTrue(len(myips) > 1) self.assertTrue('127.0.0.1' in myips) def test_whataremyips_bind_to_all(self): for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000', '::0', '::0000', '::', # Wacky parse-error input produces all IPs 'I am a bear'): myips = utils.whataremyips(any_addr) self.assertTrue(len(myips) > 1) self.assertTrue('127.0.0.1' in myips) def test_whataremyips_bind_ip_specific(self): self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4')) def test_whataremyips_error(self): def my_interfaces(): return ['eth0'] def my_ifaddress_error(interface): raise ValueError with patch('netifaces.interfaces', my_interfaces), \ patch('netifaces.ifaddresses', my_ifaddress_error): self.assertEqual(utils.whataremyips(), []) def test_whataremyips_ipv6(self): test_ipv6_address = '2001:6b0:dead:beef:2::32' test_interface = 'eth0' def my_ipv6_interfaces(): return ['eth0'] def my_ipv6_ifaddresses(interface): return {AF_INET6: [{'netmask': 'ffff:ffff:ffff:ffff::', 'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]} with patch('netifaces.interfaces', my_ipv6_interfaces), \ patch('netifaces.ifaddresses', my_ipv6_ifaddresses): myips = utils.whataremyips() self.assertEqual(len(myips), 1) self.assertEqual(myips[0], test_ipv6_address) def test_hash_path(self): # Yes, these tests are deliberately very fragile. 
We want to make sure # that if someones changes the results hash_path produces, they know it with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''): self.assertEqual(utils.hash_path('a'), '1c84525acb02107ea475dcd3d09c2c58') self.assertEqual(utils.hash_path('a', 'c'), '33379ecb053aa5c9e356c68997cbb59e') self.assertEqual(utils.hash_path('a', 'c', 'o'), '06fbf0b514e5199dfc4e00f42eb5ea83') self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False), '06fbf0b514e5199dfc4e00f42eb5ea83') self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True), b'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN' b'\x00\xf4.\xb5\xea\x83') self.assertRaises(ValueError, utils.hash_path, 'a', object='o') utils.HASH_PATH_PREFIX = b'abcdef' self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False), '363f9b535bfb7d17a43a46a358afca0e') def test_validate_hash_conf(self): # no section causes InvalidHashPathConfigError self._test_validate_hash_conf([], [], True) # 'swift-hash' section is there but no options causes # InvalidHashPathConfigError self._test_validate_hash_conf(['swift-hash'], [], True) # if we have the section and either of prefix or suffix, # InvalidHashPathConfigError doesn't occur self._test_validate_hash_conf( ['swift-hash'], ['swift_hash_path_prefix'], False) self._test_validate_hash_conf( ['swift-hash'], ['swift_hash_path_suffix'], False) # definitely, we have the section and both of them, # InvalidHashPathConfigError doesn't occur self._test_validate_hash_conf( ['swift-hash'], ['swift_hash_path_suffix', 'swift_hash_path_prefix'], False) # But invalid section name should make an error even if valid # options are there self._test_validate_hash_conf( ['swift-hash-xxx'], ['swift_hash_path_suffix', 'swift_hash_path_prefix'], True) def _test_validate_hash_conf(self, sections, options, should_raise_error): class FakeConfigParser(object): def read(self, conf_path, encoding=None): return [conf_path] def get(self, section, option): if section not in sections: raise NoSectionError('section error') elif option not in options: raise NoOptionError('option error', 'this option') else: return 'some_option_value' with mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''), \ mock.patch('swift.common.utils.HASH_PATH_SUFFIX', b''), \ mock.patch('swift.common.utils.ConfigParser', FakeConfigParser): try: utils.validate_hash_conf() except utils.InvalidHashPathConfigError: if not should_raise_error: self.fail('validate_hash_conf should not raise an error') else: if should_raise_error: self.fail('validate_hash_conf should raise an error') def test_load_libc_function(self): self.assertTrue(callable( utils.load_libc_function('printf'))) self.assertTrue(callable( utils.load_libc_function('some_not_real_function'))) self.assertRaises(AttributeError, utils.load_libc_function, 'some_not_real_function', fail_if_missing=True) def test_readconf(self): conf = '''[section1] foo = bar [section2] log_name = yarr''' # setup a real file fd, temppath = tempfile.mkstemp() with os.fdopen(fd, 'w') as f: f.write(conf) make_filename = lambda: temppath # setup a file stream make_fp = lambda: StringIO(conf) for conf_object_maker in (make_filename, make_fp): conffile = conf_object_maker() result = utils.readconf(conffile) expected = {'__file__': conffile, 'log_name': None, 'section1': {'foo': 'bar'}, 'section2': {'log_name': 'yarr'}} self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section1') expected = {'__file__': conffile, 'log_name': 'section1', 'foo': 'bar'} 
self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section2').get('log_name') expected = 'yarr' self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section1', log_name='foo').get('log_name') expected = 'foo' self.assertEqual(result, expected) conffile = conf_object_maker() result = utils.readconf(conffile, 'section1', defaults={'bar': 'baz'}) expected = {'__file__': conffile, 'log_name': 'section1', 'foo': 'bar', 'bar': 'baz'} self.assertEqual(result, expected) self.assertRaisesRegexp( ValueError, 'Unable to find section3 config section in.*', utils.readconf, temppath, 'section3') os.unlink(temppath) self.assertRaises(IOError, utils.readconf, temppath) def test_readconf_raw(self): conf = '''[section1] foo = bar [section2] log_name = %(yarr)s''' # setup a real file fd, temppath = tempfile.mkstemp() with os.fdopen(fd, 'w') as f: f.write(conf) make_filename = lambda: temppath # setup a file stream make_fp = lambda: StringIO(conf) for conf_object_maker in (make_filename, make_fp): conffile = conf_object_maker() result = utils.readconf(conffile, raw=True) expected = {'__file__': conffile, 'log_name': None, 'section1': {'foo': 'bar'}, 'section2': {'log_name': '%(yarr)s'}} self.assertEqual(result, expected) os.unlink(temppath) self.assertRaises(IOError, utils.readconf, temppath) def test_readconf_dir(self): config_dir = { 'server.conf.d/01.conf': """ [DEFAULT] port = 8080 foo = bar [section1] name=section1 """, 'server.conf.d/section2.conf': """ [DEFAULT] port = 8081 bar = baz [section2] name=section2 """, 'other-server.conf.d/01.conf': """ [DEFAULT] port = 8082 [section3] name=section3 """ } # strip indent from test config contents config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items()) with temptree(*zip(*config_dir.items())) as path: conf_dir = os.path.join(path, 'server.conf.d') conf = utils.readconf(conf_dir) expected = { '__file__': os.path.join(path, 'server.conf.d'), 'log_name': None, 'section1': { 'port': '8081', 'foo': 'bar', 'bar': 'baz', 'name': 'section1', }, 'section2': { 'port': '8081', 'foo': 'bar', 'bar': 'baz', 'name': 'section2', }, } self.assertEqual(conf, expected) def test_readconf_dir_ignores_hidden_and_nondotconf_files(self): config_dir = { 'server.conf.d/01.conf': """ [section1] port = 8080 """, 'server.conf.d/.01.conf.swp': """ [section] port = 8081 """, 'server.conf.d/01.conf-bak': """ [section] port = 8082 """, } # strip indent from test config contents config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items()) with temptree(*zip(*config_dir.items())) as path: conf_dir = os.path.join(path, 'server.conf.d') conf = utils.readconf(conf_dir) expected = { '__file__': os.path.join(path, 'server.conf.d'), 'log_name': None, 'section1': { 'port': '8080', }, } self.assertEqual(conf, expected) def _check_drop_privileges(self, mock_os, required_func_calls, call_setsid=True): user = getuser() user_data = pwd.getpwnam(user) self.assertFalse(mock_os.called_funcs) # sanity check # over-ride os with mock with mock.patch('swift.common.utils.os', mock_os): # exercise the code utils.drop_privileges(user, call_setsid=call_setsid) for func in required_func_calls: self.assertIn(func, mock_os.called_funcs) self.assertEqual(user_data[5], mock_os.environ['HOME']) groups = {g.gr_gid for g in grp.getgrall() if user in g.gr_mem} self.assertEqual(groups, set(mock_os.called_funcs['setgroups'][0])) self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0]) 
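        # (For reference, the pwd.getpwnam() tuple indices used here:
        # [2] uid, [3] gid, [5] home directory.)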
self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0]) self.assertEqual('/', mock_os.called_funcs['chdir'][0]) self.assertEqual(0o22, mock_os.called_funcs['umask'][0]) def test_drop_privileges(self): required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid', 'chdir', 'umask') mock_os = MockOs(called_funcs=required_func_calls) self._check_drop_privileges(mock_os, required_func_calls) def test_drop_privileges_setsid_error(self): # OSError trying to get session leader required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid', 'chdir', 'umask') mock_os = MockOs(called_funcs=required_func_calls, raise_funcs=('setsid',)) self._check_drop_privileges(mock_os, required_func_calls) def test_drop_privileges_no_call_setsid(self): required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir', 'umask') # OSError if trying to get session leader, but it shouldn't be called bad_func_calls = ('setsid',) mock_os = MockOs(called_funcs=required_func_calls, raise_funcs=bad_func_calls) self._check_drop_privileges(mock_os, required_func_calls, call_setsid=False) for func in bad_func_calls: self.assertNotIn(func, mock_os.called_funcs) @reset_logger_state def test_capture_stdio(self): # stubs logger = utils.get_logger(None, 'dummy') # mock utils system modules _orig_sys = utils.sys _orig_os = utils.os try: utils.sys = MockSys() utils.os = MockOs() # basic test utils.capture_stdio(logger) self.assertTrue(utils.sys.excepthook is not None) self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds) self.assertTrue( isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertTrue( isinstance(utils.sys.stderr, utils.LoggerFileObject)) # reset; test same args, but exc when trying to close stdio utils.os = MockOs(raise_funcs=('dup2',)) utils.sys = MockSys() # test unable to close stdio utils.capture_stdio(logger) self.assertTrue(utils.sys.excepthook is not None) self.assertEqual(utils.os.closed_fds, []) self.assertTrue( isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertTrue( isinstance(utils.sys.stderr, utils.LoggerFileObject)) # reset; test some other args utils.os = MockOs() utils.sys = MockSys() logger = utils.get_logger(None, log_to_console=True) # test console log utils.capture_stdio(logger, capture_stdout=False, capture_stderr=False) self.assertTrue(utils.sys.excepthook is not None) # when logging to console, stderr remains open self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2]) reset_loggers() # stdio not captured self.assertFalse(isinstance(utils.sys.stdout, utils.LoggerFileObject)) self.assertFalse(isinstance(utils.sys.stderr, utils.LoggerFileObject)) finally: utils.sys = _orig_sys utils.os = _orig_os @reset_logger_state def test_get_logger_console(self): logger = utils.get_logger(None) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertFalse(console_handlers) logger = utils.get_logger(None, log_to_console=True) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertTrue(console_handlers) # make sure you can't have two console handlers self.assertEqual(len(console_handlers), 1) old_handler = console_handlers[0] logger = utils.get_logger(None, log_to_console=True) console_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.StreamHandler)] self.assertEqual(len(console_handlers), 1) new_handler = console_handlers[0] self.assertNotEqual(new_handler, old_handler) def verify_under_pseudo_time( self, func, target_runtime_ms=1, *args, 
**kwargs): curr_time = [42.0] def my_time(): curr_time[0] += 0.001 return curr_time[0] def my_sleep(duration): curr_time[0] += 0.001 curr_time[0] += duration with patch('time.time', my_time), \ patch('time.sleep', my_sleep), \ patch('eventlet.sleep', my_sleep): start = time.time() func(*args, **kwargs) # make sure it's accurate to 10th of a second, converting the time # difference to milliseconds, 100 milliseconds is 1/10 of a second diff_from_target_ms = abs( target_runtime_ms - ((time.time() - start) * 1000)) self.assertTrue(diff_from_target_ms < 100, "Expected %d < 100" % diff_from_target_ms) def test_ratelimit_sleep(self): def testfunc(): running_time = 0 for i in range(100): running_time = utils.ratelimit_sleep(running_time, -5) self.verify_under_pseudo_time(testfunc, target_runtime_ms=1) def testfunc(): running_time = 0 for i in range(100): running_time = utils.ratelimit_sleep(running_time, 0) self.verify_under_pseudo_time(testfunc, target_runtime_ms=1) def testfunc(): running_time = 0 for i in range(50): running_time = utils.ratelimit_sleep(running_time, 200) self.verify_under_pseudo_time(testfunc, target_runtime_ms=250) def test_ratelimit_sleep_with_incr(self): def testfunc(): running_time = 0 vals = [5, 17, 0, 3, 11, 30, 40, 4, 13, 2, -1] * 2 # adds up to 248 total = 0 for i in vals: running_time = utils.ratelimit_sleep(running_time, 500, incr_by=i) total += i self.assertEqual(248, total) self.verify_under_pseudo_time(testfunc, target_runtime_ms=500) def test_ratelimit_sleep_with_sleep(self): def testfunc(): running_time = 0 sleeps = [0] * 7 + [.2] * 3 + [0] * 30 for i in sleeps: running_time = utils.ratelimit_sleep(running_time, 40, rate_buffer=1) time.sleep(i) self.verify_under_pseudo_time(testfunc, target_runtime_ms=900) def test_urlparse(self): parsed = utils.urlparse('http://127.0.0.1/') self.assertEqual(parsed.scheme, 'http') self.assertEqual(parsed.hostname, '127.0.0.1') self.assertEqual(parsed.path, '/') parsed = utils.urlparse('http://127.0.0.1:8080/') self.assertEqual(parsed.port, 8080) parsed = utils.urlparse('https://127.0.0.1/') self.assertEqual(parsed.scheme, 'https') parsed = utils.urlparse('http://[::1]/') self.assertEqual(parsed.hostname, '::1') parsed = utils.urlparse('http://[::1]:8080/') self.assertEqual(parsed.hostname, '::1') self.assertEqual(parsed.port, 8080) parsed = utils.urlparse('www.example.com') self.assertEqual(parsed.hostname, '') def test_search_tree(self): # file match & ext miss with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t: asdf = utils.search_tree(t, 'a*', '.conf') self.assertEqual(len(asdf), 1) self.assertEqual(asdf[0], os.path.join(t, 'asdf.conf')) # multi-file match & glob miss & sort with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t: app_bins = utils.search_tree(t, 'app*', 'bin') self.assertEqual(len(app_bins), 2) self.assertEqual(app_bins[0], os.path.join(t, 'apple.bin')) self.assertEqual(app_bins[1], os.path.join(t, 'application.bin')) # test file in folder & ext miss & glob miss files = ( 'sub/file1.ini', 'sub/file2.conf', 'sub.bin', 'bus.ini', 'bus/file3.ini', ) with temptree(files) as t: sub_ini = utils.search_tree(t, 'sub*', '.ini') self.assertEqual(len(sub_ini), 1) self.assertEqual(sub_ini[0], os.path.join(t, 'sub/file1.ini')) # test multi-file in folder & sub-folder & ext miss & glob miss files = ( 'folder_file.txt', 'folder/1.txt', 'folder/sub/2.txt', 'folder2/3.txt', 'Folder3/4.txt' 'folder.rc', ) with temptree(files) as t: folder_texts = utils.search_tree(t, 'folder*', '.txt') 
self.assertEqual(len(folder_texts), 4) f1 = os.path.join(t, 'folder_file.txt') f2 = os.path.join(t, 'folder/1.txt') f3 = os.path.join(t, 'folder/sub/2.txt') f4 = os.path.join(t, 'folder2/3.txt') for f in [f1, f2, f3, f4]: self.assertTrue(f in folder_texts) def test_search_tree_with_directory_ext_match(self): files = ( 'object-server/object-server.conf-base', 'object-server/1.conf.d/base.conf', 'object-server/1.conf.d/1.conf', 'object-server/2.conf.d/base.conf', 'object-server/2.conf.d/2.conf', 'object-server/3.conf.d/base.conf', 'object-server/3.conf.d/3.conf', 'object-server/4.conf.d/base.conf', 'object-server/4.conf.d/4.conf', ) with temptree(files) as t: conf_dirs = utils.search_tree(t, 'object-server', '.conf', dir_ext='conf.d') self.assertEqual(len(conf_dirs), 4) for i in range(4): conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1)) self.assertTrue(conf_dir in conf_dirs) def test_search_tree_conf_dir_with_named_conf_match(self): files = ( 'proxy-server/proxy-server.conf.d/base.conf', 'proxy-server/proxy-server.conf.d/pipeline.conf', 'proxy-server/proxy-noauth.conf.d/base.conf', 'proxy-server/proxy-noauth.conf.d/pipeline.conf', ) with temptree(files) as t: conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf', dir_ext='noauth.conf.d') self.assertEqual(len(conf_dirs), 1) conf_dir = conf_dirs[0] expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d') self.assertEqual(conf_dir, expected) def test_search_tree_conf_dir_pid_with_named_conf_match(self): files = ( 'proxy-server/proxy-server.pid.d', 'proxy-server/proxy-noauth.pid.d', ) with temptree(files) as t: pid_files = utils.search_tree(t, 'proxy-server', exts=['noauth.pid', 'noauth.pid.d']) self.assertEqual(len(pid_files), 1) pid_file = pid_files[0] expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d') self.assertEqual(pid_file, expected) def test_write_file(self): with temptree([]) as t: file_name = os.path.join(t, 'test') utils.write_file(file_name, 'test') with open(file_name, 'r') as f: contents = f.read() self.assertEqual(contents, 'test') # and also subdirs file_name = os.path.join(t, 'subdir/test2') utils.write_file(file_name, 'test2') with open(file_name, 'r') as f: contents = f.read() self.assertEqual(contents, 'test2') # but can't over-write files file_name = os.path.join(t, 'subdir/test2/test3') self.assertRaises(IOError, utils.write_file, file_name, 'test3') def test_remove_file(self): with temptree([]) as t: file_name = os.path.join(t, 'blah.pid') # assert no raise self.assertEqual(os.path.exists(file_name), False) self.assertIsNone(utils.remove_file(file_name)) with open(file_name, 'w') as f: f.write('1') self.assertTrue(os.path.exists(file_name)) self.assertIsNone(utils.remove_file(file_name)) self.assertFalse(os.path.exists(file_name)) def test_human_readable(self): self.assertEqual(utils.human_readable(0), '0') self.assertEqual(utils.human_readable(1), '1') self.assertEqual(utils.human_readable(10), '10') self.assertEqual(utils.human_readable(100), '100') self.assertEqual(utils.human_readable(999), '999') self.assertEqual(utils.human_readable(1024), '1Ki') self.assertEqual(utils.human_readable(1535), '1Ki') self.assertEqual(utils.human_readable(1536), '2Ki') self.assertEqual(utils.human_readable(1047552), '1023Ki') self.assertEqual(utils.human_readable(1048063), '1023Ki') self.assertEqual(utils.human_readable(1048064), '1Mi') self.assertEqual(utils.human_readable(1048576), '1Mi') self.assertEqual(utils.human_readable(1073741824), '1Gi') 
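        # (Rounding behavior shown above: values scale by powers of 1024
        # and round to the nearest whole unit, hence 1535 -> '1Ki' but
        # 1536 -> '2Ki'.)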
self.assertEqual(utils.human_readable(1099511627776), '1Ti') self.assertEqual(utils.human_readable(1125899906842624), '1Pi') self.assertEqual(utils.human_readable(1152921504606846976), '1Ei') self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi') self.assertEqual(utils.human_readable(1208925819614629174706176), '1Yi') self.assertEqual(utils.human_readable(1237940039285380274899124224), '1024Yi') def test_validate_sync_to(self): fname = 'container-sync-realms.conf' fcontents = ''' [US] key = 9ff3b71c849749dbaec4ccdd3cbab62b cluster_dfw1 = http://dfw1.host/v1/ ''' with temptree([fname], [fcontents]) as tempdir: logger = FakeLogger() fpath = os.path.join(tempdir, fname) csr = ContainerSyncRealms(fpath, logger) for realms_conf in (None, csr): for goodurl, result in ( ('http://1.1.1.1/v1/a/c', (None, 'http://1.1.1.1/v1/a/c', None, None)), ('http://1.1.1.1:8080/a/c', (None, 'http://1.1.1.1:8080/a/c', None, None)), ('http://2.2.2.2/a/c', (None, 'http://2.2.2.2/a/c', None, None)), ('https://1.1.1.1/v1/a/c', (None, 'https://1.1.1.1/v1/a/c', None, None)), ('//US/DFW1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//us/DFW1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//us/dfw1/a/c', (None, 'http://dfw1.host/v1/a/c', 'US', '9ff3b71c849749dbaec4ccdd3cbab62b')), ('//', (None, None, None, None)), ('', (None, None, None, None))): if goodurl.startswith('//') and not realms_conf: self.assertEqual( utils.validate_sync_to( goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf), (None, None, None, None)) else: self.assertEqual( utils.validate_sync_to( goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf), result) for badurl, result in ( ('http://1.1.1.1', ('Path required in X-Container-Sync-To', None, None, None)), ('httpq://1.1.1.1/v1/a/c', ('Invalid scheme \'httpq\' in X-Container-Sync-To, ' 'must be "//", "http", or "https".', None, None, None)), ('http://1.1.1.1/v1/a/c?query', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query=param', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.1/v1/a/c?query=param#frag', ('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To', None, None, None)), ('http://1.1.1.2/v1/a/c', ("Invalid host '1.1.1.2' in X-Container-Sync-To", None, None, None)), ('//us/invalid/a/c', ("No cluster endpoint for 'us' 'invalid'", None, None, None)), ('//invalid/dfw1/a/c', ("No realm key for 'invalid'", None, None, None)), ('//us/invalid1/a/', ("Invalid X-Container-Sync-To format " "'//us/invalid1/a/'", None, None, None)), ('//us/invalid1/a', ("Invalid X-Container-Sync-To format " "'//us/invalid1/a'", None, None, None)), ('//us/invalid1/', ("Invalid X-Container-Sync-To format " "'//us/invalid1/'", None, None, None)), ('//us/invalid1', ("Invalid X-Container-Sync-To format " "'//us/invalid1'", None, None, None)), ('//us/', ("Invalid X-Container-Sync-To format " "'//us/'", None, None, None)), ('//us', ("Invalid X-Container-Sync-To format " "'//us'", None, None, None))): if badurl.startswith('//') and not realms_conf: self.assertEqual( utils.validate_sync_to( badurl, ['1.1.1.1', '2.2.2.2'], realms_conf), (None, None, None, 
None)) else: self.assertEqual( utils.validate_sync_to( badurl, ['1.1.1.1', '2.2.2.2'], realms_conf), result) def test_TRUE_VALUES(self): for v in utils.TRUE_VALUES: self.assertEqual(v, v.lower()) def test_config_true_value(self): orig_trues = utils.TRUE_VALUES try: utils.TRUE_VALUES = 'hello world'.split() for val in 'hello world HELLO WORLD'.split(): self.assertTrue(utils.config_true_value(val) is True) self.assertTrue(utils.config_true_value(True) is True) self.assertTrue(utils.config_true_value('foo') is False) self.assertTrue(utils.config_true_value(False) is False) finally: utils.TRUE_VALUES = orig_trues def test_config_positive_int_value(self): expectations = { # value : expected, u'1': 1, b'1': 1, 1: 1, u'2': 2, b'2': 2, u'1024': 1024, b'1024': 1024, u'0': ValueError, b'0': ValueError, u'-1': ValueError, b'-1': ValueError, u'0x01': ValueError, b'0x01': ValueError, u'asdf': ValueError, b'asdf': ValueError, None: ValueError, 0: ValueError, -1: ValueError, u'1.2': ValueError, # string expresses float should be value error b'1.2': ValueError, # string expresses float should be value error } for value, expected in expectations.items(): try: rv = utils.config_positive_int_value(value) except Exception as e: if e.__class__ is not expected: raise else: self.assertEqual( 'Config option must be an positive int number, ' 'not "%s".' % value, e.args[0]) else: self.assertEqual(expected, rv) def test_config_auto_int_value(self): expectations = { # (value, default) : expected, ('1', 0): 1, (1, 0): 1, ('asdf', 0): ValueError, ('auto', 1): 1, ('AutO', 1): 1, ('Aut0', 1): ValueError, (None, 1): 1, } for (value, default), expected in expectations.items(): try: rv = utils.config_auto_int_value(value, default) except Exception as e: if e.__class__ is not expected: raise else: self.assertEqual(expected, rv) def test_streq_const_time(self): self.assertTrue(utils.streq_const_time('abc123', 'abc123')) self.assertFalse(utils.streq_const_time('a', 'aaaaa')) self.assertFalse(utils.streq_const_time('ABC123', 'abc123')) def test_quorum_size(self): expected_sizes = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3} got_sizes = dict([(n, utils.quorum_size(n)) for n in expected_sizes]) self.assertEqual(expected_sizes, got_sizes) def test_majority_size(self): expected_sizes = {1: 1, 2: 2, 3: 2, 4: 3, 5: 3} got_sizes = dict([(n, utils.majority_size(n)) for n in expected_sizes]) self.assertEqual(expected_sizes, got_sizes) def test_rsync_ip_ipv4_localhost(self): self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1') def test_rsync_ip_ipv6_random_ip(self): self.assertEqual( utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'), '[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]') def test_rsync_ip_ipv6_ipv4_compatible(self): self.assertEqual( utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]') def test_rsync_module_interpolation(self): fake_device = {'ip': '127.0.0.1', 'port': 11, 'replication_ip': '127.0.0.2', 'replication_port': 12, 'region': '1', 'zone': '2', 'device': 'sda1', 'meta': 'just_a_string'} self.assertEqual( utils.rsync_module_interpolation('{ip}', fake_device), '127.0.0.1') self.assertEqual( utils.rsync_module_interpolation('{port}', fake_device), '11') self.assertEqual( utils.rsync_module_interpolation('{replication_ip}', fake_device), '127.0.0.2') self.assertEqual( utils.rsync_module_interpolation('{replication_port}', fake_device), '12') self.assertEqual( utils.rsync_module_interpolation('{region}', fake_device), '1') self.assertEqual( utils.rsync_module_interpolation('{zone}', fake_device), '2') 
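        # (Template semantics, as the assertions here show: '{field}'
        # placeholders are filled from the device dict, e.g.
        # '{ip}::container{port}' -> '127.0.0.1::container11'.)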
self.assertEqual( utils.rsync_module_interpolation('{device}', fake_device), 'sda1') self.assertEqual( utils.rsync_module_interpolation('{meta}', fake_device), 'just_a_string') self.assertEqual( utils.rsync_module_interpolation('{replication_ip}::object', fake_device), '127.0.0.2::object') self.assertEqual( utils.rsync_module_interpolation('{ip}::container{port}', fake_device), '127.0.0.1::container11') self.assertEqual( utils.rsync_module_interpolation( '{replication_ip}::object_{device}', fake_device), '127.0.0.2::object_sda1') self.assertEqual( utils.rsync_module_interpolation( '127.0.0.3::object_{replication_port}', fake_device), '127.0.0.3::object_12') self.assertRaises(ValueError, utils.rsync_module_interpolation, '{replication_ip}::object_{deivce}', fake_device) def test_fallocate_reserve(self): class StatVFS(object): f_frsize = 1024 f_bavail = 1 f_blocks = 100 def fstatvfs(fd): return StatVFS() orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE orig_fstatvfs = utils.os.fstatvfs try: fallocate = utils.FallocateWrapper(noop=True) utils.os.fstatvfs = fstatvfs # Make sure setting noop, which disables fallocate, also stops the # fallocate_reserve check. # Set the fallocate_reserve to 99% and request an object that is # about 50% the size. With fallocate_reserve off this will succeed. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('99%') self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(500)), 0) # Setting noop to False after the constructor allows us to use # a noop fallocate syscall and still test fallocate_reserve. fallocate.noop = False # Want 1023 reserved, have 1024 * 1 free, so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1023') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0) # Want 1023 reserved, have 512 * 2 free, so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1023') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0) # Want 1024 reserved, have 1024 * 1 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 1024 reserved, have 512 * 2 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1024') StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 2048 reserved, have 1024 * 1 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2048') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 2048 reserved, have 512 * 2 free, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2048') 
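            # (Reserve arithmetic exercised by these cases: free space is
            # f_frsize * f_bavail, and the request fails with ENOSPC when
            # free - requested <= reserve -- here 512 * 2 bytes free
            # against a 2048 byte reserve must fail.)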
StatVFS.f_frsize = 512 StatVFS.f_bavail = 2 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 1023 reserved, have 1024 * 1 free, but file size is 1, so # fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1023') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(1)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 1023 <= 1023' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 1022 reserved, have 1024 * 1 free, and file size is 1, so # succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1022') StatVFS.f_frsize = 1024 StatVFS.f_bavail = 1 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0) # Want 1% reserved, have 100 bytes * 2/100 free, and file size is # 99, so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 2 StatVFS.f_blocks = 100 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(99)), 0) # Want 2% reserved, have 50 bytes * 2/50 free, and file size is 49, # so succeeds utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2%') StatVFS.f_frsize = 50 StatVFS.f_bavail = 2 StatVFS.f_blocks = 50 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(49)), 0) # Want 100% reserved, have 100 * 100/100 free, and file size is 0, # so fails. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('100%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 100 StatVFS.f_blocks = 100 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(0)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 100 <= 100' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 1% reserved, have 100 * 2/100 free, and file size is 101, # so fails. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('1%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 2 StatVFS.f_blocks = 100 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(101)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 0.99 <= 1' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # is 100, so fails utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('98%') StatVFS.f_frsize = 100 StatVFS.f_bavail = 99 StatVFS.f_blocks = 100 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(100)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 98 <= 98' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) # Want 2% reserved, have 1000 bytes * 21/1000 free, and file size # is 999, so succeeds. utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2%') StatVFS.f_frsize = 1000 StatVFS.f_bavail = 21 StatVFS.f_blocks = 1000 self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(999)), 0) # Want 2% resereved, have 1000 bytes * 21/1000 free, and file size # is 1000, so fails. 
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value('2%') StatVFS.f_frsize = 1000 StatVFS.f_bavail = 21 StatVFS.f_blocks = 1000 with self.assertRaises(OSError) as catcher: fallocate(0, 1, 0, ctypes.c_uint64(1000)) self.assertEqual( str(catcher.exception), '[Errno %d] FALLOCATE_RESERVE fail 2 <= 2' % errno.ENOSPC) self.assertEqual(catcher.exception.errno, errno.ENOSPC) finally: utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE utils.os.fstatvfs = orig_fstatvfs def test_fallocate_func(self): class FallocateWrapper(object): def __init__(self): self.last_call = None def __call__(self, *args): self.last_call = list(args) self.last_call[-1] = self.last_call[-1].value return 0 with patch.object(utils, '_sys_fallocate', FallocateWrapper()): utils._sys_fallocate = FallocateWrapper() # Ensure fallocate calls _sys_fallocate even with 0 bytes utils._sys_fallocate.last_call = None utils.fallocate(1234, 0) self.assertEqual(utils._sys_fallocate.last_call, [1234, 1, 0, 0]) # Ensure fallocate calls _sys_fallocate even with negative bytes utils._sys_fallocate.last_call = None utils.fallocate(1234, -5678) self.assertEqual(utils._sys_fallocate.last_call, [1234, 1, 0, 0]) # Ensure fallocate calls _sys_fallocate properly with positive # bytes utils._sys_fallocate.last_call = None utils.fallocate(1234, 1) self.assertEqual(utils._sys_fallocate.last_call, [1234, 1, 0, 1]) utils._sys_fallocate.last_call = None utils.fallocate(1234, 10 * 1024 * 1024 * 1024) self.assertEqual(utils._sys_fallocate.last_call, [1234, 1, 0, 10 * 1024 * 1024 * 1024]) def test_generate_trans_id(self): fake_time = 1366428370.5163341 with patch.object(utils.time, 'time', return_value=fake_time): trans_id = utils.generate_trans_id('') self.assertEqual(len(trans_id), 34) self.assertEqual(trans_id[:2], 'tx') self.assertEqual(trans_id[23], '-') self.assertEqual(int(trans_id[24:], 16), int(fake_time)) with patch.object(utils.time, 'time', return_value=fake_time): trans_id = utils.generate_trans_id('-suffix') self.assertEqual(len(trans_id), 41) self.assertEqual(trans_id[:2], 'tx') self.assertEqual(trans_id[34:], '-suffix') self.assertEqual(trans_id[23], '-') self.assertEqual(int(trans_id[24:34], 16), int(fake_time)) def test_get_trans_id_time(self): ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e') self.assertIsNone(ts) ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06') self.assertEqual(ts, 1366428678) self.assertEqual( time.asctime(time.gmtime(ts)) + ' UTC', 'Sat Apr 20 03:31:18 2013 UTC') ts = utils.get_trans_id_time( 'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix') self.assertEqual(ts, 1366428678) self.assertEqual( time.asctime(time.gmtime(ts)) + ' UTC', 'Sat Apr 20 03:31:18 2013 UTC') ts = utils.get_trans_id_time('') self.assertIsNone(ts) ts = utils.get_trans_id_time('garbage') self.assertIsNone(ts) ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright') self.assertIsNone(ts) def test_config_fallocate_value(self): fallocate_value, is_percent = utils.config_fallocate_value('10%') self.assertEqual(fallocate_value, 10) self.assertTrue(is_percent) fallocate_value, is_percent = utils.config_fallocate_value('10') self.assertEqual(fallocate_value, 10) self.assertFalse(is_percent) try: fallocate_value, is_percent = utils.config_fallocate_value('ab%') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: ab% is an invalid value for ' 'fallocate_reserve.') try: fallocate_value, is_percent = utils.config_fallocate_value('ab') except ValueError as err: exc = err 
self.assertEqual(str(exc), 'Error: ab is an invalid value for ' 'fallocate_reserve.') try: fallocate_value, is_percent = utils.config_fallocate_value('1%%') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: 1%% is an invalid value for ' 'fallocate_reserve.') try: fallocate_value, is_percent = utils.config_fallocate_value('10.0') except ValueError as err: exc = err self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for ' 'fallocate_reserve.') fallocate_value, is_percent = utils.config_fallocate_value('10.5%') self.assertEqual(fallocate_value, 10.5) self.assertTrue(is_percent) fallocate_value, is_percent = utils.config_fallocate_value('10.000%') self.assertEqual(fallocate_value, 10.000) self.assertTrue(is_percent) def test_tpool_reraise(self): with patch.object(utils.tpool, 'execute', lambda f: f()): self.assertTrue( utils.tpool_reraise(MagicMock(return_value='test1')), 'test1') self.assertRaises( Exception, utils.tpool_reraise, MagicMock(side_effect=Exception('test2'))) self.assertRaises( BaseException, utils.tpool_reraise, MagicMock(side_effect=BaseException('test3'))) def test_lock_file(self): flags = os.O_CREAT | os.O_RDWR with NamedTemporaryFile(delete=False) as nt: nt.write(b"test string") nt.flush() nt.close() with utils.lock_file(nt.name, unlink=False) as f: self.assertEqual(f.read(), b"test string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises(IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) with utils.lock_file(nt.name, unlink=False, append=True) as f: f.seek(0) self.assertEqual(f.read(), b"test string") f.seek(0) f.write(b"\nanother string") f.flush() f.seek(0) self.assertEqual(f.read(), b"test string\nanother string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises(IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) with utils.lock_file(nt.name, timeout=3, unlink=False) as f: try: with utils.lock_file( nt.name, timeout=1, unlink=False) as f: self.assertTrue( False, "Expected LockTimeout exception") except LockTimeout: pass with utils.lock_file(nt.name, unlink=True) as f: self.assertEqual(f.read(), b"test string\nanother string") # we have a lock, now let's try to get a newer one fd = os.open(nt.name, flags) self.assertRaises( IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) self.assertRaises(OSError, os.remove, nt.name) def test_lock_file_unlinked_after_open(self): os_open = os.open first_pass = [True] def deleting_open(filename, flags): # unlink the file after it's opened. once. 
fd = os_open(filename, flags) if first_pass[0]: os.unlink(filename) first_pass[0] = False return fd with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.open', deleting_open): with utils.lock_file(nt.name, unlink=True) as f: self.assertNotEqual(os.fstat(nt.fileno()).st_ino, os.fstat(f.fileno()).st_ino) first_pass = [True] def recreating_open(filename, flags): # unlink and recreate the file after it's opened fd = os_open(filename, flags) if first_pass[0]: os.unlink(filename) os.close(os_open(filename, os.O_CREAT | os.O_RDWR)) first_pass[0] = False return fd with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.open', recreating_open): with utils.lock_file(nt.name, unlink=True) as f: self.assertNotEqual(os.fstat(nt.fileno()).st_ino, os.fstat(f.fileno()).st_ino) def test_lock_file_held_on_unlink(self): os_unlink = os.unlink def flocking_unlink(filename): # make sure the lock is held when we unlink fd = os.open(filename, os.O_RDWR) self.assertRaises( IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB) os.close(fd) os_unlink(filename) with NamedTemporaryFile(delete=False) as nt: with mock.patch('os.unlink', flocking_unlink): with utils.lock_file(nt.name, unlink=True): pass def test_lock_file_no_unlink_if_fail(self): os_open = os.open with NamedTemporaryFile(delete=True) as nt: def lock_on_open(filename, flags): # lock the file on another fd after it's opened. fd = os_open(filename, flags) fd2 = os_open(filename, flags) fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB) return fd try: timedout = False with mock.patch('os.open', lock_on_open): with utils.lock_file(nt.name, unlink=False, timeout=0.01): pass except LockTimeout: timedout = True self.assertTrue(timedout) self.assertTrue(os.path.exists(nt.name)) def test_ismount_path_does_not_exist(self): tmpdir = mkdtemp() try: self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar'))) finally: shutil.rmtree(tmpdir) def test_ismount_path_not_mount(self): tmpdir = mkdtemp() try: self.assertFalse(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_ismount_path_error(self): def _mock_os_lstat(path): raise OSError(13, "foo") tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): # Raises exception with _raw -- see next test. utils.ismount(tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_raw_path_error(self): def _mock_os_lstat(path): raise OSError(13, "foo") tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertRaises(OSError, utils.ismount_raw, tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_path_is_symlink(self): tmpdir = mkdtemp() try: link = os.path.join(tmpdir, "tmp") os.symlink(tempfile.gettempdir(), link) self.assertFalse(utils.ismount(link)) finally: shutil.rmtree(tmpdir) def test_ismount_path_is_root(self): self.assertTrue(utils.ismount('/')) def test_ismount_parent_path_error(self): _os_lstat = os.lstat def _mock_os_lstat(path): if path.endswith(".."): raise OSError(13, "foo") else: return _os_lstat(path) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): # Raises exception with _raw -- see next test. 
utils.ismount(tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_raw_parent_path_error(self): _os_lstat = os.lstat def _mock_os_lstat(path): if path.endswith(".."): raise OSError(13, "foo") else: return _os_lstat(path) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertRaises(OSError, utils.ismount_raw, tmpdir) finally: shutil.rmtree(tmpdir) def test_ismount_successes_dev(self): _os_lstat = os.lstat class MockStat(object): def __init__(self, mode, dev, ino): self.st_mode = mode self.st_dev = dev self.st_ino = ino def _mock_os_lstat(path): if path.endswith(".."): parent = _os_lstat(path) return MockStat(parent.st_mode, parent.st_dev + 1, parent.st_ino) else: return _os_lstat(path) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_ismount_successes_ino(self): _os_lstat = os.lstat class MockStat(object): def __init__(self, mode, dev, ino): self.st_mode = mode self.st_dev = dev self.st_ino = ino def _mock_os_lstat(path): if path.endswith(".."): return _os_lstat(path) else: parent_path = os.path.join(path, "..") child = _os_lstat(path) parent = _os_lstat(parent_path) return MockStat(child.st_mode, parent.st_ino, child.st_dev) tmpdir = mkdtemp() try: with patch("os.lstat", _mock_os_lstat): self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_ismount_successes_stubfile(self): tmpdir = mkdtemp() fname = os.path.join(tmpdir, ".ismount") try: with open(fname, "w") as stubfile: stubfile.write("") self.assertTrue(utils.ismount(tmpdir)) finally: shutil.rmtree(tmpdir) def test_parse_content_type(self): self.assertEqual(utils.parse_content_type('text/plain'), ('text/plain', [])) self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'), ('text/plain', [('charset', 'utf-8')])) self.assertEqual( utils.parse_content_type('text/plain;hello="world";charset=utf-8'), ('text/plain', [('hello', '"world"'), ('charset', 'utf-8')])) self.assertEqual( utils.parse_content_type('text/plain; hello="world"; a=b'), ('text/plain', [('hello', '"world"'), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x="\""; a=b'), ('text/plain', [('x', r'"\""'), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x; a=b'), ('text/plain', [('x', ''), ('a', 'b')])) self.assertEqual( utils.parse_content_type(r'text/plain; x="\""; a'), ('text/plain', [('x', r'"\""'), ('a', '')])) def test_override_bytes_from_content_type(self): listing_dict = { 'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv', 'content_type': 'text/plain; hello="world"; swift_bytes=15'} utils.override_bytes_from_content_type(listing_dict, logger=FakeLogger()) self.assertEqual(listing_dict['bytes'], 15) self.assertEqual(listing_dict['content_type'], 'text/plain;hello="world"') listing_dict = { 'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv', 'content_type': 'text/plain; hello="world"; swift_bytes=hey'} utils.override_bytes_from_content_type(listing_dict, logger=FakeLogger()) self.assertEqual(listing_dict['bytes'], 1234) self.assertEqual(listing_dict['content_type'], 'text/plain;hello="world"') def test_extract_swift_bytes(self): scenarios = { # maps input value -> expected returned tuple '': ('', None), 'text/plain': ('text/plain', None), 'text/plain; other=thing': ('text/plain;other=thing', None), 'text/plain; swift_bytes=123': ('text/plain', '123'), 'text/plain; other=thing;swift_bytes=123': ('text/plain;other=thing', '123'), 'text/plain; swift_bytes=123; other=thing': 
('text/plain;other=thing', '123'), 'text/plain; swift_bytes=123; swift_bytes=456': ('text/plain', '456'), 'text/plain; swift_bytes=123; other=thing;swift_bytes=456': ('text/plain;other=thing', '456')} for test_value, expected in scenarios.items(): self.assertEqual(expected, utils.extract_swift_bytes(test_value)) def test_clean_content_type(self): subtests = { '': '', 'text/plain': 'text/plain', 'text/plain; someother=thing': 'text/plain; someother=thing', 'text/plain; swift_bytes=123': 'text/plain', 'text/plain; someother=thing; swift_bytes=123': 'text/plain; someother=thing', # Since Swift always tacks on the swift_bytes, clean_content_type() # only strips swift_bytes if it's last. The next item simply shows # that if for some other odd reason it's not last, # clean_content_type() will not remove it from the header. 'text/plain; swift_bytes=123; someother=thing': 'text/plain; swift_bytes=123; someother=thing'} for before, after in subtests.items(): self.assertEqual(utils.clean_content_type(before), after) def test_get_valid_utf8_str(self): def do_test(input_value, expected): actual = utils.get_valid_utf8_str(input_value) self.assertEqual(expected, actual) self.assertIsInstance(actual, six.binary_type) actual.decode('utf-8') do_test(b'abc', b'abc') do_test(u'abc', b'abc') do_test(u'\uc77c\uc601', b'\xec\x9d\xbc\xec\x98\x81') do_test(b'\xec\x9d\xbc\xec\x98\x81', b'\xec\x9d\xbc\xec\x98\x81') # test some invalid UTF-8 do_test(b'\xec\x9d\xbc\xec\x98', b'\xec\x9d\xbc\xef\xbf\xbd') # check surrogate pairs, too do_test(u'\U0001f0a1', b'\xf0\x9f\x82\xa1'), do_test(u'\uD83C\uDCA1', b'\xf0\x9f\x82\xa1'), do_test(b'\xf0\x9f\x82\xa1', b'\xf0\x9f\x82\xa1'), do_test(b'\xed\xa0\xbc\xed\xb2\xa1', b'\xf0\x9f\x82\xa1'), def test_quote_bytes(self): self.assertEqual(b'/v1/a/c3/subdirx/', utils.quote(b'/v1/a/c3/subdirx/')) self.assertEqual(b'/v1/a%26b/c3/subdirx/', utils.quote(b'/v1/a&b/c3/subdirx/')) self.assertEqual(b'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F', utils.quote(b'/v1/a&b/c3/subdirx/', safe='&')) self.assertEqual(b'abc_%EC%9D%BC%EC%98%81', utils.quote(u'abc_\uc77c\uc601'.encode('utf8'))) # Invalid utf8 is parsed as latin1, then re-encoded as utf8?? self.assertEqual(b'%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD', utils.quote(u'\uc77c\uc601'.encode('utf8')[::-1])) def test_quote_unicode(self): self.assertEqual(u'/v1/a/c3/subdirx/', utils.quote(u'/v1/a/c3/subdirx/')) self.assertEqual(u'/v1/a%26b/c3/subdirx/', utils.quote(u'/v1/a&b/c3/subdirx/')) self.assertEqual(u'%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F', utils.quote(u'/v1/a&b/c3/subdirx/', safe='&')) self.assertEqual(u'abc_%EC%9D%BC%EC%98%81', utils.quote(u'abc_\uc77c\uc601')) def test_get_hmac(self): self.assertEqual( utils.get_hmac('GET', '/path', 1, 'abc'), 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f') def test_parse_override_options(self): # When override_<thing> is passed in, it takes precedence. opts = utils.parse_override_options( override_policies=[0, 1], override_devices=['sda', 'sdb'], override_partitions=[100, 200], policies='0,1,2,3', devices='sda,sdb,sdc,sdd', partitions='100,200,300,400') self.assertEqual(opts.policies, [0, 1]) self.assertEqual(opts.devices, ['sda', 'sdb']) self.assertEqual(opts.partitions, [100, 200]) # When override_<thing> is passed in, it applies even in run-once # mode. 
opts = utils.parse_override_options( once=True, override_policies=[0, 1], override_devices=['sda', 'sdb'], override_partitions=[100, 200], policies='0,1,2,3', devices='sda,sdb,sdc,sdd', partitions='100,200,300,400') self.assertEqual(opts.policies, [0, 1]) self.assertEqual(opts.devices, ['sda', 'sdb']) self.assertEqual(opts.partitions, [100, 200]) # In run-once mode, we honor the passed-in overrides. opts = utils.parse_override_options( once=True, policies='0,1,2,3', devices='sda,sdb,sdc,sdd', partitions='100,200,300,400') self.assertEqual(opts.policies, [0, 1, 2, 3]) self.assertEqual(opts.devices, ['sda', 'sdb', 'sdc', 'sdd']) self.assertEqual(opts.partitions, [100, 200, 300, 400]) # In run-forever mode, we ignore the passed-in overrides. opts = utils.parse_override_options( policies='0,1,2,3', devices='sda,sdb,sdc,sdd', partitions='100,200,300,400') self.assertEqual(opts.policies, []) self.assertEqual(opts.devices, []) self.assertEqual(opts.partitions, []) def test_get_policy_index(self): # Account has no information about a policy req = Request.blank( '/sda1/p/a', environ={'REQUEST_METHOD': 'GET'}) res = Response() self.assertIsNone(utils.get_policy_index(req.headers, res.headers)) # The policy of a container can be specified by the response header req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) res = Response(headers={'X-Backend-Storage-Policy-Index': '1'}) self.assertEqual('1', utils.get_policy_index(req.headers, res.headers)) # The policy of an object to be created can be specified by the request # header req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Backend-Storage-Policy-Index': '2'}) res = Response() self.assertEqual('2', utils.get_policy_index(req.headers, res.headers)) def test_get_log_line(self): req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'}) res = Response() trans_time = 1.2 additional_info = 'some information' server_pid = 1234 exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \ '/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -' with mock.patch( 'time.gmtime', mock.MagicMock(side_effect=[time.gmtime(10001.0)])): with mock.patch( 'os.getpid', mock.MagicMock(return_value=server_pid)): self.assertEqual( exp_line, utils.get_log_line(req, res, trans_time, additional_info)) def test_cache_from_env(self): # should never get logging when swift.cache is found env = {'swift.cache': 42} logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env, False)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertEqual(42, utils.cache_from_env(env, True)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) # check allow_none controls logging when swift.cache is not found err_msg = 'ERROR: swift.cache could not be found in env!' 
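        # Illustrative recap (comments only, not part of the original test)
        # of the behaviour pinned down here: cache_from_env() returns
        # env['swift.cache'] when present; otherwise it returns None and
        # logs an error unless the second (allow_none) argument is True:
        #
        #     >>> utils.cache_from_env({'swift.cache': 42})
        #     42
        #     >>> utils.cache_from_env({})        # returns None, logs error
        #     >>> utils.cache_from_env({}, True)  # returns None, stays quiet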
env = {} logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env)) self.assertTrue(err_msg in logger.get_lines_for_level('error')) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env, False)) self.assertTrue(err_msg in logger.get_lines_for_level('error')) logger = FakeLogger() with mock.patch('swift.common.utils.logging', logger): self.assertIsNone(utils.cache_from_env(env, True)) self.assertEqual(0, len(logger.get_lines_for_level('error'))) def test_fsync_dir(self): tempdir = None fd = None try: tempdir = mkdtemp() fd, temppath = tempfile.mkstemp(dir=tempdir) _mock_fsync = mock.Mock() _mock_close = mock.Mock() with patch('swift.common.utils.fsync', _mock_fsync): with patch('os.close', _mock_close): utils.fsync_dir(tempdir) self.assertTrue(_mock_fsync.called) self.assertTrue(_mock_close.called) self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int)) self.assertEqual(_mock_fsync.call_args[0][0], _mock_close.call_args[0][0]) # Not a directory - arg is file path self.assertRaises(OSError, utils.fsync_dir, temppath) logger = FakeLogger() def _mock_fsync(fd): raise OSError(errno.EBADF, os.strerror(errno.EBADF)) with patch('swift.common.utils.fsync', _mock_fsync): with mock.patch('swift.common.utils.logging', logger): utils.fsync_dir(tempdir) self.assertEqual(1, len(logger.get_lines_for_level('warning'))) finally: if fd is not None: os.close(fd) os.unlink(temppath) if tempdir: os.rmdir(tempdir) def test_renamer_with_fsync_dir(self): tempdir = None try: tempdir = mkdtemp() # Simulate part of object path already existing part_dir = os.path.join(tempdir, 'objects/1234/') os.makedirs(part_dir) obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32) obj_path = os.path.join(obj_dir, '1425276031.12345.data') # Object dir had to be created _m_os_rename = mock.Mock() _m_fsync_dir = mock.Mock() with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.renamer("fake_path", obj_path) _m_os_rename.assert_called_once_with('fake_path', obj_path) # fsync_dir on parents of all newly create dirs self.assertEqual(_m_fsync_dir.call_count, 3) # Object dir existed _m_os_rename.reset_mock() _m_fsync_dir.reset_mock() with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.renamer("fake_path", obj_path) _m_os_rename.assert_called_once_with('fake_path', obj_path) # fsync_dir only on the leaf dir self.assertEqual(_m_fsync_dir.call_count, 1) finally: if tempdir: shutil.rmtree(tempdir) def test_renamer_when_fsync_is_false(self): _m_os_rename = mock.Mock() _m_fsync_dir = mock.Mock() _m_makedirs_count = mock.Mock(return_value=2) with patch('os.rename', _m_os_rename): with patch('swift.common.utils.fsync_dir', _m_fsync_dir): with patch('swift.common.utils.makedirs_count', _m_makedirs_count): utils.renamer("fake_path", "/a/b/c.data", fsync=False) _m_makedirs_count.assert_called_once_with("/a/b") _m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data") self.assertFalse(_m_fsync_dir.called) def test_makedirs_count(self): tempdir = None fd = None try: tempdir = mkdtemp() os.makedirs(os.path.join(tempdir, 'a/b')) # 4 new dirs created dirpath = os.path.join(tempdir, 'a/b/1/2/3/4') ret = utils.makedirs_count(dirpath) self.assertEqual(ret, 4) # no new dirs created - dir already exists ret = utils.makedirs_count(dirpath) self.assertEqual(ret, 0) # path exists and is a file fd, temppath = 
tempfile.mkstemp(dir=dirpath) os.close(fd) self.assertRaises(OSError, utils.makedirs_count, temppath) finally: if tempdir: shutil.rmtree(tempdir) def test_modify_priority(self): pid = os.getpid() logger = debug_logger() called = {} def _fake_setpriority(*args): called['setpriority'] = args def _fake_syscall(*args): called['syscall'] = args # Test if current architecture supports changing of priority try: utils.NR_ioprio_set() except OSError as e: raise unittest.SkipTest(e) with patch('swift.common.utils._libc_setpriority', _fake_setpriority), \ patch('swift.common.utils._posix_syscall', _fake_syscall): called = {} # not set / default utils.modify_priority({}, logger) self.assertEqual(called, {}) called = {} # just nice utils.modify_priority({'nice_priority': '1'}, logger) self.assertEqual(called, {'setpriority': (0, pid, 1)}) called = {} # just ionice class uses default priority 0 utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger) architecture = os.uname()[4] arch_bits = platform.architecture()[0] if architecture == 'x86_64' and arch_bits == '64bit': self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)}) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)}) else: self.fail("Unexpected call: %r" % called) called = {} # just ionice priority is ignored utils.modify_priority({'ionice_priority': '4'}, logger) self.assertEqual(called, {}) called = {} # bad ionice class utils.modify_priority({'ionice_class': 'class_foo'}, logger) self.assertEqual(called, {}) called = {} # ionice class & priority utils.modify_priority({ 'ionice_class': 'IOPRIO_CLASS_BE', 'ionice_priority': '4', }, logger) if architecture == 'x86_64' and arch_bits == '64bit': self.assertEqual(called, { 'syscall': (251, 1, pid, 2 << 13 | 4) }) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, { 'syscall': (30, 1, pid, 2 << 13 | 4) }) else: self.fail("Unexpected call: %r" % called) called = {} # all utils.modify_priority({ 'nice_priority': '-15', 'ionice_class': 'IOPRIO_CLASS_IDLE', 'ionice_priority': '6', }, logger) if architecture == 'x86_64' and arch_bits == '64bit': self.assertEqual(called, { 'setpriority': (0, pid, -15), 'syscall': (251, 1, pid, 3 << 13 | 6), }) elif architecture == 'aarch64' and arch_bits == '64bit': self.assertEqual(called, { 'setpriority': (0, pid, -15), 'syscall': (30, 1, pid, 3 << 13 | 6), }) else: self.fail("Unexpected call: %r" % called) def test__NR_ioprio_set(self): with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \ patch('platform.architecture', return_value=('64bit', '')): self.assertEqual(251, utils.NR_ioprio_set()) with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \ patch('platform.architecture', return_value=('32bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \ patch('platform.architecture', return_value=('64bit', '')): self.assertEqual(30, utils.NR_ioprio_set()) with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \ patch('platform.architecture', return_value=('32bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) with patch('os.uname', return_value=('', '', '', '', 'alpha')), \ patch('platform.architecture', return_value=('64bit', '')): self.assertRaises(OSError, utils.NR_ioprio_set) @requires_o_tmpfile_support_in_tmp def test_link_fd_to_path_linkat_success(self): tempdir = mkdtemp() fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY) data = b"I'm 
whatever Gotham needs me to be" _m_fsync_dir = mock.Mock() try: os.write(fd, data) # fd is O_WRONLY self.assertRaises(OSError, os.read, fd, 1) file_path = os.path.join(tempdir, uuid4().hex) with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir): utils.link_fd_to_path(fd, file_path, 1) with open(file_path, 'rb') as f: self.assertEqual(f.read(), data) self.assertEqual(_m_fsync_dir.call_count, 2) finally: os.close(fd) shutil.rmtree(tempdir) @requires_o_tmpfile_support_in_tmp def test_link_fd_to_path_target_exists(self): tempdir = mkdtemp() # Create and write to a file fd, path = tempfile.mkstemp(dir=tempdir) os.write(fd, b"hello world") os.fsync(fd) os.close(fd) self.assertTrue(os.path.exists(path)) fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY) try: os.write(fd, b"bye world") os.fsync(fd) utils.link_fd_to_path(fd, path, 0, fsync=False) # Original file now should have been over-written with open(path, 'rb') as f: self.assertEqual(f.read(), b"bye world") finally: os.close(fd) shutil.rmtree(tempdir) @requires_o_tmpfile_support def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self): _m_linkat = mock.Mock( side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES))) with mock.patch('swift.common.utils.linkat', _m_linkat): try: utils.link_fd_to_path(0, '/path', 1) except IOError as err: self.assertEqual(err.errno, errno.EACCES) else: self.fail("Expecting IOError exception") self.assertTrue(_m_linkat.called) @requires_o_tmpfile_support_in_tmp def test_linkat_race_dir_not_exists(self): tempdir = mkdtemp() target_dir = os.path.join(tempdir, uuid4().hex) target_path = os.path.join(target_dir, uuid4().hex) os.mkdir(target_dir) fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY) # Simulating directory deletion by other backend process os.rmdir(target_dir) self.assertFalse(os.path.exists(target_dir)) try: utils.link_fd_to_path(fd, target_path, 1) self.assertTrue(os.path.exists(target_dir)) self.assertTrue(os.path.exists(target_path)) finally: os.close(fd) shutil.rmtree(tempdir) def test_safe_json_loads(self): expectations = { None: None, '': None, 0: None, 1: None, '"asdf"': 'asdf', '[]': [], '{}': {}, "{'foo': 'bar'}": None, '{"foo": "bar"}': {'foo': 'bar'}, } failures = [] for value, expected in expectations.items(): try: result = utils.safe_json_loads(value) except Exception as e: # it's called safe, if it blows up the test blows up self.fail('%r caused safe method to throw %r!' 
% ( value, e)) try: self.assertEqual(expected, result) except AssertionError: failures.append('%r => %r (expected %r)' % ( value, result, expected)) if failures: self.fail('Invalid results from pure function:\n%s' % '\n'.join(failures)) def test_strict_b64decode(self): expectations = { None: ValueError, 0: ValueError, b'': b'', u'': b'', b'A': ValueError, b'AA': ValueError, b'AAA': ValueError, b'AAAA': b'\x00\x00\x00', u'AAAA': b'\x00\x00\x00', b'////': b'\xff\xff\xff', u'////': b'\xff\xff\xff', b'A===': ValueError, b'AA==': b'\x00', b'AAA=': b'\x00\x00', b' AAAA': ValueError, b'AAAA ': ValueError, b'AAAA============': b'\x00\x00\x00', b'AA&AA==': ValueError, b'====': b'', } failures = [] for value, expected in expectations.items(): try: result = utils.strict_b64decode(value) except Exception as e: if inspect.isclass(expected) and issubclass( expected, Exception): if not isinstance(e, expected): failures.append('%r raised %r (expected to raise %r)' % (value, e, expected)) else: failures.append('%r raised %r (expected to return %r)' % (value, e, expected)) else: if inspect.isclass(expected) and issubclass( expected, Exception): failures.append('%r => %r (expected to raise %r)' % (value, result, expected)) elif result != expected: failures.append('%r => %r (expected %r)' % ( value, result, expected)) if failures: self.fail('Invalid results from pure function:\n%s' % '\n'.join(failures)) def test_replace_partition_in_path(self): # Check for new part = part * 2 old = '/s/n/d/o/700/c77/af088baea4806dcaba30bf07d9e64c77/f' new = '/s/n/d/o/1400/c77/af088baea4806dcaba30bf07d9e64c77/f' # Expected outcome self.assertEqual(utils.replace_partition_in_path(old, 11), new) # Make sure there is no change if the part power didn't change self.assertEqual(utils.replace_partition_in_path(old, 10), old) self.assertEqual(utils.replace_partition_in_path(new, 11), new) # Check for new part = part * 2 + 1 old = '/s/n/d/o/693/c77/ad708baea4806dcaba30bf07d9e64c77/f' new = '/s/n/d/o/1387/c77/ad708baea4806dcaba30bf07d9e64c77/f' # Expected outcome self.assertEqual(utils.replace_partition_in_path(old, 11), new) # Make sure there is no change if the part power didn't change self.assertEqual(utils.replace_partition_in_path(old, 10), old) self.assertEqual(utils.replace_partition_in_path(new, 11), new) def test_round_robin_iter(self): it1 = iter([1, 2, 3]) it2 = iter([4, 5]) it3 = iter([6, 7, 8, 9]) it4 = iter([]) rr_its = utils.round_robin_iter([it1, it2, it3, it4]) got = list(rr_its) # Expect that items get fetched in a round-robin fashion from the # iterators self.assertListEqual([1, 4, 6, 2, 5, 7, 3, 8, 9], got) class ResellerConfReader(unittest.TestCase): def setUp(self): self.default_rules = {'operator_roles': ['admin', 'swiftoperator'], 'service_roles': [], 'require_group': ''} def test_defaults(self): conf = {} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_']) self.assertEqual(options['AUTH_'], self.default_rules) def test_same_as_default(self): conf = {'reseller_prefix': 'AUTH', 'operator_roles': 'admin, swiftoperator'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_']) self.assertEqual(options['AUTH_'], self.default_rules) def test_single_blank_reseller(self): conf = {'reseller_prefix': ''} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''], self.default_rules) def 
test_single_blank_reseller_with_conf(self): conf = {'reseller_prefix': '', "''operator_roles": 'role1, role2'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''].get('operator_roles'), ['role1', 'role2']) self.assertEqual(options[''].get('service_roles'), self.default_rules.get('service_roles')) self.assertEqual(options[''].get('require_group'), self.default_rules.get('require_group')) def test_multiple_same_resellers(self): conf = {'reseller_prefix': " '' , '' "} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) conf = {'reseller_prefix': '_, _'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['_']) conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', 'PRE2_']) def test_several_resellers_with_conf(self): conf = {'reseller_prefix': 'PRE1, PRE2', 'PRE1_operator_roles': 'role1, role2', 'PRE1_service_roles': 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['PRE1_', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options['PRE1_'].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options['PRE1_'].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['PRE1_'].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_several_resellers_first_blank(self): conf = {'reseller_prefix': " '' , PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_several_resellers_with_blank_comma(self): conf = {'reseller_prefix': "AUTH , '', PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_']) self.assertEqual(set(['admin', 'swiftoperator']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual([], options['AUTH_'].get('service_roles')) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['AUTH_'].get('require_group')) 
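        # Illustrative recap (comments only, not part of the original test):
        # config_read_reseller_options() returns a (prefixes, options) pair,
        # with one options entry per normalised prefix ('' for a blank
        # reseller_prefix, '<NAME>_' otherwise), e.g. roughly:
        #
        #     >>> utils.config_read_reseller_options(
        #     ...     {'reseller_prefix': 'AUTH, PRE2',
        #     ...      'PRE2_operator_roles': 'role5'}, default_rules)
        #     (['AUTH_', 'PRE2_'],
        #      {'AUTH_': {...the default rules...},
        #       'PRE2_': {...'operator_roles': ['role5']...}})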
self.assertEqual('', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_stray_comma(self): conf = {'reseller_prefix': "AUTH ,, PRE2", "''operator_roles": 'role1, role2', "''service_roles": 'role3, role4', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', 'PRE2_']) self.assertEqual(set(['admin', 'swiftoperator']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual([], options['AUTH_'].get('service_roles')) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('', options['AUTH_'].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) def test_multiple_stray_commas_resellers(self): conf = {'reseller_prefix': ' , , ,'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['']) self.assertEqual(options[''], self.default_rules) def test_unprefixed_options(self): conf = {'reseller_prefix': "AUTH , '', PRE2", "operator_roles": 'role1, role2', "service_roles": 'role3, role4', 'require_group': 'auth_blank_group', 'PRE2_operator_roles': 'role5', 'PRE2_service_roles': 'role6', 'PRE2_require_group': 'pre2_group'} prefixes, options = utils.config_read_reseller_options( conf, self.default_rules) self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_']) self.assertEqual(set(['role1', 'role2']), set(options['AUTH_'].get('operator_roles'))) self.assertEqual(set(['role1', 'role2']), set(options[''].get('operator_roles'))) self.assertEqual(['role5'], options['PRE2_'].get('operator_roles')) self.assertEqual(set(['role3', 'role4']), set(options['AUTH_'].get('service_roles'))) self.assertEqual(set(['role3', 'role4']), set(options[''].get('service_roles'))) self.assertEqual(['role6'], options['PRE2_'].get('service_roles')) self.assertEqual('auth_blank_group', options['AUTH_'].get('require_group')) self.assertEqual('auth_blank_group', options[''].get('require_group')) self.assertEqual('pre2_group', options['PRE2_'].get('require_group')) class TestUnlinkOlder(unittest.TestCase): def setUp(self): self.tempdir = mkdtemp() self.mtime = {} self.ts = make_timestamp_iter() def tearDown(self): rmtree(self.tempdir, ignore_errors=True) def touch(self, fpath, mtime=None): self.mtime[fpath] = mtime or next(self.ts) open(fpath, 'w') @contextlib.contextmanager def high_resolution_getmtime(self): orig_getmtime = os.path.getmtime def mock_getmtime(fpath): mtime = self.mtime.get(fpath) if mtime is None: mtime = orig_getmtime(fpath) return mtime with mock.patch('os.path.getmtime', mock_getmtime): yield def test_unlink_older_than_path_not_exists(self): path = os.path.join(self.tempdir, 'does-not-exist') # just make sure it doesn't blow up utils.unlink_older_than(path, next(self.ts)) def test_unlink_older_than_file(self): path = os.path.join(self.tempdir, 'some-file') self.touch(path) with self.assertRaises(OSError) as ctx: utils.unlink_older_than(path, next(self.ts)) self.assertEqual(ctx.exception.errno, errno.ENOTDIR) def test_unlink_older_than_now(self): self.touch(os.path.join(self.tempdir, 'test')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, next(self.ts)) self.assertEqual([], os.listdir(self.tempdir)) def test_unlink_not_old_enough(self): start = next(self.ts) 
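        # Quick sketch (comments only, not part of the original test) of the
        # semantics relied on here: unlink_older_than(path, cutoff) removes
        # directory entries whose mtime is older than cutoff, so the file
        # touched below -- with an mtime newer than `start` -- must survive
        # a cutoff of `start`:
        #
        #     >>> utils.unlink_older_than(self.tempdir, start)
        #     >>> os.listdir(self.tempdir)
        #     ['test']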
self.touch(os.path.join(self.tempdir, 'test')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, start) self.assertEqual(['test'], os.listdir(self.tempdir)) def test_unlink_mixed(self): self.touch(os.path.join(self.tempdir, 'first')) cutoff = next(self.ts) self.touch(os.path.join(self.tempdir, 'second')) with self.high_resolution_getmtime(): utils.unlink_older_than(self.tempdir, cutoff) self.assertEqual(['second'], os.listdir(self.tempdir)) def test_unlink_paths(self): paths = [] for item in ('first', 'second', 'third'): path = os.path.join(self.tempdir, item) self.touch(path) paths.append(path) # don't unlink everyone with self.high_resolution_getmtime(): utils.unlink_paths_older_than(paths[:2], next(self.ts)) self.assertEqual(['third'], os.listdir(self.tempdir)) def test_unlink_empty_paths(self): # just make sure it doesn't blow up utils.unlink_paths_older_than([], next(self.ts)) def test_unlink_not_exists_paths(self): path = os.path.join(self.tempdir, 'does-not-exist') # just make sure it doesn't blow up utils.unlink_paths_older_than([path], next(self.ts)) class TestSwiftInfo(unittest.TestCase): def tearDown(self): utils._swift_info = {} utils._swift_admin_info = {} def test_register_swift_info(self): utils.register_swift_info(foo='bar') utils.register_swift_info(lorem='ipsum') utils.register_swift_info('cap1', cap1_foo='cap1_bar') utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum') self.assertTrue('swift' in utils._swift_info) self.assertTrue('foo' in utils._swift_info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') self.assertTrue('lorem' in utils._swift_info['swift']) self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum') self.assertTrue('cap1' in utils._swift_info) self.assertTrue('cap1_foo' in utils._swift_info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') self.assertTrue('cap1_lorem' in utils._swift_info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum') self.assertRaises(ValueError, utils.register_swift_info, 'admin', foo='bar') self.assertRaises(ValueError, utils.register_swift_info, 'disallowed_sections', disallowed_sections=None) utils.register_swift_info('goodkey', foo='5.6') self.assertRaises(ValueError, utils.register_swift_info, 'bad.key', foo='5.6') data = {'bad.key': '5.6'} self.assertRaises(ValueError, utils.register_swift_info, 'goodkey', **data) def test_get_swift_info(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info() self.assertNotIn('admin', info) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') self.assertIn('cap1', info) self.assertIn('cap1_foo', info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') def test_get_swift_info_with_disallowed_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap3_foo': 'cap3_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3']) self.assertNotIn('admin', info) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(info['swift']['foo'], 'bar') self.assertNotIn('cap1', info) self.assertIn('cap2', info) self.assertIn('cap2_foo', info['cap2']) self.assertEqual(info['cap2']['cap2_foo'], 
'cap2_bar') self.assertNotIn('cap3', info) def test_register_swift_admin_info(self): utils.register_swift_info(admin=True, admin_foo='admin_bar') utils.register_swift_info(admin=True, admin_lorem='admin_ipsum') utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar') utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum') self.assertIn('swift', utils._swift_admin_info) self.assertIn('admin_foo', utils._swift_admin_info['swift']) self.assertEqual( utils._swift_admin_info['swift']['admin_foo'], 'admin_bar') self.assertIn('admin_lorem', utils._swift_admin_info['swift']) self.assertEqual( utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum') self.assertIn('cap1', utils._swift_admin_info) self.assertIn('ac1_foo', utils._swift_admin_info['cap1']) self.assertEqual( utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar') self.assertIn('ac1_lorem', utils._swift_admin_info['cap1']) self.assertEqual( utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum') self.assertNotIn('swift', utils._swift_info) self.assertNotIn('cap1', utils._swift_info) def test_get_swift_admin_info(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info(admin=True) self.assertIn('admin', info) self.assertIn('admin_cap1', info['admin']) self.assertIn('ac1_foo', info['admin']['admin_cap1']) self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar') self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(utils._swift_info['swift']['foo'], 'bar') self.assertIn('cap1', info) self.assertIn('cap1_foo', info['cap1']) self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar') def test_get_swift_admin_info_with_disallowed_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap3_foo': 'cap3_bar'}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info( admin=True, disallowed_sections=['cap1', 'cap3']) self.assertIn('admin', info) self.assertIn('admin_cap1', info['admin']) self.assertIn('ac1_foo', info['admin']['admin_cap1']) self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar') self.assertIn('disallowed_sections', info['admin']) self.assertIn('cap1', info['admin']['disallowed_sections']) self.assertNotIn('cap2', info['admin']['disallowed_sections']) self.assertIn('cap3', info['admin']['disallowed_sections']) self.assertIn('swift', info) self.assertIn('foo', info['swift']) self.assertEqual(info['swift']['foo'], 'bar') self.assertNotIn('cap1', info) self.assertIn('cap2', info) self.assertIn('cap2_foo', info['cap2']) self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar') self.assertNotIn('cap3', info) def test_get_swift_admin_info_with_disallowed_sub_sections(self): utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': {'cap1_foo': 'cap1_bar', 'cap1_moo': 'cap1_baa'}, 'cap2': {'cap2_foo': 'cap2_bar'}, 'cap3': {'cap2_foo': 'cap2_bar'}, 'cap4': {'a': {'b': {'c': 'c'}, 'b.c': 'b.c'}}} utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}} info = utils.get_swift_info( admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3', 'cap4.a.b.c']) self.assertNotIn('cap3', info) self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa') self.assertNotIn('cap1_foo', info['cap1']) self.assertNotIn('c', info['cap4']['a']['b']) self.assertEqual(info['cap4']['a']['b.c'], 'b.c') def 
test_get_swift_info_with_unmatched_disallowed_sections(self): cap1 = {'cap1_foo': 'cap1_bar', 'cap1_moo': 'cap1_baa'} utils._swift_info = {'swift': {'foo': 'bar'}, 'cap1': cap1} # expect no exceptions info = utils.get_swift_info( disallowed_sections=['cap2.cap1_foo', 'cap1.no_match', 'cap1.cap1_foo.no_match.no_match']) self.assertEqual(info['cap1'], cap1) class TestFileLikeIter(unittest.TestCase): def test_iter_file_iter(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] for chunk in utils.FileLikeIter(in_iter): chunks.append(chunk) self.assertEqual(chunks, in_iter) def test_next(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] iter_file = utils.FileLikeIter(in_iter) while True: try: chunk = next(iter_file) except StopIteration: break chunks.append(chunk) self.assertEqual(chunks, in_iter) def test_read(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] iter_file = utils.FileLikeIter(in_iter) self.assertEqual(iter_file.read(), b''.join(in_iter)) def test_read_with_size(self): in_iter = [b'abc', b'de', b'fghijk', b'l'] chunks = [] iter_file = utils.FileLikeIter(in_iter) while True: chunk = iter_file.read(2) if not chunk: break self.assertTrue(len(chunk) <= 2) chunks.append(chunk) self.assertEqual(b''.join(chunks), b''.join(in_iter)) def test_read_with_size_zero(self): # makes little sense, but file supports it, so... self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'') def test_readline(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = [] iter_file = utils.FileLikeIter(in_iter) while True: line = iter_file.readline() if not line: break lines.append(line) self.assertEqual( lines, [v if v == b'trailing.' else v + b'\n' for v in b''.join(in_iter).split(b'\n')]) def test_readline2(self): self.assertEqual( utils.FileLikeIter([b'abc', b'def\n']).readline(4), b'abcd') def test_readline3(self): self.assertEqual( utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(), (b'a' * 1111) + b'bc\n') def test_readline_with_size(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = [] iter_file = utils.FileLikeIter(in_iter) while True: line = iter_file.readline(2) if not line: break lines.append(line) self.assertEqual( lines, [b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n', b'k\n', b'tr', b'ai', b'li', b'ng', b'.']) def test_readlines(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] lines = utils.FileLikeIter(in_iter).readlines() self.assertEqual( lines, [v if v == b'trailing.' 
else v + b'\n' for v in b''.join(in_iter).split(b'\n')]) def test_readlines_with_size(self): in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n', b'trailing.'] iter_file = utils.FileLikeIter(in_iter) lists_of_lines = [] while True: lines = iter_file.readlines(2) if not lines: break lists_of_lines.append(lines) self.assertEqual( lists_of_lines, [[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'], [b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'], [b'.']]) def test_close(self): iter_file = utils.FileLikeIter([b'a', b'b', b'c']) self.assertEqual(next(iter_file), b'a') iter_file.close() self.assertTrue(iter_file.closed) self.assertRaises(ValueError, iter_file.next) self.assertRaises(ValueError, iter_file.read) self.assertRaises(ValueError, iter_file.readline) self.assertRaises(ValueError, iter_file.readlines) # Just make sure repeated close calls don't raise an Exception iter_file.close() self.assertTrue(iter_file.closed) class TestStatsdLogging(unittest.TestCase): def setUp(self): def fake_getaddrinfo(host, port, *args): # this is what a real getaddrinfo('localhost', port, # socket.AF_INET) returned once return [(socket.AF_INET, # address family socket.SOCK_STREAM, # socket type socket.IPPROTO_TCP, # socket protocol '', # canonical name, ('127.0.0.1', port)), # socket address (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('127.0.0.1', port))] self.real_getaddrinfo = utils.socket.getaddrinfo self.getaddrinfo_patcher = mock.patch.object( utils.socket, 'getaddrinfo', fake_getaddrinfo) self.mock_getaddrinfo = self.getaddrinfo_patcher.start() self.addCleanup(self.getaddrinfo_patcher.stop) def test_get_logger_statsd_client_not_specified(self): logger = utils.get_logger({}, 'some-name', log_route='some-route') # white-box construction validation self.assertIsNone(logger.logger.statsd_client) def test_get_logger_statsd_client_defaults(self): logger = utils.get_logger({'log_statsd_host': 'some.host.com'}, 'some-name', log_route='some-route') # white-box construction validation self.assertTrue(isinstance(logger.logger.statsd_client, utils.StatsdClient)) self.assertEqual(logger.logger.statsd_client._host, 'some.host.com') self.assertEqual(logger.logger.statsd_client._port, 8125) self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.') self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1) logger.set_statsd_prefix('some-name.more-specific') self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.more-specific.') logger.set_statsd_prefix('') self.assertEqual(logger.logger.statsd_client._prefix, '') def test_get_logger_statsd_client_non_defaults(self): logger = utils.get_logger({ 'log_statsd_host': 'another.host.com', 'log_statsd_port': '9876', 'log_statsd_default_sample_rate': '0.75', 'log_statsd_sample_rate_factor': '0.81', 'log_statsd_metric_prefix': 'tomato.sauce', }, 'some-name', log_route='some-route') self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.some-name.') logger.set_statsd_prefix('some-name.more-specific') self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.some-name.more-specific.') logger.set_statsd_prefix('') self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.') self.assertEqual(logger.logger.statsd_client._host, 'another.host.com') self.assertEqual(logger.logger.statsd_client._port, 9876) self.assertEqual(logger.logger.statsd_client._default_sample_rate, 0.75) self.assertEqual(logger.logger.statsd_client._sample_rate_factor, 0.81) def 
test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self): def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest): if family == socket.AF_INET: return [(socket.AF_INET, 'blah', 'blah', 'blah', ('127.0.0.1', int(port)))] elif family == socket.AF_INET6: # Implemented so an incorrectly ordered implementation (IPv6 # then IPv4) would realistically fail. return [(socket.AF_INET6, 'blah', 'blah', 'blah', ('::1', int(port), 0, 0))] with mock.patch.object(utils.socket, 'getaddrinfo', new=stub_getaddrinfo_both_ipv4_and_ipv6): logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': '9876', }, 'some-name', log_route='some-route') statsd_client = logger.logger.statsd_client self.assertEqual(statsd_client._sock_family, socket.AF_INET) self.assertEqual(statsd_client._target, ('localhost', 9876)) got_sock = statsd_client._open_socket() self.assertEqual(got_sock.family, socket.AF_INET) def test_ipv4_instantiation_and_socket_creation(self): logger = utils.get_logger({ 'log_statsd_host': '127.0.0.1', 'log_statsd_port': '9876', }, 'some-name', log_route='some-route') statsd_client = logger.logger.statsd_client self.assertEqual(statsd_client._sock_family, socket.AF_INET) self.assertEqual(statsd_client._target, ('127.0.0.1', 9876)) got_sock = statsd_client._open_socket() self.assertEqual(got_sock.family, socket.AF_INET) def test_ipv6_instantiation_and_socket_creation(self): # We have to check the given hostname or IP for IPv4/IPv6 on logger # instantiation so we don't call getaddrinfo() too often and don't have # to call bind() on our socket to detect IPv4/IPv6 on every send. # # This test uses the real getaddrinfo, so we patch over the mock to # put the real one back. If we just stop the mock, then # unittest.exit() blows up, but stacking real-fake-real works okay. with mock.patch.object(utils.socket, 'getaddrinfo', self.real_getaddrinfo): logger = utils.get_logger({ 'log_statsd_host': '::1', 'log_statsd_port': '9876', }, 'some-name', log_route='some-route') statsd_client = logger.logger.statsd_client self.assertEqual(statsd_client._sock_family, socket.AF_INET6) self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0)) got_sock = statsd_client._open_socket() self.assertEqual(got_sock.family, socket.AF_INET6) def test_bad_hostname_instantiation(self): with mock.patch.object(utils.socket, 'getaddrinfo', side_effect=utils.socket.gaierror("whoops")): logger = utils.get_logger({ 'log_statsd_host': 'i-am-not-a-hostname-or-ip', 'log_statsd_port': '9876', }, 'some-name', log_route='some-route') statsd_client = logger.logger.statsd_client self.assertEqual(statsd_client._sock_family, socket.AF_INET) self.assertEqual(statsd_client._target, ('i-am-not-a-hostname-or-ip', 9876)) got_sock = statsd_client._open_socket() self.assertEqual(got_sock.family, socket.AF_INET) # Maybe the DNS server gets fixed in a bit and it starts working... or # maybe the DNS record hadn't propagated yet. In any case, failed # statsd sends will warn in the logs until the DNS failure or invalid # IP address in the configuration is fixed. 
def test_sending_ipv6(self): def fake_getaddrinfo(host, port, *args): # this is what a real getaddrinfo('::1', port, # socket.AF_INET6) returned once return [(socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('::1', port, 0, 0)), (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, '', ('::1', port, 0, 0))] with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo): logger = utils.get_logger({ 'log_statsd_host': '::1', 'log_statsd_port': '9876', }, 'some-name', log_route='some-route') statsd_client = logger.logger.statsd_client fl = FakeLogger() statsd_client.logger = fl mock_socket = MockUdpSocket() statsd_client._open_socket = lambda *_: mock_socket logger.increment('tunafish') self.assertEqual(fl.get_lines_for_level('warning'), []) self.assertEqual(mock_socket.sent, [(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))]) def test_no_exception_when_cant_send_udp_packet(self): logger = utils.get_logger({'log_statsd_host': 'some.host.com'}) statsd_client = logger.logger.statsd_client fl = FakeLogger() statsd_client.logger = fl mock_socket = MockUdpSocket(sendto_errno=errno.EPERM) statsd_client._open_socket = lambda *_: mock_socket logger.increment('tunafish') expected = ["Error sending UDP message to ('some.host.com', 8125): " "[Errno 1] test errno 1"] self.assertEqual(fl.get_lines_for_level('warning'), expected) def test_sample_rates(self): logger = utils.get_logger({'log_statsd_host': 'some.host.com'}) mock_socket = MockUdpSocket() # encapsulation? what's that? statsd_client = logger.logger.statsd_client self.assertTrue(statsd_client.random is random.random) statsd_client._open_socket = lambda *_: mock_socket statsd_client.random = lambda: 0.50001 logger.increment('tribbles', sample_rate=0.5) self.assertEqual(len(mock_socket.sent), 0) statsd_client.random = lambda: 0.49999 logger.increment('tribbles', sample_rate=0.5) self.assertEqual(len(mock_socket.sent), 1) payload = mock_socket.sent[0][0] self.assertTrue(payload.endswith(b"|@0.5")) def test_sample_rates_with_sample_rate_factor(self): logger = utils.get_logger({ 'log_statsd_host': 'some.host.com', 'log_statsd_default_sample_rate': '0.82', 'log_statsd_sample_rate_factor': '0.91', }) effective_sample_rate = 0.82 * 0.91 mock_socket = MockUdpSocket() # encapsulation? what's that? 
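        # Illustrative note (comments only, not part of the original test):
        # client-side sampling sends a packet only when random() < the
        # effective sample rate, and appends '|@<rate>' so the server can
        # scale counts back up:
        #
        #     random() = 0.50001, rate 0.5  ->  dropped
        #     random() = 0.49999, rate 0.5  ->  b'...tribbles:1|c|@0.5' sent
        #
        # With a log_statsd_sample_rate_factor f, the effective rate becomes
        # rate * f (see the next test).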
statsd_client = logger.logger.statsd_client self.assertTrue(statsd_client.random is random.random) statsd_client._open_socket = lambda *_: mock_socket statsd_client.random = lambda: effective_sample_rate + 0.001 logger.increment('tribbles') self.assertEqual(len(mock_socket.sent), 0) statsd_client.random = lambda: effective_sample_rate - 0.001 logger.increment('tribbles') self.assertEqual(len(mock_socket.sent), 1) payload = mock_socket.sent[0][0] suffix = "|@%s" % effective_sample_rate if six.PY3: suffix = suffix.encode('utf-8') self.assertTrue(payload.endswith(suffix), payload) effective_sample_rate = 0.587 * 0.91 statsd_client.random = lambda: effective_sample_rate - 0.001 logger.increment('tribbles', sample_rate=0.587) self.assertEqual(len(mock_socket.sent), 2) payload = mock_socket.sent[1][0] suffix = "|@%s" % effective_sample_rate if six.PY3: suffix = suffix.encode('utf-8') self.assertTrue(payload.endswith(suffix), payload) def test_timing_stats(self): class MockController(object): def __init__(self, status): self.status = status self.logger = self self.args = () self.called = 'UNKNOWN' def timing_since(self, *args): self.called = 'timing' self.args = args @utils.timing_stats() def METHOD(controller): return Response(status=controller.status) mock_controller = MockController(200) METHOD(mock_controller) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(400) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(404) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(412) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(416) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(500) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing') self.assertTrue(mock_controller.args[1] > 0) mock_controller = MockController(507) METHOD(mock_controller) self.assertEqual(len(mock_controller.args), 2) self.assertEqual(mock_controller.called, 'timing') self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing') self.assertTrue(mock_controller.args[1] > 0) class UnsafeXrange(object): """ Like xrange(limit), but with extra context switching to screw things up. 
""" def __init__(self, upper_bound): self.current = 0 self.concurrent_calls = 0 self.upper_bound = upper_bound self.concurrent_call = False def __iter__(self): return self def next(self): if self.concurrent_calls > 0: self.concurrent_call = True self.concurrent_calls += 1 try: if self.current >= self.upper_bound: raise StopIteration else: val = self.current self.current += 1 eventlet.sleep() # yield control return val finally: self.concurrent_calls -= 1 __next__ = next class TestAffinityKeyFunction(unittest.TestCase): def setUp(self): self.nodes = [dict(id=0, region=1, zone=1), dict(id=1, region=1, zone=2), dict(id=2, region=2, zone=1), dict(id=3, region=2, zone=2), dict(id=4, region=3, zone=1), dict(id=5, region=3, zone=2), dict(id=6, region=4, zone=0), dict(id=7, region=4, zone=1)] def test_single_region(self): keyfn = utils.affinity_key_function("r3=1") ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids) def test_bogus_value(self): self.assertRaises(ValueError, utils.affinity_key_function, "r3") self.assertRaises(ValueError, utils.affinity_key_function, "r3=elephant") def test_empty_value(self): # Empty's okay, it just means no preference keyfn = utils.affinity_key_function("") self.assertTrue(callable(keyfn)) ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids) def test_all_whitespace_value(self): # Empty's okay, it just means no preference keyfn = utils.affinity_key_function(" \n") self.assertTrue(callable(keyfn)) ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids) def test_with_zone_zero(self): keyfn = utils.affinity_key_function("r4z0=1") ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids) def test_multiple(self): keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1") ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids) def test_more_specific_after_less_specific(self): keyfn = utils.affinity_key_function("r2=100, r2z2=50") ids = [n['id'] for n in sorted(self.nodes, key=keyfn)] self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids) class TestAffinityLocalityPredicate(unittest.TestCase): def setUp(self): self.nodes = [dict(id=0, region=1, zone=1), dict(id=1, region=1, zone=2), dict(id=2, region=2, zone=1), dict(id=3, region=2, zone=2), dict(id=4, region=3, zone=1), dict(id=5, region=3, zone=2), dict(id=6, region=4, zone=0), dict(id=7, region=4, zone=1)] def test_empty(self): pred = utils.affinity_locality_predicate('') self.assertTrue(pred is None) def test_region(self): pred = utils.affinity_locality_predicate('r1') self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0, 1], ids) def test_zone(self): pred = utils.affinity_locality_predicate('r1z1') self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0], ids) def test_multiple(self): pred = utils.affinity_locality_predicate('r1, r3, r4z0') self.assertTrue(callable(pred)) ids = [n['id'] for n in self.nodes if pred(n)] self.assertEqual([0, 1, 4, 5, 6], ids) def test_invalid(self): self.assertRaises(ValueError, utils.affinity_locality_predicate, 'falafel') self.assertRaises(ValueError, utils.affinity_locality_predicate, 'r8zQ') self.assertRaises(ValueError, utils.affinity_locality_predicate, 'r2d2') self.assertRaises(ValueError, utils.affinity_locality_predicate, 'r1z1=1') class 
TestRateLimitedIterator(unittest.TestCase): def run_under_pseudo_time( self, func, *args, **kwargs): curr_time = [42.0] def my_time(): curr_time[0] += 0.001 return curr_time[0] def my_sleep(duration): curr_time[0] += 0.001 curr_time[0] += duration with patch('time.time', my_time), \ patch('eventlet.sleep', my_sleep): return func(*args, **kwargs) def test_rate_limiting(self): def testfunc(): limited_iterator = utils.RateLimitedIterator(range(9999), 100) got = [] started_at = time.time() try: while time.time() - started_at < 0.1: got.append(next(limited_iterator)) except StopIteration: pass return got got = self.run_under_pseudo_time(testfunc) # it's 11, not 10, because ratelimiting doesn't apply to the very # first element. self.assertEqual(len(got), 11) def test_rate_limiting_sometimes(self): def testfunc(): limited_iterator = utils.RateLimitedIterator( range(9999), 100, ratelimit_if=lambda item: item % 23 != 0) got = [] started_at = time.time() try: while time.time() - started_at < 0.5: got.append(next(limited_iterator)) except StopIteration: pass return got got = self.run_under_pseudo_time(testfunc) # we'd get 51 without the ratelimit_if, but because 0, 23 and 46 # weren't subject to ratelimiting, we get 54 instead self.assertEqual(len(got), 54) def test_limit_after(self): def testfunc(): limited_iterator = utils.RateLimitedIterator( range(9999), 100, limit_after=5) got = [] started_at = time.time() try: while time.time() - started_at < 0.1: got.append(next(limited_iterator)) except StopIteration: pass return got got = self.run_under_pseudo_time(testfunc) # it's 16, not 15, because ratelimiting doesn't apply to the very # first element. self.assertEqual(len(got), 16) class TestGreenthreadSafeIterator(unittest.TestCase): def increment(self, iterable): plus_ones = [] for n in iterable: plus_ones.append(n + 1) return plus_ones def test_setup_works(self): # it should work without concurrent access self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4))) iterable = UnsafeXrange(10) pile = eventlet.GreenPile(2) for _ in range(2): pile.spawn(self.increment, iterable) sorted([resp for resp in pile]) self.assertTrue( iterable.concurrent_call, 'test setup is insufficiently crazy') def test_access_is_serialized(self): pile = eventlet.GreenPile(2) unsafe_iterable = UnsafeXrange(10) iterable = utils.GreenthreadSafeIterator(unsafe_iterable) for _ in range(2): pile.spawn(self.increment, iterable) response = sorted(sum([resp for resp in pile], [])) self.assertEqual(list(range(1, 11)), response) self.assertTrue( not unsafe_iterable.concurrent_call, 'concurrent call occurred') class TestStatsdLoggingDelegation(unittest.TestCase): def setUp(self): self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.sock.bind(('localhost', 0)) self.port = self.sock.getsockname()[1] self.queue = Queue() self.reader_thread = threading.Thread(target=self.statsd_reader) self.reader_thread.setDaemon(1) self.reader_thread.start() def tearDown(self): # The "no-op when disabled" test doesn't set up a real logger, so # create one here so we can tell the reader thread to stop. 
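        # (A real logger is also what lets us stop the reader thread:
        # statsd_reader, below, exits only when it receives a payload
        # containing b'STOP'.)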
if not getattr(self, 'logger', None): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), }, 'some-name') self.logger.increment('STOP') self.reader_thread.join(timeout=4) self.sock.close() del self.logger def statsd_reader(self): while True: try: payload = self.sock.recv(4096) if payload and b'STOP' in payload: return 42 self.queue.put(payload) except Exception as e: sys.stderr.write('statsd_reader thread: %r' % (e,)) break def _send_and_get(self, sender_fn, *args, **kwargs): """ Because the client library may not actually send a packet with sample_rate < 1, we keep trying until we get one through. """ got = None while not got: sender_fn(*args, **kwargs) try: got = self.queue.get(timeout=0.5) except Empty: pass return got def assertStat(self, expected, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) if six.PY3: got = got.decode('utf-8') return self.assertEqual(expected, got) def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs): got = self._send_and_get(sender_fn, *args, **kwargs) if six.PY3: got = got.decode('utf-8') return self.assertTrue(re.search(expected_regexp, got), [got, expected_regexp]) def test_methods_are_no_ops_when_not_enabled(self): logger = utils.get_logger({ # No "log_statsd_host" means "disabled" 'log_statsd_port': str(self.port), }, 'some-name') # Delegate methods are no-ops self.assertIsNone(logger.update_stats('foo', 88)) self.assertIsNone(logger.update_stats('foo', 88, 0.57)) self.assertIsNone(logger.update_stats('foo', 88, sample_rate=0.61)) self.assertIsNone(logger.increment('foo')) self.assertIsNone(logger.increment('foo', 0.57)) self.assertIsNone(logger.increment('foo', sample_rate=0.61)) self.assertIsNone(logger.decrement('foo')) self.assertIsNone(logger.decrement('foo', 0.57)) self.assertIsNone(logger.decrement('foo', sample_rate=0.61)) self.assertIsNone(logger.timing('foo', 88.048)) self.assertIsNone(logger.timing('foo', 88.57, 0.34)) self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82)) self.assertIsNone(logger.timing_since('foo', 8938)) self.assertIsNone(logger.timing_since('foo', 8948, 0.57)) self.assertIsNone(logger.timing_since('foo', 849398, sample_rate=0.61)) # Now, the queue should be empty (no UDP packets sent) self.assertRaises(Empty, self.queue.get_nowait) def test_delegate_methods_with_no_default_sample_rate(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), }, 'some-name') self.assertStat('some-name.some.counter:1|c', self.logger.increment, 'some.counter') self.assertStat('some-name.some.counter:-1|c', self.logger.decrement, 'some.counter') self.assertStat('some-name.some.operation:4900.0|ms', self.logger.timing, 'some.operation', 4.9 * 1000) self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms', self.logger.timing_since, 'another.operation', time.time()) self.assertStat('some-name.another.counter:42|c', self.logger.update_stats, 'another.counter', 42) # Each call can override the sample_rate (also, bonus prefix test) self.logger.set_statsd_prefix('pfx') self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment, 'some.counter', sample_rate=0.972) self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement, 'some.counter', sample_rate=0.972) self.assertStat('pfx.some.operation:4900.0|ms|@0.972', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.972) self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972', 
self.logger.timing_since, 'another.op', time.time(), sample_rate=0.972) self.assertStat('pfx.another.counter:3|c|@0.972', self.logger.update_stats, 'another.counter', 3, sample_rate=0.972) # Can override sample_rate with non-keyword arg self.logger.set_statsd_prefix('') self.assertStat('some.counter:1|c|@0.939', self.logger.increment, 'some.counter', 0.939) self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement, 'some.counter', 0.939) self.assertStat('some.operation:4900.0|ms|@0.939', self.logger.timing, 'some.operation', 4.9 * 1000, 0.939) self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939', self.logger.timing_since, 'another.op', time.time(), 0.939) self.assertStat('another.counter:3|c|@0.939', self.logger.update_stats, 'another.counter', 3, 0.939) def test_delegate_methods_with_default_sample_rate(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), 'log_statsd_default_sample_rate': '0.93', }, 'pfx') self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment, 'some.counter') self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement, 'some.counter') self.assertStat('pfx.some.operation:4760.0|ms|@0.93', self.logger.timing, 'some.operation', 4.76 * 1000) self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93', self.logger.timing_since, 'another.op', time.time()) self.assertStat('pfx.another.counter:3|c|@0.93', self.logger.update_stats, 'another.counter', 3) # Each call can override the sample_rate self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment, 'some.counter', sample_rate=0.9912) self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement, 'some.counter', sample_rate=0.9912) self.assertStat('pfx.some.operation:4900.0|ms|@0.9912', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.9912) self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912', self.logger.timing_since, 'another.op', time.time(), sample_rate=0.9912) self.assertStat('pfx.another.counter:3|c|@0.9912', self.logger.update_stats, 'another.counter', 3, sample_rate=0.9912) # Can override sample_rate with non-keyword arg self.logger.set_statsd_prefix('') self.assertStat('some.counter:1|c|@0.987654', self.logger.increment, 'some.counter', 0.987654) self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement, 'some.counter', 0.987654) self.assertStat('some.operation:4900.0|ms|@0.987654', self.logger.timing, 'some.operation', 4.9 * 1000, 0.987654) self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654', self.logger.timing_since, 'another.op', time.time(), 0.987654) self.assertStat('another.counter:3|c|@0.987654', self.logger.update_stats, 'another.counter', 3, 0.987654) def test_delegate_methods_with_metric_prefix(self): self.logger = utils.get_logger({ 'log_statsd_host': 'localhost', 'log_statsd_port': str(self.port), 'log_statsd_metric_prefix': 'alpha.beta', }, 'pfx') self.assertStat('alpha.beta.pfx.some.counter:1|c', self.logger.increment, 'some.counter') self.assertStat('alpha.beta.pfx.some.counter:-1|c', self.logger.decrement, 'some.counter') self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms', self.logger.timing, 'some.operation', 4.76 * 1000) self.assertStatMatches( 'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms', self.logger.timing_since, 'another.op', time.time()) self.assertStat('alpha.beta.pfx.another.counter:3|c', self.logger.update_stats, 'another.counter', 3) self.logger.set_statsd_prefix('') self.assertStat('alpha.beta.some.counter:1|c|@0.9912', 
self.logger.increment, 'some.counter', sample_rate=0.9912) self.assertStat('alpha.beta.some.counter:-1|c|@0.9912', self.logger.decrement, 'some.counter', 0.9912) self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912', self.logger.timing, 'some.operation', 4.9 * 1000, sample_rate=0.9912) self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912', self.logger.timing_since, 'another.op', time.time(), sample_rate=0.9912) self.assertStat('alpha.beta.another.counter:3|c|@0.9912', self.logger.update_stats, 'another.counter', 3, sample_rate=0.9912) @reset_logger_state def test_thread_locals(self): logger = utils.get_logger(None) # test the setter logger.thread_locals = ('id', 'ip') self.assertEqual(logger.thread_locals, ('id', 'ip')) # reset logger.thread_locals = (None, None) self.assertEqual(logger.thread_locals, (None, None)) logger.txn_id = '1234' logger.client_ip = '1.2.3.4' self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4')) logger.txn_id = '5678' logger.client_ip = '5.6.7.8' self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8')) def test_no_fdatasync(self): called = [] class NoFdatasync(object): pass def fsync(fd): called.append(fd) with patch('swift.common.utils.os', NoFdatasync()): with patch('swift.common.utils.fsync', fsync): utils.fdatasync(12345) self.assertEqual(called, [12345]) def test_yes_fdatasync(self): called = [] class YesFdatasync(object): def fdatasync(self, fd): called.append(fd) with patch('swift.common.utils.os', YesFdatasync()): utils.fdatasync(12345) self.assertEqual(called, [12345]) def test_fsync_bad_fullsync(self): class FCNTL(object): F_FULLSYNC = 123 def fcntl(self, fd, op): raise IOError(18) with patch('swift.common.utils.fcntl', FCNTL()): self.assertRaises(OSError, lambda: utils.fsync(12345)) def test_fsync_f_fullsync(self): called = [] class FCNTL(object): F_FULLSYNC = 123 def fcntl(self, fd, op): called[:] = [fd, op] return 0 with patch('swift.common.utils.fcntl', FCNTL()): utils.fsync(12345) self.assertEqual(called, [12345, 123]) def test_fsync_no_fullsync(self): called = [] class FCNTL(object): pass def fsync(fd): called.append(fd) with patch('swift.common.utils.fcntl', FCNTL()): with patch('os.fsync', fsync): utils.fsync(12345) self.assertEqual(called, [12345]) class TestAuditLocationGenerator(unittest.TestCase): def test_drive_tree_access(self): orig_listdir = utils.listdir def _mock_utils_listdir(path): if 'bad_part' in path: raise OSError(errno.EACCES) elif 'bad_suffix' in path: raise OSError(errno.EACCES) elif 'bad_hash' in path: raise OSError(errno.EACCES) else: return orig_listdir(path) # Check Raise on Bad partition tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) obj_path = os.path.join(data, "bad_part") with open(obj_path, "w"): pass part1 = os.path.join(data, "partition1") os.makedirs(part1) part2 = os.path.join(data, "partition2") os.makedirs(part2) with patch('swift.common.utils.listdir', _mock_utils_listdir): audit = lambda: list(utils.audit_location_generator( tmpdir, "data", mount_check=False)) self.assertRaises(OSError, audit) rmtree(tmpdir) # Check Raise on Bad Suffix tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) part1 = os.path.join(data, "partition1") os.makedirs(part1) part2 = os.path.join(data, "partition2") os.makedirs(part2) obj_path = os.path.join(part1, "bad_suffix") with open(obj_path, 'w'): pass suffix = os.path.join(part2, "suffix") os.makedirs(suffix) with patch('swift.common.utils.listdir', _mock_utils_listdir): audit = lambda: 
list(utils.audit_location_generator( tmpdir, "data", mount_check=False)) self.assertRaises(OSError, audit) rmtree(tmpdir) # Check Raise on Bad Hash tmpdir = mkdtemp() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) part1 = os.path.join(data, "partition1") os.makedirs(part1) suffix = os.path.join(part1, "suffix") os.makedirs(suffix) hash1 = os.path.join(suffix, "hash1") os.makedirs(hash1) obj_path = os.path.join(suffix, "bad_hash") with open(obj_path, 'w'): pass with patch('swift.common.utils.listdir', _mock_utils_listdir): audit = lambda: list(utils.audit_location_generator( tmpdir, "data", mount_check=False)) self.assertRaises(OSError, audit) rmtree(tmpdir) def test_non_dir_drive(self): with temptree([]) as tmpdir: logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) # Create a file, that represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') locations = utils.audit_location_generator( tmpdir, "data", mount_check=False, logger=logger ) self.assertEqual(list(locations), []) self.assertEqual(1, len(logger.get_lines_for_level('warning'))) # Test without the logger locations = utils.audit_location_generator( tmpdir, "data", mount_check=False ) self.assertEqual(list(locations), []) def test_mount_check_drive(self): with temptree([]) as tmpdir: logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) # Create a file, that represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') locations = utils.audit_location_generator( tmpdir, "data", mount_check=True, logger=logger ) self.assertEqual(list(locations), []) self.assertEqual(2, len(logger.get_lines_for_level('warning'))) # Test without the logger locations = utils.audit_location_generator( tmpdir, "data", mount_check=True ) self.assertEqual(list(locations), []) def test_non_dir_contents(self): with temptree([]) as tmpdir: logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) with open(os.path.join(data, "partition1"), "w"): pass partition = os.path.join(data, "partition2") os.makedirs(partition) with open(os.path.join(partition, "suffix1"), "w"): pass suffix = os.path.join(partition, "suffix2") os.makedirs(suffix) with open(os.path.join(suffix, "hash1"), "w"): pass locations = utils.audit_location_generator( tmpdir, "data", mount_check=False, logger=logger ) self.assertEqual(list(locations), []) def test_find_objects(self): with temptree([]) as tmpdir: expected_objs = list() logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) # Create a file, that represents a non-dir drive open(os.path.join(tmpdir, 'asdf'), 'w') partition = os.path.join(data, "partition1") os.makedirs(partition) suffix = os.path.join(partition, "suffix") os.makedirs(suffix) hash_path = os.path.join(suffix, "hash") os.makedirs(hash_path) obj_path = os.path.join(hash_path, "obj1.db") with open(obj_path, "w"): pass expected_objs.append((obj_path, 'drive', 'partition1')) partition = os.path.join(data, "partition2") os.makedirs(partition) suffix = os.path.join(partition, "suffix2") os.makedirs(suffix) hash_path = os.path.join(suffix, "hash2") os.makedirs(hash_path) obj_path = os.path.join(hash_path, "obj2.db") with open(obj_path, "w"): pass expected_objs.append((obj_path, 'drive', 'partition2')) locations = utils.audit_location_generator( tmpdir, "data", mount_check=False, logger=logger ) got_objs = list(locations) self.assertEqual(len(got_objs), len(expected_objs)) self.assertEqual(sorted(got_objs), sorted(expected_objs)) 
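            # The stray 'asdf' file created above is not a directory, so it
            # is skipped as a device and produces the single warning
            # asserted below: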
self.assertEqual(1, len(logger.get_lines_for_level('warning'))) def test_ignore_metadata(self): with temptree([]) as tmpdir: logger = FakeLogger() data = os.path.join(tmpdir, "drive", "data") os.makedirs(data) partition = os.path.join(data, "partition2") os.makedirs(partition) suffix = os.path.join(partition, "suffix2") os.makedirs(suffix) hash_path = os.path.join(suffix, "hash2") os.makedirs(hash_path) obj_path = os.path.join(hash_path, "obj1.dat") with open(obj_path, "w"): pass meta_path = os.path.join(hash_path, "obj1.meta") with open(meta_path, "w"): pass locations = utils.audit_location_generator( tmpdir, "data", ".dat", mount_check=False, logger=logger ) self.assertEqual(list(locations), [(obj_path, "drive", "partition2")]) class TestGreenAsyncPile(unittest.TestCase): def test_runs_everything(self): def run_test(): tests_ran[0] += 1 return tests_ran[0] tests_ran = [0] pile = utils.GreenAsyncPile(3) for x in range(3): pile.spawn(run_test) self.assertEqual(sorted(x for x in pile), [1, 2, 3]) def test_is_asynchronous(self): def run_test(index): events[index].wait() return index pile = utils.GreenAsyncPile(3) for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)): events = [eventlet.event.Event(), eventlet.event.Event(), eventlet.event.Event()] for x in range(3): pile.spawn(run_test, x) for x in order: events[x].send() self.assertEqual(next(pile), x) def test_next_when_empty(self): def run_test(): pass pile = utils.GreenAsyncPile(3) pile.spawn(run_test) self.assertIsNone(next(pile)) self.assertRaises(StopIteration, lambda: next(pile)) def test_waitall_timeout_timesout(self): def run_test(sleep_duration): eventlet.sleep(sleep_duration) completed[0] += 1 return sleep_duration completed = [0] pile = utils.GreenAsyncPile(3) pile.spawn(run_test, 0.1) pile.spawn(run_test, 1.0) self.assertEqual(pile.waitall(0.5), [0.1]) self.assertEqual(completed[0], 1) def test_waitall_timeout_completes(self): def run_test(sleep_duration): eventlet.sleep(sleep_duration) completed[0] += 1 return sleep_duration completed = [0] pile = utils.GreenAsyncPile(3) pile.spawn(run_test, 0.1) pile.spawn(run_test, 0.1) self.assertEqual(pile.waitall(0.5), [0.1, 0.1]) self.assertEqual(completed[0], 2) def test_waitfirst_only_returns_first(self): def run_test(name): eventlet.sleep(0) completed.append(name) return name completed = [] pile = utils.GreenAsyncPile(3) pile.spawn(run_test, 'first') pile.spawn(run_test, 'second') pile.spawn(run_test, 'third') self.assertEqual(pile.waitfirst(0.5), completed[0]) # 3 still completed, but only the first was returned. 
self.assertEqual(3, len(completed)) def test_wait_with_firstn(self): def run_test(name): eventlet.sleep(0) completed.append(name) return name for first_n in [None] + list(range(6)): completed = [] pile = utils.GreenAsyncPile(10) for i in range(10): pile.spawn(run_test, i) actual = pile._wait(1, first_n) expected_n = first_n if first_n else 10 self.assertEqual(completed[:expected_n], actual) self.assertEqual(10, len(completed)) def test_pending(self): pile = utils.GreenAsyncPile(3) self.assertEqual(0, pile._pending) for repeats in range(2): # repeat to verify that pending will go again up after going down for i in range(4): pile.spawn(lambda: i) self.assertEqual(4, pile._pending) for i in range(3, -1, -1): next(pile) self.assertEqual(i, pile._pending) # sanity check - the pile is empty self.assertRaises(StopIteration, pile.next) # pending remains 0 self.assertEqual(0, pile._pending) class TestLRUCache(unittest.TestCase): def test_maxsize(self): @utils.LRUCache(maxsize=10) def f(*args): return math.sqrt(*args) _orig_math_sqrt = math.sqrt # setup cache [0-10) for i in range(10): self.assertEqual(math.sqrt(i), f(i)) self.assertEqual(f.size(), 10) # validate cache [0-10) with patch('math.sqrt'): for i in range(10): self.assertEqual(_orig_math_sqrt(i), f(i)) self.assertEqual(f.size(), 10) # update cache [10-20) for i in range(10, 20): self.assertEqual(math.sqrt(i), f(i)) # cache size is fixed self.assertEqual(f.size(), 10) # validate cache [10-20) with patch('math.sqrt'): for i in range(10, 20): self.assertEqual(_orig_math_sqrt(i), f(i)) # validate un-cached [0-10) with patch('math.sqrt', new=None): for i in range(10): self.assertRaises(TypeError, f, i) # cache unchanged self.assertEqual(f.size(), 10) with patch('math.sqrt'): for i in range(10, 20): self.assertEqual(_orig_math_sqrt(i), f(i)) self.assertEqual(f.size(), 10) def test_maxtime(self): @utils.LRUCache(maxtime=30) def f(*args): return math.sqrt(*args) self.assertEqual(30, f.maxtime) _orig_math_sqrt = math.sqrt now = time.time() the_future = now + 31 # setup cache [0-10) with patch('time.time', lambda: now): for i in range(10): self.assertEqual(math.sqrt(i), f(i)) self.assertEqual(f.size(), 10) # validate cache [0-10) with patch('math.sqrt'): for i in range(10): self.assertEqual(_orig_math_sqrt(i), f(i)) self.assertEqual(f.size(), 10) # validate expired [0-10) with patch('math.sqrt', new=None): with patch('time.time', lambda: the_future): for i in range(10): self.assertRaises(TypeError, f, i) # validate repopulates [0-10) with patch('time.time', lambda: the_future): for i in range(10): self.assertEqual(math.sqrt(i), f(i)) # reuses cache space self.assertEqual(f.size(), 10) def test_set_maxtime(self): @utils.LRUCache(maxtime=30) def f(*args): return math.sqrt(*args) self.assertEqual(30, f.maxtime) self.assertEqual(2, f(4)) self.assertEqual(1, f.size()) # expire everything f.maxtime = -1 # validate un-cached [0-10) with patch('math.sqrt', new=None): self.assertRaises(TypeError, f, 4) def test_set_maxsize(self): @utils.LRUCache(maxsize=10) def f(*args): return math.sqrt(*args) for i in range(12): f(i) self.assertEqual(f.size(), 10) f.maxsize = 4 for i in range(12): f(i) self.assertEqual(f.size(), 4) class TestSpliterator(unittest.TestCase): def test_string(self): input_chunks = ["coun", "ter-", "b", "ra", "nch-mater", "nit", "y-fungusy", "-nummular"] si = utils.Spliterator(input_chunks) self.assertEqual(''.join(si.take(8)), "counter-") self.assertEqual(''.join(si.take(7)), "branch-") self.assertEqual(''.join(si.take(10)), "maternity-") 
self.assertEqual(''.join(si.take(8)), "fungusy-") self.assertEqual(''.join(si.take(8)), "nummular") def test_big_input_string(self): input_chunks = ["iridium"] si = utils.Spliterator(input_chunks) self.assertEqual(''.join(si.take(2)), "ir") self.assertEqual(''.join(si.take(1)), "i") self.assertEqual(''.join(si.take(2)), "di") self.assertEqual(''.join(si.take(1)), "u") self.assertEqual(''.join(si.take(1)), "m") def test_chunk_boundaries(self): input_chunks = ["soylent", "green", "is", "people"] si = utils.Spliterator(input_chunks) self.assertEqual(''.join(si.take(7)), "soylent") self.assertEqual(''.join(si.take(5)), "green") self.assertEqual(''.join(si.take(2)), "is") self.assertEqual(''.join(si.take(6)), "people") def test_no_empty_strings(self): input_chunks = ["soylent", "green", "is", "people"] si = utils.Spliterator(input_chunks) outputs = (list(si.take(7)) # starts and ends on chunk boundary + list(si.take(2)) # spans two chunks + list(si.take(3)) # begins but does not end chunk + list(si.take(2)) # ends but does not begin chunk + list(si.take(6))) # whole chunk + EOF self.assertNotIn('', outputs) def test_running_out(self): input_chunks = ["not much"] si = utils.Spliterator(input_chunks) self.assertEqual(''.join(si.take(4)), "not ") self.assertEqual(''.join(si.take(99)), "much") # short self.assertEqual(''.join(si.take(4)), "") self.assertEqual(''.join(si.take(4)), "") def test_overlap(self): input_chunks = ["one fish", "two fish", "red fish", "blue fish"] si = utils.Spliterator(input_chunks) t1 = si.take(20) # longer than first chunk self.assertLess(len(next(t1)), 20) # it's not exhausted t2 = si.take(20) self.assertRaises(ValueError, next, t2) def test_closing(self): input_chunks = ["abcd", "efg", "hij"] si = utils.Spliterator(input_chunks) it = si.take(3) # shorter than first chunk self.assertEqual(next(it), 'abc') it.close() self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij']) si = utils.Spliterator(input_chunks) self.assertEqual(list(si.take(1)), ['a']) it = si.take(1) # still shorter than first chunk self.assertEqual(next(it), 'b') it.close() self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij']) si = utils.Spliterator(input_chunks) it = si.take(6) # longer than first chunk, shorter than first + second self.assertEqual(next(it), 'abcd') self.assertEqual(next(it), 'ef') it.close() self.assertEqual(list(si.take(20)), ['g', 'hij']) si = utils.Spliterator(input_chunks) self.assertEqual(list(si.take(2)), ['ab']) it = si.take(3) # longer than rest of chunk self.assertEqual(next(it), 'cd') it.close() self.assertEqual(list(si.take(20)), ['efg', 'hij']) class TestParseContentRange(unittest.TestCase): def test_good(self): start, end, total = utils.parse_content_range("bytes 100-200/300") self.assertEqual(start, 100) self.assertEqual(end, 200) self.assertEqual(total, 300) def test_bad(self): self.assertRaises(ValueError, utils.parse_content_range, "100-300/500") self.assertRaises(ValueError, utils.parse_content_range, "bytes 100-200/aardvark") self.assertRaises(ValueError, utils.parse_content_range, "bytes bulbous-bouffant/4994801") class TestParseContentDisposition(unittest.TestCase): def test_basic_content_type(self): name, attrs = utils.parse_content_disposition('text/plain') self.assertEqual(name, 'text/plain') self.assertEqual(attrs, {}) def test_content_type_with_charset(self): name, attrs = utils.parse_content_disposition( 'text/plain; charset=UTF8') self.assertEqual(name, 'text/plain') self.assertEqual(attrs, {'charset': 'UTF8'}) def test_content_disposition(self): name, 
attrs = utils.parse_content_disposition( 'form-data; name="somefile"; filename="test.html"') self.assertEqual(name, 'form-data') self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'}) def test_content_disposition_without_white_space(self): name, attrs = utils.parse_content_disposition( 'form-data;name="somefile";filename="test.html"') self.assertEqual(name, 'form-data') self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'}) class TestIterMultipartMimeDocuments(unittest.TestCase): def test_bad_start(self): it = utils.iter_multipart_mime_documents(BytesIO(b'blah'), b'unique') exc = None try: next(it) except MimeInvalid as err: exc = err self.assertTrue('invalid starting boundary' in str(exc)) self.assertTrue('--unique' in str(exc)) def test_empty(self): it = utils.iter_multipart_mime_documents(BytesIO(b'--unique'), b'unique') fp = next(it) self.assertEqual(fp.read(), b'') self.assertRaises(StopIteration, next, it) def test_basic(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabcdefg\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(), b'abcdefg') self.assertRaises(StopIteration, next, it) def test_basic2(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(), b'abcdefg') fp = next(it) self.assertEqual(fp.read(), b'hijkl') self.assertRaises(StopIteration, next, it) def test_tiny_reads(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(2), b'ab') self.assertEqual(fp.read(2), b'cd') self.assertEqual(fp.read(2), b'ef') self.assertEqual(fp.read(2), b'g') self.assertEqual(fp.read(2), b'') fp = next(it) self.assertEqual(fp.read(), b'hijkl') self.assertRaises(StopIteration, next, it) def test_big_reads(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(65536), b'abcdefg') self.assertEqual(fp.read(), b'') fp = next(it) self.assertEqual(fp.read(), b'hijkl') self.assertRaises(StopIteration, next, it) def test_leading_crlfs(self): it = utils.iter_multipart_mime_documents( BytesIO(b'\r\n\r\n\r\n--unique\r\nabcdefg\r\n' b'--unique\r\nhijkl\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.read(65536), b'abcdefg') self.assertEqual(fp.read(), b'') fp = next(it) self.assertEqual(fp.read(), b'hijkl') self.assertRaises(StopIteration, next, it) def test_broken_mid_stream(self): # We go ahead and accept whatever is sent instead of rejecting the # whole request, in case the partial form is still useful. 
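        # For example, b'--unique\r\nabc' below never reaches a closing
        # boundary, yet the parser still yields the partial document b'abc'.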
it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nabc'), b'unique') fp = next(it) self.assertEqual(fp.read(), b'abc') self.assertRaises(StopIteration, next, it) def test_readline(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n' b'jkl\r\n\r\n--unique--'), b'unique') fp = next(it) self.assertEqual(fp.readline(), b'ab\r\n') self.assertEqual(fp.readline(), b'cd\ref\ng') self.assertEqual(fp.readline(), b'') fp = next(it) self.assertEqual(fp.readline(), b'hi\r\n') self.assertEqual(fp.readline(), b'\r\n') self.assertEqual(fp.readline(), b'jkl\r\n') self.assertRaises(StopIteration, next, it) def test_readline_with_tiny_chunks(self): it = utils.iter_multipart_mime_documents( BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n' b'\r\njkl\r\n\r\n--unique--'), b'unique', read_chunk_size=2) fp = next(it) self.assertEqual(fp.readline(), b'ab\r\n') self.assertEqual(fp.readline(), b'cd\ref\ng') self.assertEqual(fp.readline(), b'') fp = next(it) self.assertEqual(fp.readline(), b'hi\r\n') self.assertEqual(fp.readline(), b'\r\n') self.assertEqual(fp.readline(), b'jkl\r\n') self.assertRaises(StopIteration, next, it) class TestParseMimeHeaders(unittest.TestCase): def test_parse_mime_headers(self): doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size" Foo: Bar NOT-title-cAsED: quux Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?= Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?= Latin-1: Resincronizaci\xf3n realizada con \xe9xito Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80 This is the body """) headers = utils.parse_mime_headers(doc_file) utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440' if six.PY2: utf8 = utf8.encode('utf-8') expected_headers = { 'Content-Disposition': 'form-data; name="file_size"', 'Foo': "Bar", 'Not-Title-Cased': "quux", # Encoded-word or non-ASCII values are treated just like any other # bytestring (at least for now) 'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=", 'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=", 'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito", 'Utf-8': utf8, } self.assertEqual(expected_headers, headers) self.assertEqual(b"This is the body\n", doc_file.read()) class FakeResponse(object): def __init__(self, status, headers, body): self.status = status self.headers = HeaderKeyDict(headers) self.body = StringIO(body) def getheader(self, header_name): return str(self.headers.get(header_name, '')) def getheaders(self): return self.headers.items() def read(self, length=None): return self.body.read(length) def readline(self, length=None): return self.body.readline(length) class TestDocumentItersToHTTPResponseBody(unittest.TestCase): def test_no_parts(self): body = utils.document_iters_to_http_response_body( iter([]), 'dontcare', multipart=False, logger=FakeLogger()) self.assertEqual(body, '') def test_single_part(self): body = "time flies like an arrow; fruit flies like a banana" doc_iters = [{'part_iter': iter(StringIO(body).read, '')}] resp_body = ''.join( utils.document_iters_to_http_response_body( iter(doc_iters), 'dontcare', multipart=False, logger=FakeLogger())) self.assertEqual(resp_body, body) def test_multiple_parts(self): part1 = "two peanuts were walking down a railroad track" part2 = "and one was a salted. ... peanut." 
        doc_iters = [{
            'start_byte': 88,
            'end_byte': 133,
            'content_type': 'application/peanut',
            'entity_length': 1024,
            'part_iter': iter(StringIO(part1).read, ''),
        }, {
            'start_byte': 500,
            'end_byte': 532,
            'content_type': 'application/salted',
            'entity_length': 1024,
            'part_iter': iter(StringIO(part2).read, ''),
        }]

        resp_body = ''.join(
            utils.document_iters_to_http_response_body(
                iter(doc_iters), 'boundaryboundary',
                multipart=True, logger=FakeLogger()))
        self.assertEqual(resp_body, (
            "--boundaryboundary\r\n" +
            # This is a little too strict; we don't actually care that the
            # headers are in this order, but the test is much more legible
            # this way.
            "Content-Type: application/peanut\r\n" +
            "Content-Range: bytes 88-133/1024\r\n" +
            "\r\n" +
            part1 + "\r\n" +
            "--boundaryboundary\r\n"
            "Content-Type: application/salted\r\n" +
            "Content-Range: bytes 500-532/1024\r\n" +
            "\r\n" +
            part2 + "\r\n" +
            "--boundaryboundary--"))

    def test_closed_part_iterator(self):
        useful_iter_mock = mock.MagicMock()
        useful_iter_mock.__iter__.return_value = ['']
        body_iter = utils.document_iters_to_http_response_body(
            iter([{'part_iter': useful_iter_mock}]), 'dontcare',
            multipart=False, logger=FakeLogger())
        body = ''
        for s in body_iter:
            body += s
        self.assertEqual(body, '')
        useful_iter_mock.close.assert_called_once_with()

        # Calling "close" on the mock will now raise an AttributeError
        del useful_iter_mock.close
        body_iter = utils.document_iters_to_http_response_body(
            iter([{'part_iter': useful_iter_mock}]), 'dontcare',
            multipart=False, logger=FakeLogger())
        body = ''
        for s in body_iter:
            body += s


class TestPairs(unittest.TestCase):
    def test_pairs(self):
        items = [10, 20, 30, 40, 50, 60]
        got_pairs = set(utils.pairs(items))
        self.assertEqual(got_pairs,
                         set([(10, 20), (10, 30), (10, 40), (10, 50),
                              (10, 60), (20, 30), (20, 40), (20, 50),
                              (20, 60), (30, 40), (30, 50), (30, 60),
                              (40, 50), (40, 60), (50, 60)]))


class TestSocketStringParser(unittest.TestCase):
    def test_socket_string_parser(self):
        default = 1337
        addrs = [('1.2.3.4', '1.2.3.4', default),
                 ('1.2.3.4:5000', '1.2.3.4', 5000),
                 ('[dead:beef::1]', 'dead:beef::1', default),
                 ('[dead:beef::1]:5000', 'dead:beef::1', 5000),
                 ('example.com', 'example.com', default),
                 ('example.com:5000', 'example.com', 5000),
                 ('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
                 ('1.2.3.4:10:20', None, None),
                 ('dead:beef::1:5000', None, None)]

        for addr, expected_host, expected_port in addrs:
            if expected_host:
                host, port = utils.parse_socket_string(addr, default)
                self.assertEqual(expected_host, host)
                self.assertEqual(expected_port, int(port))
            else:
                with self.assertRaises(ValueError):
                    utils.parse_socket_string(addr, default)


class TestHashForFileFunction(unittest.TestCase):
    def setUp(self):
        self.tempfilename = tempfile.mktemp()

    def tearDown(self):
        try:
            os.unlink(self.tempfilename)
        except OSError:
            pass

    def test_hash_for_file_smallish(self):
        stub_data = b'some data'
        with open(self.tempfilename, 'wb') as fd:
            fd.write(stub_data)
        with mock.patch('swift.common.utils.md5') as mock_md5:
            mock_hasher = mock_md5.return_value
            rv = utils.md5_hash_for_file(self.tempfilename)
        self.assertTrue(mock_hasher.hexdigest.called)
        self.assertEqual(rv, mock_hasher.hexdigest.return_value)
        self.assertEqual([mock.call(stub_data)],
                         mock_hasher.update.call_args_list)

    def test_hash_for_file_big(self):
        num_blocks = 10
        block_size = utils.MD5_BLOCK_READ_BYTES
        truncate = 523
        start_char = ord('a')
        expected_blocks = [chr(i).encode('utf8') * block_size
                           for i in range(start_char,
                                          start_char + num_blocks)]
        full_data = b''.join(expected_blocks)
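        # full_data is num_blocks * block_size bytes of distinct
        # single-character blocks; truncating 523 bytes below exercises the
        # short final read in md5_hash_for_file.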
trimmed_data = full_data[:-truncate] # sanity self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate) with open(self.tempfilename, 'wb') as fd: fd.write(trimmed_data) with mock.patch('swift.common.utils.md5') as mock_md5: mock_hasher = mock_md5.return_value rv = utils.md5_hash_for_file(self.tempfilename) self.assertTrue(mock_hasher.hexdigest.called) self.assertEqual(rv, mock_hasher.hexdigest.return_value) self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list)) found_blocks = [] for i, (expected_block, call) in enumerate(zip( expected_blocks, mock_hasher.update.call_args_list)): args, kwargs = call self.assertEqual(kwargs, {}) self.assertEqual(1, len(args)) block = args[0] if i < num_blocks - 1: self.assertEqual(block, expected_block) else: self.assertEqual(block, expected_block[:-truncate]) found_blocks.append(block) self.assertEqual(b''.join(found_blocks), trimmed_data) def test_hash_for_file_empty(self): with open(self.tempfilename, 'wb'): pass with mock.patch('swift.common.utils.md5') as mock_md5: mock_hasher = mock_md5.return_value rv = utils.md5_hash_for_file(self.tempfilename) self.assertTrue(mock_hasher.hexdigest.called) self.assertIs(rv, mock_hasher.hexdigest.return_value) self.assertEqual([], mock_hasher.update.call_args_list) def test_hash_for_file_brittle(self): data_to_expected_hash = { b'': 'd41d8cd98f00b204e9800998ecf8427e', b'some data': '1e50210a0202497fb79bc38b6ade6c34', (b'a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3', } # unlike some other places where the concrete implementation really # matters for backwards compatibility these brittle tests are probably # not needed or justified, if a future maintainer rips them out later # they're probably doing the right thing failures = [] for stub_data, expected_hash in data_to_expected_hash.items(): with open(self.tempfilename, 'wb') as fd: fd.write(stub_data) rv = utils.md5_hash_for_file(self.tempfilename) try: self.assertEqual(expected_hash, rv) except AssertionError: trim_cap = 80 if len(stub_data) > trim_cap: stub_data = '%s...<truncated>' % stub_data[:trim_cap] failures.append('hash for %r was %s instead of expected %s' % ( stub_data, rv, expected_hash)) if failures: self.fail('Some data did not compute expected hash:\n' + '\n'.join(failures)) class TestSetSwiftDir(unittest.TestCase): def setUp(self): self.swift_dir = tempfile.mkdtemp() self.swift_conf = os.path.join(self.swift_dir, 'swift.conf') self.policy_name = ''.join(random.sample(string.ascii_letters, 20)) with open(self.swift_conf, "wt") as sc: sc.write(''' [swift-hash] swift_hash_path_suffix = changeme [storage-policy:0] name = default default = yes [storage-policy:1] name = %s ''' % self.policy_name) def tearDown(self): shutil.rmtree(self.swift_dir, ignore_errors=True) def test_set_swift_dir(self): set_swift_dir(None) reload_storage_policies() self.assertIsNone(POLICIES.get_by_name(self.policy_name)) set_swift_dir(self.swift_dir) reload_storage_policies() self.assertIsNotNone(POLICIES.get_by_name(self.policy_name)) class TestPipeMutex(unittest.TestCase): def setUp(self): self.mutex = utils.PipeMutex() def tearDown(self): self.mutex.close() def test_nonblocking(self): evt_lock1 = eventlet.event.Event() evt_lock2 = eventlet.event.Event() evt_unlock = eventlet.event.Event() def get_the_lock(): self.mutex.acquire() evt_lock1.send('got the lock') evt_lock2.wait() self.mutex.release() evt_unlock.send('released the lock') eventlet.spawn(get_the_lock) evt_lock1.wait() # Now, the other greenthread has the lock. 
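        # ...so a non-blocking acquire from this greenthread must fail: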
self.assertFalse(self.mutex.acquire(blocking=False)) evt_lock2.send('please release the lock') evt_unlock.wait() # The other greenthread has released the lock. self.assertTrue(self.mutex.acquire(blocking=False)) def test_recursive(self): self.assertTrue(self.mutex.acquire(blocking=False)) self.assertTrue(self.mutex.acquire(blocking=False)) def try_acquire_lock(): return self.mutex.acquire(blocking=False) self.assertFalse(eventlet.spawn(try_acquire_lock).wait()) self.mutex.release() self.assertFalse(eventlet.spawn(try_acquire_lock).wait()) self.mutex.release() self.assertTrue(eventlet.spawn(try_acquire_lock).wait()) def test_release_without_acquire(self): self.assertRaises(RuntimeError, self.mutex.release) def test_too_many_releases(self): self.mutex.acquire() self.mutex.release() self.assertRaises(RuntimeError, self.mutex.release) def test_wrong_releaser(self): self.mutex.acquire() with quiet_eventlet_exceptions(): self.assertRaises(RuntimeError, eventlet.spawn(self.mutex.release).wait) def test_blocking(self): evt = eventlet.event.Event() sequence = [] def coro1(): eventlet.sleep(0) # let coro2 go self.mutex.acquire() sequence.append('coro1 acquire') evt.send('go') self.mutex.release() sequence.append('coro1 release') def coro2(): evt.wait() # wait for coro1 to start us self.mutex.acquire() sequence.append('coro2 acquire') self.mutex.release() sequence.append('coro2 release') c1 = eventlet.spawn(coro1) c2 = eventlet.spawn(coro2) c1.wait() c2.wait() self.assertEqual(sequence, [ 'coro1 acquire', 'coro1 release', 'coro2 acquire', 'coro2 release']) def test_blocking_tpool(self): # Note: this test's success isn't a guarantee that the mutex is # working. However, this test's failure means that the mutex is # definitely broken. sequence = [] def do_stuff(): n = 10 while n > 0: self.mutex.acquire() sequence.append("<") eventlet.sleep(0.0001) sequence.append(">") self.mutex.release() n -= 1 greenthread1 = eventlet.spawn(do_stuff) greenthread2 = eventlet.spawn(do_stuff) real_thread1 = eventlet.patcher.original('threading').Thread( target=do_stuff) real_thread1.start() real_thread2 = eventlet.patcher.original('threading').Thread( target=do_stuff) real_thread2.start() greenthread1.wait() greenthread2.wait() real_thread1.join() real_thread2.join() self.assertEqual(''.join(sequence), "<>" * 40) def test_blocking_preserves_ownership(self): pthread1_event = eventlet.patcher.original('threading').Event() pthread2_event1 = eventlet.patcher.original('threading').Event() pthread2_event2 = eventlet.patcher.original('threading').Event() thread_id = [] owner = [] def pthread1(): thread_id.append(id(eventlet.greenthread.getcurrent())) self.mutex.acquire() owner.append(self.mutex.owner) pthread2_event1.set() orig_os_write = utils.os.write def patched_os_write(*a, **kw): try: return orig_os_write(*a, **kw) finally: pthread1_event.wait() with mock.patch.object(utils.os, 'write', patched_os_write): self.mutex.release() pthread2_event2.set() def pthread2(): pthread2_event1.wait() # ensure pthread1 acquires lock first thread_id.append(id(eventlet.greenthread.getcurrent())) self.mutex.acquire() pthread1_event.set() pthread2_event2.wait() owner.append(self.mutex.owner) self.mutex.release() real_thread1 = eventlet.patcher.original('threading').Thread( target=pthread1) real_thread1.start() real_thread2 = eventlet.patcher.original('threading').Thread( target=pthread2) real_thread2.start() real_thread1.join() real_thread2.join() self.assertEqual(thread_id, owner) self.assertIsNone(self.mutex.owner) @classmethod def 
tearDownClass(cls): # PipeMutex turns this off when you instantiate one eventlet.debug.hub_prevent_multiple_readers(True) class TestDistributeEvenly(unittest.TestCase): def test_evenly_divided(self): out = utils.distribute_evenly(range(12), 3) self.assertEqual(out, [ [0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11], ]) out = utils.distribute_evenly(range(12), 4) self.assertEqual(out, [ [0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11], ]) def test_uneven(self): out = utils.distribute_evenly(range(11), 3) self.assertEqual(out, [ [0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8], ]) def test_just_one(self): out = utils.distribute_evenly(range(5), 1) self.assertEqual(out, [[0, 1, 2, 3, 4]]) def test_more_buckets_than_items(self): out = utils.distribute_evenly(range(5), 7) self.assertEqual(out, [[0], [1], [2], [3], [4], [], []]) if __name__ == '__main__': unittest.main()
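# Illustrative note: distribute_evenly deals items out round-robin, so
# utils.distribute_evenly(range(5), 3) would return [[0, 3], [1, 4], [2]];
# earlier buckets absorb any remainder, matching the expectations asserted
# in TestDistributeEvenly above.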
apache-2.0
varunarya10/python-ironicclient
ironicclient/client.py
3
4613
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneclient.v2_0 import client as ksclient from ironicclient.common import utils from ironicclient import exc from ironicclient.openstack.common import gettextutils gettextutils.install('ironicclient') def _get_ksclient(**kwargs): """Get an endpoint and auth token from Keystone. :param kwargs: keyword args containing credentials: * username: name of user * password: user's password * auth_url: endpoint to authenticate against * insecure: allow insecure SSL (no cert verification) * tenant_{name|id}: name or ID of tenant """ return ksclient.Client(username=kwargs.get('username'), password=kwargs.get('password'), tenant_id=kwargs.get('tenant_id'), tenant_name=kwargs.get('tenant_name'), auth_url=kwargs.get('auth_url'), insecure=kwargs.get('insecure')) def _get_endpoint(client, **kwargs): """Get an endpoint using the provided keystone client.""" attr = None filter_value = None if kwargs.get('region_name'): attr = 'region' filter_value = kwargs.get('region_name') return client.service_catalog.url_for( service_type=kwargs.get('service_type') or 'baremetal', attr=attr, filter_value=filter_value, endpoint_type=kwargs.get('endpoint_type') or 'publicURL') def get_client(api_version, **kwargs): """Get an authenticated client, based on the credentials in args. :param api_version: the API version to use. Valid value: '1'. 
:param kwargs: keyword args containing credentials, either: * os_auth_token: pre-existing token to re-use * ironic_url: ironic API endpoint or: * os_username: name of user * os_password: user's password * os_auth_url: endpoint to authenticate against * insecure: allow insecure SSL (no cert verification) * os_tenant_{name|id}: name or ID of tenant """ if kwargs.get('os_auth_token') and kwargs.get('ironic_url'): token = kwargs.get('os_auth_token') endpoint = kwargs.get('ironic_url') auth_ref = None elif (kwargs.get('os_username') and kwargs.get('os_password') and kwargs.get('os_auth_url') and (kwargs.get('os_tenant_id') or kwargs.get('os_tenant_name'))): ks_kwargs = { 'username': kwargs.get('os_username'), 'password': kwargs.get('os_password'), 'tenant_id': kwargs.get('os_tenant_id'), 'tenant_name': kwargs.get('os_tenant_name'), 'auth_url': kwargs.get('os_auth_url'), 'service_type': kwargs.get('os_service_type'), 'endpoint_type': kwargs.get('os_endpoint_type'), 'insecure': kwargs.get('insecure'), } _ksclient = _get_ksclient(**ks_kwargs) token = (kwargs.get('os_auth_token') if kwargs.get('os_auth_token') else _ksclient.auth_token) ks_kwargs['region_name'] = kwargs.get('os_region_name') endpoint = (kwargs.get('ironic_url') or _get_endpoint(_ksclient, **ks_kwargs)) auth_ref = _ksclient.auth_ref else: e = (_('Must provide Keystone credentials or user-defined endpoint ' 'and token')) raise exc.AmbiguousAuthSystem(e) cli_kwargs = { 'token': token, 'insecure': kwargs.get('insecure'), 'timeout': kwargs.get('timeout'), 'ca_file': kwargs.get('ca_file'), 'cert_file': kwargs.get('cert_file'), 'key_file': kwargs.get('key_file'), 'auth_ref': auth_ref, } return Client(api_version, endpoint, **cli_kwargs) def Client(version, *args, **kwargs): module = utils.import_versioned_module(version, 'client') client_class = getattr(module, 'Client') return client_class(*args, **kwargs)
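# Example usage (illustrative only; the URLs and credentials below are
# placeholders, not real endpoints):
#
#     from ironicclient import client
#     ironic = client.get_client('1',
#                                os_username='admin',
#                                os_password='secret',
#                                os_tenant_name='admin',
#                                os_auth_url='http://127.0.0.1:5000/v2.0')
#
# or, re-using an existing token against a known endpoint:
#
#     ironic = client.get_client('1',
#                                os_auth_token='<token>',
#                                ironic_url='http://127.0.0.1:6385')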
apache-2.0
amohanta/miasm
miasm2/expression/expression_helper.py
5
18131
# # Copyright (C) 2011 EADS France, Fabrice Desclaux <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Expressions manipulation functions import itertools import collections import random import string import miasm2.expression.expression as m2_expr def parity(a): tmp = (a) & 0xFFL cpt = 1 while tmp != 0: cpt ^= tmp & 1 tmp >>= 1 return cpt def merge_sliceto_slice(args): sources = {} non_slice = {} sources_int = {} for a in args: if isinstance(a[0], m2_expr.ExprInt): # sources_int[a.start] = a # copy ExprInt because we will inplace modify arg just below # /!\ TODO XXX never ever modify inplace args... sources_int[a[1]] = (m2_expr.ExprInt_fromsize(a[2] - a[1], a[0].arg.__class__( a[0].arg)), a[1], a[2]) elif isinstance(a[0], m2_expr.ExprSlice): if not a[0].arg in sources: sources[a[0].arg] = [] sources[a[0].arg].append(a) else: non_slice[a[1]] = a # find max stop to determine size max_size = None for a in args: if max_size is None or max_size < a[2]: max_size = a[2] # first simplify all num slices final_sources = [] sorted_s = [] for x in sources_int.values(): x = list(x) # mask int v = x[0].arg & ((1 << (x[2] - x[1])) - 1) x[0] = m2_expr.ExprInt_from(x[0], v) x = tuple(x) sorted_s.append((x[1], x)) sorted_s.sort() while sorted_s: start, v = sorted_s.pop() out = [m2_expr.ExprInt(v[0].arg), v[1], v[2]] size = v[2] - v[1] while sorted_s: if sorted_s[-1][1][2] != start: break s_start, s_stop = sorted_s[-1][1][1], sorted_s[-1][1][2] size += s_stop - s_start a = m2_expr.mod_size2uint[size]( (int(out[0].arg) << (out[1] - s_start)) + int(sorted_s[-1][1][0].arg)) out[0] = m2_expr.ExprInt(a) sorted_s.pop() out[1] = s_start out[0] = m2_expr.ExprInt_fromsize(size, out[0].arg) final_sources.append((start, out)) final_sources_int = final_sources # check if same sources have corresponding start/stop # is slice AND is sliceto simp_sources = [] for args in sources.values(): final_sources = [] sorted_s = [] for x in args: sorted_s.append((x[1], x)) sorted_s.sort() while sorted_s: start, v = sorted_s.pop() ee = v[0].arg[v[0].start:v[0].stop] out = ee, v[1], v[2] while sorted_s: if sorted_s[-1][1][2] != start: break if sorted_s[-1][1][0].stop != out[0].start: break start = sorted_s[-1][1][1] # out[0].start = sorted_s[-1][1][0].start o_e, _, o_stop = out o1, o2 = sorted_s[-1][1][0].start, o_e.stop o_e = o_e.arg[o1:o2] out = o_e, start, o_stop # update _size # out[0]._size = out[0].stop-out[0].start sorted_s.pop() out = out[0], start, out[2] final_sources.append((start, out)) simp_sources += final_sources simp_sources += final_sources_int for i, v in non_slice.items(): simp_sources.append((i, v)) simp_sources.sort() simp_sources = [x[1] for x in simp_sources] return simp_sources op_propag_cst = ['+', '*', '^', '&', '|', '>>', '<<', "a>>", ">>>", "<<<", "/", "%", 'idiv', 'imod', 'umod', 'udiv'] def is_pure_int(e): """ return True if expr is only 
composed with integers /!\ ExprCond returns True is src1 and src2 are integers """ def modify_cond(e): if isinstance(e, m2_expr.ExprCond): return e.src1 | e.src2 return e def find_int(e, s): if isinstance(e, m2_expr.ExprId) or isinstance(e, m2_expr.ExprMem): s.add(e) return e s = set() new_e = e.visit(modify_cond) new_e.visit(lambda x: find_int(x, s)) if s: return False return True def is_int_or_cond_src_int(e): if isinstance(e, m2_expr.ExprInt): return True if isinstance(e, m2_expr.ExprCond): return (isinstance(e.src1, m2_expr.ExprInt) and isinstance(e.src2, m2_expr.ExprInt)) return False def fast_unify(seq, idfun=None): # order preserving unifying list function if idfun is None: idfun = lambda x: x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result def get_missing_interval(all_intervals, i_min=0, i_max=32): """Return a list of missing interval in all_interval @all_interval: list of (int, int) @i_min: int, minimal missing interval bound @i_max: int, maximal missing interval bound""" my_intervals = all_intervals[:] my_intervals.sort() my_intervals.append((i_max, i_max)) missing_i = [] last_pos = i_min for start, stop in my_intervals: if last_pos != start: missing_i.append((last_pos, start)) last_pos = stop return missing_i class Variables_Identifier(object): """Identify variables in an expression. Returns: - variables with their corresponding values - original expression with variables translated """ # Attribute used to distinguish created variables from original ones is_var_ident = "is_var_ident" def __init__(self, expr, var_prefix="v"): """Set the expression @expr to handle and launch variable identification process @expr: Expr instance @var_prefix: (optional) prefix of the variable name, default is 'v'""" # Init self.var_indice = itertools.count() self.var_asked = set() self._vars = {} # VarID -> Expr self.var_prefix = var_prefix # Launch recurrence self.find_variables_rec(expr) # Compute inter-variable dependencies has_change = True while has_change: has_change = False for var_id, var_value in self._vars.iteritems(): cur = var_value # Do not replace with itself to_replace = {v_val:v_id for v_id, v_val in self._vars.iteritems() if v_id != var_id} var_value = var_value.replace_expr(to_replace) if cur != var_value: # Force @self._vars update has_change = True self._vars[var_id] = var_value break # Replace in the original equation self._equation = expr.replace_expr({v_val: v_id for v_id, v_val in self._vars.iteritems()}) # Compute variables dependencies self._vars_ordered = collections.OrderedDict() todo = set(self._vars.iterkeys()) needs = {} ## Build initial needs for var_id, var_expr in self._vars.iteritems(): ### Handle corner cases while using Variable Identifier on an ### already computed equation needs[var_id] = [var_name for var_name in var_expr.get_r(mem_read=True) if self.is_var_identifier(var_name) and \ var_name in todo and \ var_name != var_id] ## Build order list while todo: done = set() for var_id in todo: all_met = True for need in needs[var_id]: if need not in self._vars_ordered: # A dependency is not met all_met = False break if not all_met: continue # All dependencies are already met, add current self._vars_ordered[var_id] = self._vars[var_id] done.add(var_id) # Update the todo list for element_done in done: todo.remove(element_done) @classmethod def is_var_identifier(cls, expr): "Return True iff @expr is a variable identifier" if not isinstance(expr, m2_expr.ExprId): return False 
return hasattr(expr, cls.is_var_ident) and \ getattr(expr, cls.is_var_ident) == True def find_variables_rec(self, expr): """Recursive method called by find_variable to expand @expr. Set @var_names and @var_values. This implementation is faster than an expression visitor because we do not rebuild each expression. """ if (expr in self.var_asked): # Expr has already been asked if (expr not in self._vars.values()): # Create var identifier = m2_expr.ExprId("%s%s" % (self.var_prefix, self.var_indice.next()), size = expr.size) setattr(identifier, self.__class__.is_var_ident, True) self._vars[identifier] = expr # Recursion stop case return else: # First time for @expr self.var_asked.add(expr) if isinstance(expr, m2_expr.ExprOp): for a in expr.args: self.find_variables_rec(a) elif isinstance(expr, m2_expr.ExprInt): pass elif isinstance(expr, m2_expr.ExprId): pass elif isinstance(expr, m2_expr.ExprMem): self.find_variables_rec(expr.arg) elif isinstance(expr, m2_expr.ExprCompose): for a in expr.args: self.find_variables_rec(list(a)[0]) elif isinstance(expr, m2_expr.ExprSlice): self.find_variables_rec(expr.arg) elif isinstance(expr, m2_expr.ExprCond): self.find_variables_rec(expr.cond) self.find_variables_rec(expr.src1) self.find_variables_rec(expr.src2) else: raise NotImplementedError("Type not handled: %s" % expr) @property def vars(self): return self._vars_ordered @property def equation(self): return self._equation def __str__(self): "Display variables and final equation" out = "" for var_id, var_expr in self.vars.iteritems(): out += "%s = %s\n" % (var_id, var_expr) out += "Final: %s" % self.equation return out class ExprRandom(object): """Return an expression randomly generated""" # Identifiers length identifier_len = 5 # Identifiers' name charset identifier_charset = string.letters # Number max value number_max = 0xFFFFFFFF # Available operations operations_by_args_number = {1: ["-"], 2: ["<<", "<<<", ">>", ">>>"], "2+": ["+", "*", "&", "|", "^"], } # Maximum number of argument for operations operations_max_args_number = 5 # If set, output expression is a perfect tree perfect_tree = True # Max argument size in slice, relative to slice size slice_add_size = 10 # Maximum number of layer in compose compose_max_layer = 5 # Maximum size of memory address in bits memory_max_address_size = 32 # Re-use already generated elements to mimic a more realistic behavior reuse_element = True generated_elements = {} # (depth, size) -> [Expr] @classmethod def identifier(cls, size=32): """Return a random identifier @size: (optional) identifier size """ return m2_expr.ExprId("".join([random.choice(cls.identifier_charset) for _ in xrange(cls.identifier_len)]), size=size) @classmethod def number(cls, size=32): """Return a random number @size: (optional) number max bits """ num = random.randint(0, cls.number_max % (2**size)) return m2_expr.ExprInt_fromsize(size, num) @classmethod def atomic(cls, size=32): """Return an atomic Expression @size: (optional) Expr size """ available_funcs = [cls.identifier, cls.number] return random.choice(available_funcs)(size=size) @classmethod def operation(cls, size=32, depth=1): """Return an ExprOp @size: (optional) Operation size @depth: (optional) Expression depth """ operand_type = random.choice(cls.operations_by_args_number.keys()) if isinstance(operand_type, str) and "+" in operand_type: number_args = random.randint(int(operand_type[:-1]), cls.operations_max_args_number) else: number_args = operand_type args = [cls._gen(size=size, depth=depth - 1) for _ in xrange(number_args)] 
operand = random.choice(cls.operations_by_args_number[operand_type]) return m2_expr.ExprOp(operand, *args) @classmethod def slice(cls, size=32, depth=1): """Return an ExprSlice @size: (optional) Operation size @depth: (optional) Expression depth """ start = random.randint(0, size) stop = start + size return cls._gen(size=random.randint(stop, stop + cls.slice_add_size), depth=depth - 1)[start:stop] @classmethod def compose(cls, size=32, depth=1): """Return an ExprCompose @size: (optional) Operation size @depth: (optional) Expression depth """ # First layer upper_bound = random.randint(1, size) args = [(cls._gen(size=upper_bound, depth=depth - 1), 0, upper_bound)] # Next layers while (upper_bound < size): if len(args) == (cls.compose_max_layer - 1): # We reach the maximum size upper_bound = size else: upper_bound = random.randint(args[-1][-1] + 1, size) args.append((cls._gen(size=upper_bound - args[-1][-1]), args[-1][-1], upper_bound)) return m2_expr.ExprCompose(args) @classmethod def memory(cls, size=32, depth=1): """Return an ExprMem @size: (optional) Operation size @depth: (optional) Expression depth """ address_size = random.randint(1, cls.memory_max_address_size) return m2_expr.ExprMem(cls._gen(size=address_size, depth=depth - 1), size=size) @classmethod def _gen(cls, size=32, depth=1): """Internal function for generating sub-expression according to options @size: (optional) Operation size @depth: (optional) Expression depth /!\ @generated_elements is left modified """ # Perfect tree handling if not cls.perfect_tree: depth = random.randint(max(0, depth - 2), depth) # Element re-use if cls.reuse_element and random.choice([True, False]) and \ (depth, size) in cls.generated_elements: return random.choice(cls.generated_elements[(depth, size)]) # Recursion stop if depth == 0: return cls.atomic(size=size) # Build a more complex expression available_funcs = [cls.operation, cls.slice, cls.compose, cls.memory] gen = random.choice(available_funcs)(size=size, depth=depth) # Save it new_value = cls.generated_elements.get((depth, size), []) + [gen] cls.generated_elements[(depth, size)] = new_value return gen @classmethod def get(cls, size=32, depth=1, clean=True): """Return a randomly generated expression @size: (optional) Operation size @depth: (optional) Expression depth @clean: (optional) Clean expression cache between two calls """ # Init state if clean: cls.generated_elements = {} # Get an element got = cls._gen(size=size, depth=depth) # Clear state if clean: cls.generated_elements = {} return got def _expr_cmp_gen(arg1, arg2): return (arg2 - arg1) ^ ((arg2 ^ arg1) & ((arg2 - arg1) ^ arg2)) def expr_cmpu(arg1, arg2): """ Returns a one bit long Expression: * 1 if @arg1 is strictly greater than @arg2 (unsigned) * 0 otherwise. """ return (_expr_cmp_gen(arg1, arg2) ^ arg2 ^ arg1).msb() def expr_cmps(arg1, arg2): """ Returns a one bit long Expression: * 1 if @arg1 is strictly greater than @arg2 (signed) * 0 otherwise. """ return _expr_cmp_gen(arg1, arg2).msb()
gpl-2.0
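A quick driver makes the helpers above concrete. This is a minimal sketch, not part of the original file; it assumes the file is importable as miasm2.expression.expression_helper (Python 2, matching the source's print statements and iteritems usage):

import miasm2.expression.expression as m2_expr
from miasm2.expression.expression_helper import (Variables_Identifier,
                                                 get_missing_interval)

# get_missing_interval() returns the holes between known intervals:
print get_missing_interval([(0, 4), (8, 16)], i_min=0, i_max=32)
# -> [(4, 8), (16, 32)]

# Variables_Identifier factors repeated sub-expressions into v0, v1, ...:
a = m2_expr.ExprId("a", 32)
b = m2_expr.ExprId("b", 32)
v = Variables_Identifier((a + b) * (a + b))
print v           # prints the variable bindings, then "Final: <equation>"
print v.equation  # the original expression rewritten in terms of v0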
owaiskhan/Retransmission-Combining
gnuradio-core/src/python/gnuradio/gr/qa_nlog10.py
11
1510
#!/usr/bin/env python
#
# Copyright 2005,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gr_unittest


class test_nlog10(gr_unittest.TestCase):

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        src_data = (-10, 0, 10, 100, 1000, 10000, 100000)
        expected_result = (-180, -180, 10, 20, 30, 40, 50)
        src = gr.vector_source_f(src_data)
        op = gr.nlog10_ff(10)
        dst = gr.vector_sink_f()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data)


if __name__ == '__main__':
    gr_unittest.run(test_nlog10, "test_nlog10.xml")
gpl-3.0
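Outside the gr_unittest harness the same block can be driven directly. This is a hedged sketch reusing only the calls the test itself makes (same gr namespace, GNU Radio 3.x era):

from gnuradio import gr

tb = gr.top_block()
src = gr.vector_source_f((1.0, 10.0, 100.0))
op = gr.nlog10_ff(10)    # n = 10, so the output is 10*log10(x), i.e. dB
dst = gr.vector_sink_f()
tb.connect(src, op, dst)
tb.run()
print dst.data()         # approximately (0.0, 10.0, 20.0)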
Hoekz/hackness-monster
venv/lib/python2.7/site-packages/jinja2/parser.py
336
35442
# -*- coding: utf-8 -*- """ jinja2.parser ~~~~~~~~~~~~~ Implements the template parser. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ from jinja2 import nodes from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError from jinja2.lexer import describe_token, describe_token_expr from jinja2._compat import imap _statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print', 'macro', 'include', 'from', 'import', 'set']) _compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq']) class Parser(object): """This is the central parsing class Jinja2 uses. It's passed to extensions and can be used to parse expressions or statements. """ def __init__(self, environment, source, name=None, filename=None, state=None): self.environment = environment self.stream = environment._tokenize(source, name, filename, state) self.name = name self.filename = filename self.closed = False self.extensions = {} for extension in environment.iter_extensions(): for tag in extension.tags: self.extensions[tag] = extension.parse self._last_identifier = 0 self._tag_stack = [] self._end_token_stack = [] def fail(self, msg, lineno=None, exc=TemplateSyntaxError): """Convenience method that raises `exc` with the message, passed line number or last line number as well as the current name and filename. """ if lineno is None: lineno = self.stream.current.lineno raise exc(msg, lineno, self.name, self.filename) def _fail_ut_eof(self, name, end_token_stack, lineno): expected = [] for exprs in end_token_stack: expected.extend(imap(describe_token_expr, exprs)) if end_token_stack: currently_looking = ' or '.join( "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]) else: currently_looking = None if name is None: message = ['Unexpected end of template.'] else: message = ['Encountered unknown tag \'%s\'.' % name] if currently_looking: if name is not None and name in expected: message.append('You probably made a nesting mistake. Jinja ' 'is expecting this tag, but currently looking ' 'for %s.' % currently_looking) else: message.append('Jinja was looking for the following tags: ' '%s.' % currently_looking) if self._tag_stack: message.append('The innermost block that needs to be ' 'closed is \'%s\'.' % self._tag_stack[-1]) self.fail(' '.join(message), lineno) def fail_unknown_tag(self, name, lineno=None): """Called if the parser encounters an unknown tag. Tries to fail with a human readable error message that could help to identify the problem. 
""" return self._fail_ut_eof(name, self._end_token_stack, lineno) def fail_eof(self, end_tokens=None, lineno=None): """Like fail_unknown_tag but for end of template situations.""" stack = list(self._end_token_stack) if end_tokens is not None: stack.append(end_tokens) return self._fail_ut_eof(None, stack, lineno) def is_tuple_end(self, extra_end_rules=None): """Are we at the end of a tuple?""" if self.stream.current.type in ('variable_end', 'block_end', 'rparen'): return True elif extra_end_rules is not None: return self.stream.current.test_any(extra_end_rules) return False def free_identifier(self, lineno=None): """Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" self._last_identifier += 1 rv = object.__new__(nodes.InternalName) nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno) return rv def parse_statement(self): """Parse a single statement.""" token = self.stream.current if token.type != 'name': self.fail('tag name expected', token.lineno) self._tag_stack.append(token.value) pop_tag = True try: if token.value in _statement_keywords: return getattr(self, 'parse_' + self.stream.current.value)() if token.value == 'call': return self.parse_call_block() if token.value == 'filter': return self.parse_filter_block() ext = self.extensions.get(token.value) if ext is not None: return ext(self) # did not work out, remove the token we pushed by accident # from the stack so that the unknown tag fail function can # produce a proper error message. self._tag_stack.pop() pop_tag = False self.fail_unknown_tag(token.value, token.lineno) finally: if pop_tag: self._tag_stack.pop() def parse_statements(self, end_tokens, drop_needle=False): """Parse multiple statements into a list until one of the end tokens is reached. This is used to parse the body of statements as it also parses template data if appropriate. The parser checks first if the current token is a colon and skips it if there is one. Then it checks for the block end and parses until if one of the `end_tokens` is reached. Per default the active token in the stream at the end of the call is the matched end token. If this is not wanted `drop_needle` can be set to `True` and the end token is removed. """ # the first token may be a colon for python compatibility self.stream.skip_if('colon') # in the future it would be possible to add whole code sections # by adding some sort of end of statement token and parsing those here. 
self.stream.expect('block_end') result = self.subparse(end_tokens) # we reached the end of the template too early, the subparser # does not check for this, so we do that now if self.stream.current.type == 'eof': self.fail_eof(end_tokens) if drop_needle: next(self.stream) return result def parse_set(self): """Parse an assign statement.""" lineno = next(self.stream).lineno target = self.parse_assign_target() if self.stream.skip_if('assign'): expr = self.parse_tuple() return nodes.Assign(target, expr, lineno=lineno) body = self.parse_statements(('name:endset',), drop_needle=True) return nodes.AssignBlock(target, body, lineno=lineno) def parse_for(self): """Parse a for loop.""" lineno = self.stream.expect('name:for').lineno target = self.parse_assign_target(extra_end_rules=('name:in',)) self.stream.expect('name:in') iter = self.parse_tuple(with_condexpr=False, extra_end_rules=('name:recursive',)) test = None if self.stream.skip_if('name:if'): test = self.parse_expression() recursive = self.stream.skip_if('name:recursive') body = self.parse_statements(('name:endfor', 'name:else')) if next(self.stream).value == 'endfor': else_ = [] else: else_ = self.parse_statements(('name:endfor',), drop_needle=True) return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) def parse_if(self): """Parse an if construct.""" node = result = nodes.If(lineno=self.stream.expect('name:if').lineno) while 1: node.test = self.parse_tuple(with_condexpr=False) node.body = self.parse_statements(('name:elif', 'name:else', 'name:endif')) token = next(self.stream) if token.test('name:elif'): new_node = nodes.If(lineno=self.stream.current.lineno) node.else_ = [new_node] node = new_node continue elif token.test('name:else'): node.else_ = self.parse_statements(('name:endif',), drop_needle=True) else: node.else_ = [] break return result def parse_block(self): node = nodes.Block(lineno=next(self.stream).lineno) node.name = self.stream.expect('name').value node.scoped = self.stream.skip_if('name:scoped') # common problem people encounter when switching from django # to jinja. we do not support hyphens in block names, so let's # raise a nicer error message in that case. 
if self.stream.current.type == 'sub': self.fail('Block names in Jinja have to be valid Python ' 'identifiers and may not contain hyphens, use an ' 'underscore instead.') node.body = self.parse_statements(('name:endblock',), drop_needle=True) self.stream.skip_if('name:' + node.name) return node def parse_extends(self): node = nodes.Extends(lineno=next(self.stream).lineno) node.template = self.parse_expression() return node def parse_import_context(self, node, default): if self.stream.current.test_any('name:with', 'name:without') and \ self.stream.look().test('name:context'): node.with_context = next(self.stream).value == 'with' self.stream.skip() else: node.with_context = default return node def parse_include(self): node = nodes.Include(lineno=next(self.stream).lineno) node.template = self.parse_expression() if self.stream.current.test('name:ignore') and \ self.stream.look().test('name:missing'): node.ignore_missing = True self.stream.skip(2) else: node.ignore_missing = False return self.parse_import_context(node, True) def parse_import(self): node = nodes.Import(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect('name:as') node.target = self.parse_assign_target(name_only=True).name return self.parse_import_context(node, False) def parse_from(self): node = nodes.FromImport(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect('name:import') node.names = [] def parse_context(): if self.stream.current.value in ('with', 'without') and \ self.stream.look().test('name:context'): node.with_context = next(self.stream).value == 'with' self.stream.skip() return True return False while 1: if node.names: self.stream.expect('comma') if self.stream.current.type == 'name': if parse_context(): break target = self.parse_assign_target(name_only=True) if target.name.startswith('_'): self.fail('names starting with an underline can not ' 'be imported', target.lineno, exc=TemplateAssertionError) if self.stream.skip_if('name:as'): alias = self.parse_assign_target(name_only=True) node.names.append((target.name, alias.name)) else: node.names.append(target.name) if parse_context() or self.stream.current.type != 'comma': break else: break if not hasattr(node, 'with_context'): node.with_context = False self.stream.skip_if('comma') return node def parse_signature(self, node): node.args = args = [] node.defaults = defaults = [] self.stream.expect('lparen') while self.stream.current.type != 'rparen': if args: self.stream.expect('comma') arg = self.parse_assign_target(name_only=True) arg.set_ctx('param') if self.stream.skip_if('assign'): defaults.append(self.parse_expression()) elif defaults: self.fail('non-default argument follows default argument') args.append(arg) self.stream.expect('rparen') def parse_call_block(self): node = nodes.CallBlock(lineno=next(self.stream).lineno) if self.stream.current.type == 'lparen': self.parse_signature(node) else: node.args = [] node.defaults = [] node.call = self.parse_expression() if not isinstance(node.call, nodes.Call): self.fail('expected call', node.lineno) node.body = self.parse_statements(('name:endcall',), drop_needle=True) return node def parse_filter_block(self): node = nodes.FilterBlock(lineno=next(self.stream).lineno) node.filter = self.parse_filter(None, start_inline=True) node.body = self.parse_statements(('name:endfilter',), drop_needle=True) return node def parse_macro(self): node = nodes.Macro(lineno=next(self.stream).lineno) node.name = self.parse_assign_target(name_only=True).name 
self.parse_signature(node) node.body = self.parse_statements(('name:endmacro',), drop_needle=True) return node def parse_print(self): node = nodes.Output(lineno=next(self.stream).lineno) node.nodes = [] while self.stream.current.type != 'block_end': if node.nodes: self.stream.expect('comma') node.nodes.append(self.parse_expression()) return node def parse_assign_target(self, with_tuple=True, name_only=False, extra_end_rules=None): """Parse an assignment target. As Jinja2 allows assignments to tuples, this function can parse all allowed assignment targets. Per default assignments to tuples are parsed, that can be disable however by setting `with_tuple` to `False`. If only assignments to names are wanted `name_only` can be set to `True`. The `extra_end_rules` parameter is forwarded to the tuple parsing function. """ if name_only: token = self.stream.expect('name') target = nodes.Name(token.value, 'store', lineno=token.lineno) else: if with_tuple: target = self.parse_tuple(simplified=True, extra_end_rules=extra_end_rules) else: target = self.parse_primary() target.set_ctx('store') if not target.can_assign(): self.fail('can\'t assign to %r' % target.__class__. __name__.lower(), target.lineno) return target def parse_expression(self, with_condexpr=True): """Parse an expression. Per default all expressions are parsed, if the optional `with_condexpr` parameter is set to `False` conditional expressions are not parsed. """ if with_condexpr: return self.parse_condexpr() return self.parse_or() def parse_condexpr(self): lineno = self.stream.current.lineno expr1 = self.parse_or() while self.stream.skip_if('name:if'): expr2 = self.parse_or() if self.stream.skip_if('name:else'): expr3 = self.parse_condexpr() else: expr3 = None expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno) lineno = self.stream.current.lineno return expr1 def parse_or(self): lineno = self.stream.current.lineno left = self.parse_and() while self.stream.skip_if('name:or'): right = self.parse_and() left = nodes.Or(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_and(self): lineno = self.stream.current.lineno left = self.parse_not() while self.stream.skip_if('name:and'): right = self.parse_not() left = nodes.And(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_not(self): if self.stream.current.test('name:not'): lineno = next(self.stream).lineno return nodes.Not(self.parse_not(), lineno=lineno) return self.parse_compare() def parse_compare(self): lineno = self.stream.current.lineno expr = self.parse_add() ops = [] while 1: token_type = self.stream.current.type if token_type in _compare_operators: next(self.stream) ops.append(nodes.Operand(token_type, self.parse_add())) elif self.stream.skip_if('name:in'): ops.append(nodes.Operand('in', self.parse_add())) elif (self.stream.current.test('name:not') and self.stream.look().test('name:in')): self.stream.skip(2) ops.append(nodes.Operand('notin', self.parse_add())) else: break lineno = self.stream.current.lineno if not ops: return expr return nodes.Compare(expr, ops, lineno=lineno) def parse_add(self): lineno = self.stream.current.lineno left = self.parse_sub() while self.stream.current.type == 'add': next(self.stream) right = self.parse_sub() left = nodes.Add(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_sub(self): lineno = self.stream.current.lineno left = self.parse_concat() while self.stream.current.type == 'sub': next(self.stream) right = self.parse_concat() left = 
nodes.Sub(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_concat(self): lineno = self.stream.current.lineno args = [self.parse_mul()] while self.stream.current.type == 'tilde': next(self.stream) args.append(self.parse_mul()) if len(args) == 1: return args[0] return nodes.Concat(args, lineno=lineno) def parse_mul(self): lineno = self.stream.current.lineno left = self.parse_div() while self.stream.current.type == 'mul': next(self.stream) right = self.parse_div() left = nodes.Mul(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_div(self): lineno = self.stream.current.lineno left = self.parse_floordiv() while self.stream.current.type == 'div': next(self.stream) right = self.parse_floordiv() left = nodes.Div(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_floordiv(self): lineno = self.stream.current.lineno left = self.parse_mod() while self.stream.current.type == 'floordiv': next(self.stream) right = self.parse_mod() left = nodes.FloorDiv(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_mod(self): lineno = self.stream.current.lineno left = self.parse_pow() while self.stream.current.type == 'mod': next(self.stream) right = self.parse_pow() left = nodes.Mod(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_pow(self): lineno = self.stream.current.lineno left = self.parse_unary() while self.stream.current.type == 'pow': next(self.stream) right = self.parse_unary() left = nodes.Pow(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_unary(self, with_filter=True): token_type = self.stream.current.type lineno = self.stream.current.lineno if token_type == 'sub': next(self.stream) node = nodes.Neg(self.parse_unary(False), lineno=lineno) elif token_type == 'add': next(self.stream) node = nodes.Pos(self.parse_unary(False), lineno=lineno) else: node = self.parse_primary() node = self.parse_postfix(node) if with_filter: node = self.parse_filter_expr(node) return node def parse_primary(self): token = self.stream.current if token.type == 'name': if token.value in ('true', 'false', 'True', 'False'): node = nodes.Const(token.value in ('true', 'True'), lineno=token.lineno) elif token.value in ('none', 'None'): node = nodes.Const(None, lineno=token.lineno) else: node = nodes.Name(token.value, 'load', lineno=token.lineno) next(self.stream) elif token.type == 'string': next(self.stream) buf = [token.value] lineno = token.lineno while self.stream.current.type == 'string': buf.append(self.stream.current.value) next(self.stream) node = nodes.Const(''.join(buf), lineno=lineno) elif token.type in ('integer', 'float'): next(self.stream) node = nodes.Const(token.value, lineno=token.lineno) elif token.type == 'lparen': next(self.stream) node = self.parse_tuple(explicit_parentheses=True) self.stream.expect('rparen') elif token.type == 'lbracket': node = self.parse_list() elif token.type == 'lbrace': node = self.parse_dict() else: self.fail("unexpected '%s'" % describe_token(token), token.lineno) return node def parse_tuple(self, simplified=False, with_condexpr=True, extra_end_rules=None, explicit_parentheses=False): """Works like `parse_expression` but if multiple expressions are delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. This method could also return a regular expression instead of a tuple if no commas where found. The default parsing mode is a full tuple. 
If `simplified` is `True` only names and literals are parsed. The `no_condexpr` parameter is forwarded to :meth:`parse_expression`. Because tuples do not require delimiters and may end in a bogus comma an extra hint is needed that marks the end of a tuple. For example for loops support tuples between `for` and `in`. In that case the `extra_end_rules` is set to ``['name:in']``. `explicit_parentheses` is true if the parsing was triggered by an expression in parentheses. This is used to figure out if an empty tuple is a valid expression or not. """ lineno = self.stream.current.lineno if simplified: parse = self.parse_primary elif with_condexpr: parse = self.parse_expression else: parse = lambda: self.parse_expression(with_condexpr=False) args = [] is_tuple = False while 1: if args: self.stream.expect('comma') if self.is_tuple_end(extra_end_rules): break args.append(parse()) if self.stream.current.type == 'comma': is_tuple = True else: break lineno = self.stream.current.lineno if not is_tuple: if args: return args[0] # if we don't have explicit parentheses, an empty tuple is # not a valid expression. This would mean nothing (literally # nothing) in the spot of an expression would be an empty # tuple. if not explicit_parentheses: self.fail('Expected an expression, got \'%s\'' % describe_token(self.stream.current)) return nodes.Tuple(args, 'load', lineno=lineno) def parse_list(self): token = self.stream.expect('lbracket') items = [] while self.stream.current.type != 'rbracket': if items: self.stream.expect('comma') if self.stream.current.type == 'rbracket': break items.append(self.parse_expression()) self.stream.expect('rbracket') return nodes.List(items, lineno=token.lineno) def parse_dict(self): token = self.stream.expect('lbrace') items = [] while self.stream.current.type != 'rbrace': if items: self.stream.expect('comma') if self.stream.current.type == 'rbrace': break key = self.parse_expression() self.stream.expect('colon') value = self.parse_expression() items.append(nodes.Pair(key, value, lineno=key.lineno)) self.stream.expect('rbrace') return nodes.Dict(items, lineno=token.lineno) def parse_postfix(self, node): while 1: token_type = self.stream.current.type if token_type == 'dot' or token_type == 'lbracket': node = self.parse_subscript(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests elif token_type == 'lparen': node = self.parse_call(node) else: break return node def parse_filter_expr(self, node): while 1: token_type = self.stream.current.type if token_type == 'pipe': node = self.parse_filter(node) elif token_type == 'name' and self.stream.current.value == 'is': node = self.parse_test(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests elif token_type == 'lparen': node = self.parse_call(node) else: break return node def parse_subscript(self, node): token = next(self.stream) if token.type == 'dot': attr_token = self.stream.current next(self.stream) if attr_token.type == 'name': return nodes.Getattr(node, attr_token.value, 'load', lineno=token.lineno) elif attr_token.type != 'integer': self.fail('expected name or number', attr_token.lineno) arg = nodes.Const(attr_token.value, lineno=attr_token.lineno) return nodes.Getitem(node, arg, 'load', lineno=token.lineno) if token.type == 'lbracket': args = [] while self.stream.current.type != 'rbracket': if args: self.stream.expect('comma') args.append(self.parse_subscribed()) self.stream.expect('rbracket') if len(args) == 1: arg = 
args[0] else: arg = nodes.Tuple(args, 'load', lineno=token.lineno) return nodes.Getitem(node, arg, 'load', lineno=token.lineno) self.fail('expected subscript expression', self.lineno) def parse_subscribed(self): lineno = self.stream.current.lineno if self.stream.current.type == 'colon': next(self.stream) args = [None] else: node = self.parse_expression() if self.stream.current.type != 'colon': return node next(self.stream) args = [node] if self.stream.current.type == 'colon': args.append(None) elif self.stream.current.type not in ('rbracket', 'comma'): args.append(self.parse_expression()) else: args.append(None) if self.stream.current.type == 'colon': next(self.stream) if self.stream.current.type not in ('rbracket', 'comma'): args.append(self.parse_expression()) else: args.append(None) else: args.append(None) return nodes.Slice(lineno=lineno, *args) def parse_call(self, node): token = self.stream.expect('lparen') args = [] kwargs = [] dyn_args = dyn_kwargs = None require_comma = False def ensure(expr): if not expr: self.fail('invalid syntax for function call expression', token.lineno) while self.stream.current.type != 'rparen': if require_comma: self.stream.expect('comma') # support for trailing comma if self.stream.current.type == 'rparen': break if self.stream.current.type == 'mul': ensure(dyn_args is None and dyn_kwargs is None) next(self.stream) dyn_args = self.parse_expression() elif self.stream.current.type == 'pow': ensure(dyn_kwargs is None) next(self.stream) dyn_kwargs = self.parse_expression() else: ensure(dyn_args is None and dyn_kwargs is None) if self.stream.current.type == 'name' and \ self.stream.look().type == 'assign': key = self.stream.current.value self.stream.skip(2) value = self.parse_expression() kwargs.append(nodes.Keyword(key, value, lineno=value.lineno)) else: ensure(not kwargs) args.append(self.parse_expression()) require_comma = True self.stream.expect('rparen') if node is None: return args, kwargs, dyn_args, dyn_kwargs return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) def parse_filter(self, node, start_inline=False): while self.stream.current.type == 'pipe' or start_inline: if not start_inline: next(self.stream) token = self.stream.expect('name') name = token.value while self.stream.current.type == 'dot': next(self.stream) name += '.' + self.stream.expect('name').value if self.stream.current.type == 'lparen': args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) else: args = [] kwargs = [] dyn_args = dyn_kwargs = None node = nodes.Filter(node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) start_inline = False return node def parse_test(self, node): token = next(self.stream) if self.stream.current.test('name:not'): next(self.stream) negated = True else: negated = False name = self.stream.expect('name').value while self.stream.current.type == 'dot': next(self.stream) name += '.' 
+ self.stream.expect('name').value dyn_args = dyn_kwargs = None kwargs = [] if self.stream.current.type == 'lparen': args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) elif (self.stream.current.type in ('name', 'string', 'integer', 'float', 'lparen', 'lbracket', 'lbrace') and not self.stream.current.test_any('name:else', 'name:or', 'name:and')): if self.stream.current.test('name:is'): self.fail('You cannot chain multiple tests with is') args = [self.parse_expression()] else: args = [] node = nodes.Test(node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) if negated: node = nodes.Not(node, lineno=token.lineno) return node def subparse(self, end_tokens=None): body = [] data_buffer = [] add_data = data_buffer.append if end_tokens is not None: self._end_token_stack.append(end_tokens) def flush_data(): if data_buffer: lineno = data_buffer[0].lineno body.append(nodes.Output(data_buffer[:], lineno=lineno)) del data_buffer[:] try: while self.stream: token = self.stream.current if token.type == 'data': if token.value: add_data(nodes.TemplateData(token.value, lineno=token.lineno)) next(self.stream) elif token.type == 'variable_begin': next(self.stream) add_data(self.parse_tuple(with_condexpr=True)) self.stream.expect('variable_end') elif token.type == 'block_begin': flush_data() next(self.stream) if end_tokens is not None and \ self.stream.current.test_any(*end_tokens): return body rv = self.parse_statement() if isinstance(rv, list): body.extend(rv) else: body.append(rv) self.stream.expect('block_end') else: raise AssertionError('internal parsing error') flush_data() finally: if end_tokens is not None: self._end_token_stack.pop() return body def parse(self): """Parse the whole template into a `Template` node.""" result = nodes.Template(self.subparse(), lineno=1) result.set_environment(self.environment) return result
mit
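The Parser above is rarely instantiated by hand; templates reach it through the environment, whose public parse() method returns the nodes.Template AST that Parser.parse() builds. A minimal sketch using the standard Jinja2 API:

from jinja2 import Environment

env = Environment()
ast = env.parse('{% for item in seq %}{{ item }}{% endfor %}')
# ast is a nodes.Template; its body holds the For node produced by
# parse_for(), with target, iter, body and else_ filled in as above.
print(ast)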
patrioticcow/MessagesForSkype
packages/win32/bundle/MessagesForSkype/modules/python/1.3.1-beta/Lib/idlelib/PyParse.py
185
19510
import re import sys # Reason last stmt is continued (or C_NONE if it's not). (C_NONE, C_BACKSLASH, C_STRING_FIRST_LINE, C_STRING_NEXT_LINES, C_BRACKET) = range(5) if 0: # for throwaway debugging output def dump(*stuff): sys.__stdout__.write(" ".join(map(str, stuff)) + "\n") # Find what looks like the start of a popular stmt. _synchre = re.compile(r""" ^ [ \t]* (?: while | else | def | return | assert | break | class | continue | elif | try | except | raise | import | yield ) \b """, re.VERBOSE | re.MULTILINE).search # Match blank line or non-indenting comment line. _junkre = re.compile(r""" [ \t]* (?: \# \S .* )? \n """, re.VERBOSE).match # Match any flavor of string; the terminating quote is optional # so that we're robust in the face of incomplete program text. _match_stringre = re.compile(r""" \""" [^"\\]* (?: (?: \\. | "(?!"") ) [^"\\]* )* (?: \""" )? | " [^"\\\n]* (?: \\. [^"\\\n]* )* "? | ''' [^'\\]* (?: (?: \\. | '(?!'') ) [^'\\]* )* (?: ''' )? | ' [^'\\\n]* (?: \\. [^'\\\n]* )* '? """, re.VERBOSE | re.DOTALL).match # Match a line that starts with something interesting; # used to find the first item of a bracket structure. _itemre = re.compile(r""" [ \t]* [^\s#\\] # if we match, m.end()-1 is the interesting char """, re.VERBOSE).match # Match start of stmts that should be followed by a dedent. _closere = re.compile(r""" \s* (?: return | break | continue | raise | pass ) \b """, re.VERBOSE).match # Chew up non-special chars as quickly as possible. If match is # successful, m.end() less 1 is the index of the last boring char # matched. If match is unsuccessful, the string starts with an # interesting char. _chew_ordinaryre = re.compile(r""" [^[\](){}#'"\\]+ """, re.VERBOSE).match # Build translation table to map uninteresting chars to "x", open # brackets to "(", and close brackets to ")". _tran = ['x'] * 256 for ch in "({[": _tran[ord(ch)] = '(' for ch in ")}]": _tran[ord(ch)] = ')' for ch in "\"'\\\n#": _tran[ord(ch)] = ch _tran = ''.join(_tran) del ch try: UnicodeType = type(unicode("")) except NameError: UnicodeType = None class Parser: def __init__(self, indentwidth, tabwidth): self.indentwidth = indentwidth self.tabwidth = tabwidth def set_str(self, str): assert len(str) == 0 or str[-1] == '\n' if type(str) is UnicodeType: # The parse functions have no idea what to do with Unicode, so # replace all Unicode characters with "x". This is "safe" # so long as the only characters germane to parsing the structure # of Python are 7-bit ASCII. It's *necessary* because Unicode # strings don't have a .translate() method that supports # deletechars. uniphooey = str str = [] push = str.append for raw in map(ord, uniphooey): push(raw < 127 and chr(raw) or "x") str = "".join(str) self.str = str self.study_level = 0 # Return index of a good place to begin parsing, as close to the # end of the string as possible. This will be the start of some # popular stmt like "if" or "def". Return None if none found: # the caller should pass more prior context then, if possible, or # if not (the entire program text up until the point of interest # has already been tried) pass 0 to set_lo. # # This will be reliable iff given a reliable is_char_in_string # function, meaning that when it says "no", it's absolutely # guaranteed that the char is not in a string. 
def find_good_parse_start(self, is_char_in_string=None, _synchre=_synchre): str, pos = self.str, None if not is_char_in_string: # no clue -- make the caller pass everything return None # Peek back from the end for a good place to start, # but don't try too often; pos will be left None, or # bumped to a legitimate synch point. limit = len(str) for tries in range(5): i = str.rfind(":\n", 0, limit) if i < 0: break i = str.rfind('\n', 0, i) + 1 # start of colon line m = _synchre(str, i, limit) if m and not is_char_in_string(m.start()): pos = m.start() break limit = i if pos is None: # Nothing looks like a block-opener, or stuff does # but is_char_in_string keeps returning true; most likely # we're in or near a giant string, the colorizer hasn't # caught up enough to be helpful, or there simply *aren't* # any interesting stmts. In any of these cases we're # going to have to parse the whole thing to be sure, so # give it one last try from the start, but stop wasting # time here regardless of the outcome. m = _synchre(str) if m and not is_char_in_string(m.start()): pos = m.start() return pos # Peeking back worked; look forward until _synchre no longer # matches. i = pos + 1 while 1: m = _synchre(str, i) if m: s, i = m.span() if not is_char_in_string(s): pos = s else: break return pos # Throw away the start of the string. Intended to be called with # find_good_parse_start's result. def set_lo(self, lo): assert lo == 0 or self.str[lo-1] == '\n' if lo > 0: self.str = self.str[lo:] # As quickly as humanly possible <wink>, find the line numbers (0- # based) of the non-continuation lines. # Creates self.{goodlines, continuation}. def _study1(self): if self.study_level >= 1: return self.study_level = 1 # Map all uninteresting characters to "x", all open brackets # to "(", all close brackets to ")", then collapse runs of # uninteresting characters. This can cut the number of chars # by a factor of 10-40, and so greatly speed the following loop. str = self.str str = str.translate(_tran) str = str.replace('xxxxxxxx', 'x') str = str.replace('xxxx', 'x') str = str.replace('xx', 'x') str = str.replace('xx', 'x') str = str.replace('\nx', '\n') # note that replacing x\n with \n would be incorrect, because # x may be preceded by a backslash # March over the squashed version of the program, accumulating # the line numbers of non-continued stmts, and determining # whether & why the last stmt is a continuation. 
continuation = C_NONE level = lno = 0 # level is nesting level; lno is line number self.goodlines = goodlines = [0] push_good = goodlines.append i, n = 0, len(str) while i < n: ch = str[i] i = i+1 # cases are checked in decreasing order of frequency if ch == 'x': continue if ch == '\n': lno = lno + 1 if level == 0: push_good(lno) # else we're in an unclosed bracket structure continue if ch == '(': level = level + 1 continue if ch == ')': if level: level = level - 1 # else the program is invalid, but we can't complain continue if ch == '"' or ch == "'": # consume the string quote = ch if str[i-1:i+2] == quote * 3: quote = quote * 3 firstlno = lno w = len(quote) - 1 i = i+w while i < n: ch = str[i] i = i+1 if ch == 'x': continue if str[i-1:i+w] == quote: i = i+w break if ch == '\n': lno = lno + 1 if w == 0: # unterminated single-quoted string if level == 0: push_good(lno) break continue if ch == '\\': assert i < n if str[i] == '\n': lno = lno + 1 i = i+1 continue # else comment char or paren inside string else: # didn't break out of the loop, so we're still # inside a string if (lno - 1) == firstlno: # before the previous \n in str, we were in the first # line of the string continuation = C_STRING_FIRST_LINE else: continuation = C_STRING_NEXT_LINES continue # with outer loop if ch == '#': # consume the comment i = str.find('\n', i) assert i >= 0 continue assert ch == '\\' assert i < n if str[i] == '\n': lno = lno + 1 if i+1 == n: continuation = C_BACKSLASH i = i+1 # The last stmt may be continued for all 3 reasons. # String continuation takes precedence over bracket # continuation, which beats backslash continuation. if (continuation != C_STRING_FIRST_LINE and continuation != C_STRING_NEXT_LINES and level > 0): continuation = C_BRACKET self.continuation = continuation # Push the final line number as a sentinel value, regardless of # whether it's continued. assert (continuation == C_NONE) == (goodlines[-1] == lno) if goodlines[-1] != lno: push_good(lno) def get_continuation_type(self): self._study1() return self.continuation # study1 was sufficient to determine the continuation status, # but doing more requires looking at every character. study2 # does this for the last interesting statement in the block. # Creates: # self.stmt_start, stmt_end # slice indices of last interesting stmt # self.stmt_bracketing # the bracketing structure of the last interesting stmt; # for example, for the statement "say(boo) or die", stmt_bracketing # will be [(0, 0), (3, 1), (8, 0)]. Strings and comments are # treated as brackets, for the matter. # self.lastch # last non-whitespace character before optional trailing # comment # self.lastopenbracketpos # if continuation is C_BRACKET, index of last open bracket def _study2(self): if self.study_level >= 2: return self._study1() self.study_level = 2 # Set p and q to slice indices of last interesting stmt. str, goodlines = self.str, self.goodlines i = len(goodlines) - 1 p = len(str) # index of newest line while i: assert p # p is the index of the stmt at line number goodlines[i]. # Move p back to the stmt at line number goodlines[i-1]. q = p for nothing in range(goodlines[i-1], goodlines[i]): # tricky: sets p to 0 if no preceding newline p = str.rfind('\n', 0, p-1) + 1 # The stmt str[p:q] isn't a continuation, but may be blank # or a non-indenting comment line. if _junkre(str, p): i = i-1 else: break if i == 0: # nothing but junk! 
assert p == 0 q = p self.stmt_start, self.stmt_end = p, q # Analyze this stmt, to find the last open bracket (if any) # and last interesting character (if any). lastch = "" stack = [] # stack of open bracket indices push_stack = stack.append bracketing = [(p, 0)] while p < q: # suck up all except ()[]{}'"#\\ m = _chew_ordinaryre(str, p, q) if m: # we skipped at least one boring char newp = m.end() # back up over totally boring whitespace i = newp - 1 # index of last boring char while i >= p and str[i] in " \t\n": i = i-1 if i >= p: lastch = str[i] p = newp if p >= q: break ch = str[p] if ch in "([{": push_stack(p) bracketing.append((p, len(stack))) lastch = ch p = p+1 continue if ch in ")]}": if stack: del stack[-1] lastch = ch p = p+1 bracketing.append((p, len(stack))) continue if ch == '"' or ch == "'": # consume string # Note that study1 did this with a Python loop, but # we use a regexp here; the reason is speed in both # cases; the string may be huge, but study1 pre-squashed # strings to a couple of characters per line. study1 # also needed to keep track of newlines, and we don't # have to. bracketing.append((p, len(stack)+1)) lastch = ch p = _match_stringre(str, p, q).end() bracketing.append((p, len(stack))) continue if ch == '#': # consume comment and trailing newline bracketing.append((p, len(stack)+1)) p = str.find('\n', p, q) + 1 assert p > 0 bracketing.append((p, len(stack))) continue assert ch == '\\' p = p+1 # beyond backslash assert p < q if str[p] != '\n': # the program is invalid, but can't complain lastch = ch + str[p] p = p+1 # beyond escaped char # end while p < q: self.lastch = lastch if stack: self.lastopenbracketpos = stack[-1] self.stmt_bracketing = tuple(bracketing) # Assuming continuation is C_BRACKET, return the number # of spaces the next line should be indented. def compute_bracket_indent(self): self._study2() assert self.continuation == C_BRACKET j = self.lastopenbracketpos str = self.str n = len(str) origi = i = str.rfind('\n', 0, j) + 1 j = j+1 # one beyond open bracket # find first list item; set i to start of its line while j < n: m = _itemre(str, j) if m: j = m.end() - 1 # index of first interesting char extra = 0 break else: # this line is junk; advance to next line i = j = str.find('\n', j) + 1 else: # nothing interesting follows the bracket; # reproduce the bracket line's indentation + a level j = i = origi while str[j] in " \t": j = j+1 extra = self.indentwidth return len(str[i:j].expandtabs(self.tabwidth)) + extra # Return number of physical lines in last stmt (whether or not # it's an interesting stmt! this is intended to be called when # continuation is C_BACKSLASH). def get_num_lines_in_stmt(self): self._study1() goodlines = self.goodlines return goodlines[-1] - goodlines[-2] # Assuming continuation is C_BACKSLASH, return the number of spaces # the next line should be indented. Also assuming the new line is # the first one following the initial line of the stmt. 
def compute_backslash_indent(self): self._study2() assert self.continuation == C_BACKSLASH str = self.str i = self.stmt_start while str[i] in " \t": i = i+1 startpos = i # See whether the initial line starts an assignment stmt; i.e., # look for an = operator endpos = str.find('\n', startpos) + 1 found = level = 0 while i < endpos: ch = str[i] if ch in "([{": level = level + 1 i = i+1 elif ch in ")]}": if level: level = level - 1 i = i+1 elif ch == '"' or ch == "'": i = _match_stringre(str, i, endpos).end() elif ch == '#': break elif level == 0 and ch == '=' and \ (i == 0 or str[i-1] not in "=<>!") and \ str[i+1] != '=': found = 1 break else: i = i+1 if found: # found a legit =, but it may be the last interesting # thing on the line i = i+1 # move beyond the = found = re.match(r"\s*\\", str[i:endpos]) is None if not found: # oh well ... settle for moving beyond the first chunk # of non-whitespace chars i = startpos while str[i] not in " \t\n": i = i+1 return len(str[self.stmt_start:i].expandtabs(\ self.tabwidth)) + 1 # Return the leading whitespace on the initial line of the last # interesting stmt. def get_base_indent_string(self): self._study2() i, n = self.stmt_start, self.stmt_end j = i str = self.str while j < n and str[j] in " \t": j = j + 1 return str[i:j] # Did the last interesting stmt open a block? def is_block_opener(self): self._study2() return self.lastch == ':' # Did the last interesting stmt close a block? def is_block_closer(self): self._study2() return _closere(self.str, self.stmt_start) is not None # index of last open bracket ({[, or None if none lastopenbracketpos = None def get_last_open_bracket_pos(self): self._study2() return self.lastopenbracketpos # the structure of the bracketing of the last interesting statement, # in the format defined in _study2, or None if the text didn't contain # anything stmt_bracketing = None def get_last_stmt_bracketing(self): self._study2() return self.stmt_bracketing
mit
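A short interactive sketch of the Parser above (assuming the module is importable as idlelib.PyParse, its location in the Python 2 standard library):

from idlelib.PyParse import Parser, C_NONE, C_BRACKET

p = Parser(indentwidth=4, tabwidth=8)
p.set_str("if x:\n")                       # set_str() requires a trailing newline
print p.get_continuation_type() == C_NONE  # True: the stmt is complete
print p.is_block_opener()                  # True: last stmt ends with ':'

p.set_str("x = (1,\n")
print p.get_continuation_type() == C_BRACKET  # True: open bracket continues
print p.compute_bracket_indent()              # 5: aligns under the first item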
davidvon/pipa-pay-server
site-packages/sqlalchemy/orm/scoping.py
18
6109
# orm/scoping.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from .. import exc as sa_exc
from ..util import ScopedRegistry, ThreadLocalRegistry, warn
from . import class_mapper, exc as orm_exc
from .session import Session

__all__ = ['scoped_session']


class scoped_session(object):
    """Provides scoped management of :class:`.Session` objects.

    See :ref:`unitofwork_contextual` for a tutorial.

    """

    def __init__(self, session_factory, scopefunc=None):
        """Construct a new :class:`.scoped_session`.

        :param session_factory: a factory to create new :class:`.Session`
         instances. This is usually, but not necessarily, an instance
         of :class:`.sessionmaker`.

        :param scopefunc: optional function which defines
         the current scope.  If not passed, the :class:`.scoped_session`
         object assumes "thread-local" scope, and will use
         a Python ``threading.local()`` in order to maintain the current
         :class:`.Session`.  If passed, the function should return
         a hashable token; this token will be used as the key in a
         dictionary in order to store and retrieve the current
         :class:`.Session`.

        """
        self.session_factory = session_factory

        if scopefunc:
            self.registry = ScopedRegistry(session_factory, scopefunc)
        else:
            self.registry = ThreadLocalRegistry(session_factory)

    def __call__(self, **kw):
        """Return the current :class:`.Session`, creating it
        using the session factory if not present.

        :param \**kw: Keyword arguments will be passed to the
         session factory callable, if an existing :class:`.Session`
         is not present.  If the :class:`.Session` is present and
         keyword arguments have been passed,
         :exc:`~sqlalchemy.exc.InvalidRequestError` is raised.

        """
        if kw:
            scope = kw.pop('scope', False)
            if scope is not None:
                if self.registry.has():
                    raise sa_exc.InvalidRequestError(
                        "Scoped session is already present; "
                        "no new arguments may be specified.")
                else:
                    sess = self.session_factory(**kw)
                    self.registry.set(sess)
                    return sess
            else:
                return self.session_factory(**kw)
        else:
            return self.registry()

    def remove(self):
        """Dispose of the current :class:`.Session`, if present.

        This will first call :meth:`.Session.close` method
        on the current :class:`.Session`, which releases any existing
        transactional/connection resources still being held; transactions
        specifically are rolled back.  The :class:`.Session` is then
        discarded.  Upon next usage within the same scope,
        the :class:`.scoped_session` will produce a new
        :class:`.Session` object.

        """
        if self.registry.has():
            self.registry().close()
        self.registry.clear()

    def configure(self, **kwargs):
        """reconfigure the :class:`.sessionmaker` used by this
        :class:`.scoped_session`.

        See :meth:`.sessionmaker.configure`.

        """
        if self.registry.has():
            warn('At least one scoped session is already present. '
                 ' configure() can not affect sessions that have '
                 'already been created.')

        self.session_factory.configure(**kwargs)

    def query_property(self, query_cls=None):
        """return a class property which produces a :class:`.Query` object
        against the class and the current :class:`.Session` when called.

        e.g.::

            Session = scoped_session(sessionmaker())

            class MyClass(object):
                query = Session.query_property()

            # after mappers are defined
            result = MyClass.query.filter(MyClass.name=='foo').all()

        Produces instances of the session's configured query class by
        default.  To override and use a custom implementation, provide
        a ``query_cls`` callable.  The callable will be invoked with
        the class's mapper as a positional argument and a session
        keyword argument.

        There is no limit to the number of query properties placed on
        a class.

        """
        class query(object):
            def __get__(s, instance, owner):
                try:
                    mapper = class_mapper(owner)
                    if mapper:
                        if query_cls:
                            # custom query class
                            return query_cls(mapper, session=self.registry())
                        else:
                            # session's configured query class
                            return self.registry().query(mapper)
                except orm_exc.UnmappedClassError:
                    return None
        return query()

ScopedSession = scoped_session
"""Old name for backwards compatibility."""


def instrument(name):
    def do(self, *args, **kwargs):
        return getattr(self.registry(), name)(*args, **kwargs)
    return do

for meth in Session.public_methods:
    setattr(scoped_session, meth, instrument(meth))


def makeprop(name):
    def set(self, attr):
        setattr(self.registry(), name, attr)

    def get(self):
        return getattr(self.registry(), name)

    return property(get, set)

for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
             'is_active', 'autoflush', 'no_autoflush'):
    setattr(scoped_session, prop, makeprop(prop))


def clslevel(name):
    def do(cls, *args, **kwargs):
        return getattr(Session, name)(*args, **kwargs)
    return classmethod(do)

for prop in ('close_all', 'object_session', 'identity_key'):
    setattr(scoped_session, prop, clslevel(prop))
apache-2.0
zjuchenyuan/BioWeb
Lib/Bio/Index.py
1
4980
# Copyright 1999 by Jeffrey Chang. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Index.py This module provides a way to create indexes to text files. Classes: Index Dictionary-like class used to store index information. _ShelveIndex An Index class based on the shelve module. _InMemoryIndex An in-memory Index class. """ import os import array import shelve try: import cPickle as pickle # Only available under Python 2 except ImportError: import pickle # Python 3 class _ShelveIndex(dict): """An index file wrapped around shelve. """ # Without a good dbm module installed, this is pretty slow and # generates large files. When generating an index on a FASTA- # formatted file with 82000 sequences (37Mb), the # index 'dat' file is 42Mb and 'dir' file is 8Mb. __version = 2 __version_key = '__version' def __init__(self, indexname, truncate=None): dict.__init__(self) try: if truncate: # In python 1.52 and before, dumbdbm (under shelve) # doesn't clear the old database. files = [indexname + '.dir', indexname + '.dat', indexname + '.bak' ] for file in files: if os.path.exists(file): os.unlink(file) raise Exception("open a new shelf") self.data = shelve.open(indexname, flag='r') except Exception: # TODO: Which exception? # No database exists. self.data = shelve.open(indexname, flag='n') self.data[self.__version_key] = self.__version else: # Check to make sure the database is the correct version. version = self.data.get(self.__version_key) if version is None: raise IOError("Unrecognized index format") elif version != self.__version: raise IOError("Version %s doesn't match my version %s" % (version, self.__version)) def __del__(self): if 'data' in self.__dict__: self.data.close() class _InMemoryIndex(dict): """This creates an in-memory index file. """ # File Format: # version # key value # [...] __version = 3 __version_key = '__version' def __init__(self, indexname, truncate=None): self._indexname = indexname dict.__init__(self) self.__changed = 0 # the index hasn't changed # Remove the database if truncate is true. if truncate and os.path.exists(indexname): os.unlink(indexname) self.__changed = 1 # Load the database if it exists if os.path.exists(indexname): with open(indexname) as handle: version = self._toobj(handle.readline().rstrip()) if version != self.__version: raise IOError("Version %s doesn't match my version %s" % (version, self.__version)) for line in handle: key, value = line.split() key, value = self._toobj(key), self._toobj(value) self[key] = value self.__changed = 0 def update(self, dict): self.__changed = 1 dict.update(self, dict) def __setitem__(self, key, value): self.__changed = 1 dict.__setitem__(self, key, value) def __delitem__(self, key): self.__changed = 1 dict.__delitem__(self, key) def clear(self): self.__changed = 1 dict.clear(self) def __del__(self): if self.__changed: with open(self._indexname, 'w') as handle: handle.write("%s\n" % self._tostr(self.__version)) for key, value in self.items(): handle.write("%s %s\n" % (self._tostr(key), self._tostr(value))) def _tostr(self, obj): # I need a representation of the object that's saveable to # a file that uses whitespace as delimiters. Thus, I'm # going to pickle the object, and then convert each character of # the string to its ASCII integer value. Then, I'm going to convert # the integers into strings and join them together with commas. 
# It's not the most efficient way of storing things, but it's # relatively fast. s = pickle.dumps(obj) intlist = array.array('b', s) return ','.join(str(i) for i in intlist) def _toobj(self, str): intlist = [int(i) for i in str.split(',')] intlist = array.array('b', intlist) return pickle.loads(''.join(chr(i) for i in intlist)) Index = _InMemoryIndex
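# --- Editorial usage sketch (not part of the original Biopython module).
# Demonstrates the round trip through the default in-memory Index. The
# temporary file name is an arbitrary choice, and the pickle/chr round trip
# in _tostr/_toobj assumes Python 2, matching the module's cPickle-era code.
if __name__ == "__main__":
    import tempfile
    _fname = os.path.join(tempfile.mkdtemp(), "demo.idx")
    _idx = Index(_fname, truncate=1)
    _idx["seq1"] = 42          # stored via __setitem__, marks the index changed
    del _idx                   # __del__ serializes the index to disk
    _idx = Index(_fname)       # re-load and verify the persisted entry
    print(_idx["seq1"])        # -> 42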
mit
wyc/django
django/contrib/syndication/views.py
192
8680
from __future__ import unicode_literals from calendar import timegm from django.conf import settings from django.contrib.sites.shortcuts import get_current_site from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist from django.http import Http404, HttpResponse from django.template import TemplateDoesNotExist, loader from django.utils import feedgenerator, six from django.utils.encoding import force_text, iri_to_uri, smart_text from django.utils.html import escape from django.utils.http import http_date from django.utils.timezone import get_default_timezone, is_naive, make_aware def add_domain(domain, url, secure=False): protocol = 'https' if secure else 'http' if url.startswith('//'): # Support network-path reference (see #16753) - RSS requires a protocol url = '%s:%s' % (protocol, url) elif not url.startswith(('http://', 'https://', 'mailto:')): url = iri_to_uri('%s://%s%s' % (protocol, domain, url)) return url class FeedDoesNotExist(ObjectDoesNotExist): pass class Feed(object): feed_type = feedgenerator.DefaultFeed title_template = None description_template = None def __call__(self, request, *args, **kwargs): try: obj = self.get_object(request, *args, **kwargs) except ObjectDoesNotExist: raise Http404('Feed object does not exist.') feedgen = self.get_feed(obj, request) response = HttpResponse(content_type=feedgen.content_type) if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'): # if item_pubdate or item_updateddate is defined for the feed, set # header so as ConditionalGetMiddleware is able to send 304 NOT MODIFIED response['Last-Modified'] = http_date( timegm(feedgen.latest_post_date().utctimetuple())) feedgen.write(response, 'utf-8') return response def item_title(self, item): # Titles should be double escaped by default (see #6533) return escape(force_text(item)) def item_description(self, item): return force_text(item) def item_link(self, item): try: return item.get_absolute_url() except AttributeError: raise ImproperlyConfigured( 'Give your %s class a get_absolute_url() method, or define an ' 'item_link() method in your Feed class.' % item.__class__.__name__ ) def __get_dynamic_attr(self, attname, obj, default=None): try: attr = getattr(self, attname) except AttributeError: return default if callable(attr): # Check co_argcount rather than try/excepting the function and # catching the TypeError, because something inside the function # may raise the TypeError. This technique is more accurate. try: code = six.get_function_code(attr) except AttributeError: code = six.get_function_code(attr.__call__) if code.co_argcount == 2: # one argument is 'self' return attr(obj) else: return attr() return attr def feed_extra_kwargs(self, obj): """ Returns an extra keyword arguments dictionary that is used when initializing the feed generator. """ return {} def item_extra_kwargs(self, item): """ Returns an extra keyword arguments dictionary that is used with the `add_item` call of the feed generator. """ return {} def get_object(self, request, *args, **kwargs): return None def get_context_data(self, **kwargs): """ Returns a dictionary to use as extra context if either ``self.description_template`` or ``self.item_template`` are used. Default implementation preserves the old behavior of using {'obj': item, 'site': current_site} as the context. """ return {'obj': kwargs.get('item'), 'site': kwargs.get('site')} def get_feed(self, obj, request): """ Returns a feedgenerator.DefaultFeed object, fully populated, for this feed. 
Raises FeedDoesNotExist for invalid parameters. """ current_site = get_current_site(request) link = self.__get_dynamic_attr('link', obj) link = add_domain(current_site.domain, link, request.is_secure()) feed = self.feed_type( title=self.__get_dynamic_attr('title', obj), subtitle=self.__get_dynamic_attr('subtitle', obj), link=link, description=self.__get_dynamic_attr('description', obj), language=settings.LANGUAGE_CODE, feed_url=add_domain( current_site.domain, self.__get_dynamic_attr('feed_url', obj) or request.path, request.is_secure(), ), author_name=self.__get_dynamic_attr('author_name', obj), author_link=self.__get_dynamic_attr('author_link', obj), author_email=self.__get_dynamic_attr('author_email', obj), categories=self.__get_dynamic_attr('categories', obj), feed_copyright=self.__get_dynamic_attr('feed_copyright', obj), feed_guid=self.__get_dynamic_attr('feed_guid', obj), ttl=self.__get_dynamic_attr('ttl', obj), **self.feed_extra_kwargs(obj) ) title_tmp = None if self.title_template is not None: try: title_tmp = loader.get_template(self.title_template) except TemplateDoesNotExist: pass description_tmp = None if self.description_template is not None: try: description_tmp = loader.get_template(self.description_template) except TemplateDoesNotExist: pass for item in self.__get_dynamic_attr('items', obj): context = self.get_context_data(item=item, site=current_site, obj=obj, request=request) if title_tmp is not None: title = title_tmp.render(context, request) else: title = self.__get_dynamic_attr('item_title', item) if description_tmp is not None: description = description_tmp.render(context, request) else: description = self.__get_dynamic_attr('item_description', item) link = add_domain( current_site.domain, self.__get_dynamic_attr('item_link', item), request.is_secure(), ) enc = None enc_url = self.__get_dynamic_attr('item_enclosure_url', item) if enc_url: enc = feedgenerator.Enclosure( url=smart_text(enc_url), length=smart_text(self.__get_dynamic_attr('item_enclosure_length', item)), mime_type=smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item)) ) author_name = self.__get_dynamic_attr('item_author_name', item) if author_name is not None: author_email = self.__get_dynamic_attr('item_author_email', item) author_link = self.__get_dynamic_attr('item_author_link', item) else: author_email = author_link = None tz = get_default_timezone() pubdate = self.__get_dynamic_attr('item_pubdate', item) if pubdate and is_naive(pubdate): pubdate = make_aware(pubdate, tz) updateddate = self.__get_dynamic_attr('item_updateddate', item) if updateddate and is_naive(updateddate): updateddate = make_aware(updateddate, tz) feed.add_item( title=title, link=link, description=description, unique_id=self.__get_dynamic_attr('item_guid', item, link), unique_id_is_permalink=self.__get_dynamic_attr( 'item_guid_is_permalink', item), enclosure=enc, pubdate=pubdate, updateddate=updateddate, author_name=author_name, author_email=author_email, author_link=author_link, categories=self.__get_dynamic_attr('item_categories', item), item_copyright=self.__get_dynamic_attr('item_copyright', item), **self.item_extra_kwargs(item) ) return feed
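# --- Editorial usage sketch (not part of the original Django source).
# The typical subclass this view expects; ``Entry`` and its fields are
# hypothetical stand-ins for a project model:
#
#     from django.contrib.syndication.views import Feed
#     from myapp.models import Entry  # hypothetical model
#
#     class LatestEntriesFeed(Feed):
#         title = "Example site news"
#         link = "/news/"
#         description = "Updates on changes and additions to the site."
#
#         def items(self):
#             return Entry.objects.order_by('-pub_date')[:5]
#
#         def item_title(self, item):
#             return item.headline
#
#         def item_description(self, item):
#             return item.body
#
# Routed in a URLconf as ``url(r'^feed/$', LatestEntriesFeed())``, the
# instance is invoked like any other view via Feed.__call__ above.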
bsd-3-clause
royredman4/Rubik_Cube
Rubik_Info.py
1
7722
try: # for Python 2 from Tkinter import * except ImportError: # for Python 3 from tkinter import * import time import Solving_algorithm # Converts a set of coordinates into indexes in the cube # returns square index horizontally and vertically (x,y) def CoordinatesToIndex(coordinates): t = (coordinates[0] - 98) / 60 i = (coordinates[1] - 90) / 60 return[t, i] # Creates the cubes defaults at startup def CreateCube(): ''' side1 = ["dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray"] side2 = ["dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray"] side3 = ["dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray"] side4 = ["dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray"] side5 = ["dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray", "dark gray"] frontside = ["dark gray", "dark gray", "dark gray", "dark gray", "White", "dark gray", "dark gray", "dark gray", "dark gray"] ''' # If you want the pieces filled in (for testing purposes), # Uncomment this section and comment out the previous section ''' side1 = ["Yellow", "Yellow", "Yellow", "Yellow", "Yellow", "Yellow", "Yellow", "Yellow", "Yellow"] side2 = ["Blue", "Blue", "Blue", "Blue", "Blue", "Blue", "Blue", "Blue", "Blue"] side3 = ["Green", "Green", "Green", "Green", "Green", "Green", "Green", "Green", "Green"] side4 = ["Red", "Red", "Red", "Red", "Red", "Red", "Red", "Red", "Red"] side5 = ["Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange"] frontside = ["White", "White", "White", "White", "White", "White", "White", "White", "White"] ''' # For debugging turning up/down ''' side1 = ["Green", "Green", "Green", "Yellow", "Yellow", "Yellow", "Blue", "Blue", "Blue"] side2 = ["White", "White", "White", "Blue", "Blue", "Blue", "Yellow", "Yellow", "Yellow"] side3 = ["Yellow", "Yellow", "Yellow", "Green", "Green", "Green", "White", "White", "White"] side4 = ["Red", "Red", "Red", "Red", "Red", "Red", "Red", "Red", "Red"] side5 = ["Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange"] frontside = ["Green", "Green", "Green", "White", "White", "White", "Blue", "Blue", "Blue"] ''' # For debugging turning left/right ''' side1 = ["Blue", "Yellow", "White", "Blue", "Yellow", "White", "Blue", "Yellow", "White"] side2 = ["Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange", "Orange"] side3 = ["Red", "Red", "Red", "Red", "Red", "Red", "Red", "Red", "Red"] side4 = ["Green", "Blue", "White", "Green", "Blue", "White", "Green", "Blue", "White"] side5 = ["Blue", "Green", "Yellow", "Blue", "Green", "Yellow", "Blue", "Green", "Yellow"] frontside = ["Yellow", "White", "Green", "Yellow", "White", "Green", "Yellow", "White", "Green"] ''' # For testing the yellow cross side1 = ["White", "Orange", "Yellow", "Green", "Orange", "Orange", "White", "Blue", "Red"] side2 = ["Green", "Green", "Orange", "Yellow", "Blue", "Yellow", "Blue", "White", "White"] side3 = ["Red", "Yellow", "Green", "Red", "Green", "Orange", "Red", "Green", "Blue"] side4 = ["Orange", "White", "Orange", "White", "Yellow", "Green", "Yellow", "Blue", "Green"] side5 = ["Red", "Red", "Green", "Red", "White", "Red", "Yellow", "White", "Orange"] frontside = ["Blue", "Orange", "Yellow", "Blue", "Red", "Blue", "Blue", 
"Yellow", "White"] master = ["Front",frontside, "Back",side1, "Left",side2, "Right",side3, "Top",side4, "Bottom",side5] return master # Creates the GUI portion of the cube # (creates all the colors on the screen # for a cubes side). def RubikSetup(canvas, colors): i = 90 counter = 0 print(colors) # time.sleep(10) for z in range(0, 3): t = 98 for q in range(0, 3): canvas.create_rectangle(t, i, t+60, i+60, fill=colors[counter]) t += 60 counter += 1 i += 60 # Changes a single cubes color to the users requested color on the screen def ChangeColor(canvas, color, index): multiple = (index[0] - 98) / 60 t = 98 + (60 * multiple) multiple = (index[1] - 90) / 60 i = 90 + (60 * multiple) canvas.create_rectangle(t, i, t+60, i+60, fill=color) # Changes the color of an array from its original color to its new color def Update_Array(Master, side, color, coordinates): index = CoordinatesToIndex(coordinates) # print(str(Master.index(side)+1)) print(str(index)) # time.sleep(10) Master[index[0] + (index[1] * 3)] = color def Before_After(canvas, all_sides, colors, temp): canvas.delete("line") if (temp[1] == "Down"): x1 = 65 y1 = 80 x2 = 65 y2 = 40 if temp[2] == 1: x1 += 50 x2 = x1 elif temp[2] == 2: x1 += 100 x2 = x1 elif (temp[1] == "Up"): x1 = 65 y1 = 260 x2 = 65 y2 = 290 if (temp[2] == 1): x1 += 50 x2 = x1 elif (temp[2] == 2): x1 += 100 x2 = x1 elif (temp[1] == "Left"): x1 = 200 y1 = 115 x2 = 230 y2 = 115 if (temp[2] == 1): y1 += 50 y2 = y1 elif (temp[2] == 2): y1 += 100 y2 = y1 elif (temp[1] == "Right"): x1 = 35 y1 = 115 x2 = 5 y2 = 115 if (temp[2] == 1): y1 += 50 y2 = y1 elif (temp[2] == 2): y1 += 100 y2 = y1 # Where you start the end of x, where you start the end of y (arrow spot) # The lines x axis at the end, the y axis at the end (begin line spot) w = canvas.create_line(x1, y1, x2, y2, arrow=FIRST, tag = "line") print(colors) # time.sleep(10) for r in range(0, 2): i = 90 counter = 0 if r ==1: print("This spot needs to change the rubiks cube to the \"after\" section") Solving_algorithm.Rotate_Cube(all_sides, temp[0], all_sides[all_sides.index("Front")+1], temp[2], temp[1], temp[3]) for z in range(0, 3): if r == 1: t = 260 #98 else: t = 40 for q in range(0, 3): canvas.create_rectangle(t, i, t+50, i+50, fill=colors[counter]) t += 50 counter += 1 #i += 60 i += 50
gpl-3.0
MrNuggles/HeyBoet-Telegram-Bot
temboo/Library/Uber/Estimates/GetPriceEstimates.py
5
4198
# -*- coding: utf-8 -*-

###############################################################################
#
# GetPriceEstimates
# Returns an estimated price range for each product offered at a given location.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json


class GetPriceEstimates(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetPriceEstimates Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetPriceEstimates, self).__init__(temboo_session, '/Library/Uber/Estimates/GetPriceEstimates')

    def new_input_set(self):
        return GetPriceEstimatesInputSet()

    def _make_result_set(self, result, path):
        return GetPriceEstimatesResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return GetPriceEstimatesChoreographyExecution(session, exec_id, path)


class GetPriceEstimatesInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetPriceEstimates
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_EndLatitude(self, value):
        """
        Set the value of the EndLatitude input for this Choreo. ((required, decimal) The latitude coordinate for the destination e.g., 40.650729.)
        """
        super(GetPriceEstimatesInputSet, self)._set_input('EndLatitude', value)

    def set_EndLongitude(self, value):
        """
        Set the value of the EndLongitude input for this Choreo. ((required, decimal) The longitude coordinate for the destination e.g., -74.009536.)
        """
        super(GetPriceEstimatesInputSet, self)._set_input('EndLongitude', value)

    def set_ServerToken(self, value):
        """
        Set the value of the ServerToken input for this Choreo. ((required, string) The Server Token provided by Uber.)
        """
        super(GetPriceEstimatesInputSet, self)._set_input('ServerToken', value)

    def set_StartLatitude(self, value):
        """
        Set the value of the StartLatitude input for this Choreo. ((required, decimal) The latitude coordinate for the starting location e.g., 40.71863.)
        """
        super(GetPriceEstimatesInputSet, self)._set_input('StartLatitude', value)

    def set_StartLongitude(self, value):
        """
        Set the value of the StartLongitude input for this Choreo. ((required, decimal) The longitude coordinate for the starting location e.g., -74.005584.)
        """
        super(GetPriceEstimatesInputSet, self)._set_input('StartLongitude', value)


class GetPriceEstimatesResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetPriceEstimates Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
""" def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Uber.) """ return self._output.get('Response', None) class GetPriceEstimatesChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return GetPriceEstimatesResultSet(response, path)
gpl-3.0
lbeltrame/bcbio-nextgen
bcbio/structural/gatkcnv.py
2
17264
"""Support for Copy Number Variations (CNVs) with GATK4 https://software.broadinstitute.org/gatk/documentation/article?id=11682 https://gatkforums.broadinstitute.org/dsde/discussion/11683/ """ import glob import os import shutil import numpy as np import toolz as tz from bcbio import broad, utils from bcbio.distributed.transaction import file_transaction from bcbio.log import logger from bcbio.pipeline import datadict as dd from bcbio.variation import bedutils, vcfutils def run(items, background=None): """Detect copy number variations from batched set of samples using GATK4 CNV calling. TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller """ if not background: background = [] paired = vcfutils.get_paired(items + background) if paired: out = _run_paired(paired) else: out = items logger.warn("GATK4 CNV calling currently only available for somatic samples: %s" % ", ".join([dd.get_sample_name(d) for d in items + background])) return out def _run_paired(paired): """Run somatic variant calling pipeline. """ from bcbio.structural import titancna work_dir = _sv_workdir(paired.tumor_data) seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data), work_dir, paired) call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data) out = [] if paired.normal_data: out.append(paired.normal_data) if "sv" not in paired.tumor_data: paired.tumor_data["sv"] = [] paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv", "call_file": call_file, "vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header, _seg_to_vcf, paired.tumor_data), "seg": seg_files["seg"], "plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)}) out.append(paired.tumor_data) return out def call_copy_numbers(seg_file, work_dir, data): """Call copy numbers from a normalized and segmented input file. """ out_file = os.path.join(work_dir, "%s-call.seg" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CallCopyRatioSegments", "-I", seg_file, "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file def plot_model_segments(seg_files, work_dir, data): """Diagnostic plots of segmentation and inputs. """ from bcbio.heterogeneity import chromhacks out_file = os.path.join(work_dir, "%s.modeled.png" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: dict_file = utils.splitext_plus(dd.get_ref_file(data))[0] + ".dict" plot_dict = os.path.join(os.path.dirname(tx_out_file), os.path.basename(dict_file)) with open(dict_file) as in_handle: with open(plot_dict, "w") as out_handle: for line in in_handle: if line.startswith("@SQ"): cur_chrom = [x.split(":", 1)[1].strip() for x in line.split("\t") if x.startswith("SN:")][0] if chromhacks.is_autosomal_or_sex(cur_chrom): out_handle.write(line) else: out_handle.write(line) params = ["-T", "PlotModeledSegments", "--denoised-copy-ratios", tz.get_in(["depth", "bins", "normalized"], data), "--segments", seg_files["final_seg"], "--allelic-counts", seg_files["tumor_hets"], "--sequence-dictionary", plot_dict, "--minimum-contig-length", "10", "--output-prefix", dd.get_sample_name(data), "-O", os.path.dirname(tx_out_file)] _run_with_memory_scaling(params, tx_out_file, data) return {"seg": out_file} def model_segments(copy_file, work_dir, paired): """Perform segmentation on input copy number log2 ratio file. 
""" out_file = os.path.join(work_dir, "%s.cr.seg" % dd.get_sample_name(paired.tumor_data)) tumor_counts, normal_counts = heterogzygote_counts(paired) if not utils.file_exists(out_file): with file_transaction(paired.tumor_data, out_file) as tx_out_file: params = ["-T", "ModelSegments", "--denoised-copy-ratios", copy_file, "--allelic-counts", tumor_counts, "--output-prefix", dd.get_sample_name(paired.tumor_data), "-O", os.path.dirname(tx_out_file)] if normal_counts: params += ["--normal-allelic-counts", normal_counts] _run_with_memory_scaling(params, tx_out_file, paired.tumor_data) for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file), "%s*" % dd.get_sample_name(paired.tumor_data))): shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname))) return {"seg": out_file, "tumor_hets": out_file.replace(".cr.seg", ".hets.tsv"), "final_seg": out_file.replace(".cr.seg", ".modelFinal.seg")} def denoise(data, pon, work_dir): """Normalize read counts using panel of normal background or GC/mappability """ std_file = os.path.join(work_dir, "%s-crstandardized.tsv" % dd.get_sample_name(data)) denoise_file = os.path.join(work_dir, "%s-crdenoised.tsv" % dd.get_sample_name(data)) if not utils.file_exists(std_file): with file_transaction(data, std_file, denoise_file) as (tx_std_file, tx_denoise_file): params = ["-T", "DenoiseReadCounts", "-I", tz.get_in(["depth", "bins", "target"], data), "--standardized-copy-ratios", tx_std_file, "--denoised-copy-ratios", tx_denoise_file] if pon: params += ["--count-panel-of-normals", pon] else: params += ["--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], data)] _run_with_memory_scaling(params, tx_std_file, data) return denoise_file if pon else std_file def create_panel_of_normals(items, group_id, work_dir): """Create a panel of normals from one or more background read counts. """ out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id)) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: params = ["-T", "CreateReadCountPanelOfNormals", "-O", tx_out_file, "--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])] for data in items: params += ["-I", tz.get_in(["depth", "bins", "target"], data)] _run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True) return out_file def pon_to_bed(pon_file, out_dir, data): """Extract BED intervals from a GATK4 hdf5 panel of normal file. """ out_file = os.path.join(out_dir, "%s-intervals.bed" % (utils.splitext_plus(os.path.basename(pon_file))[0])) if not utils.file_uptodate(out_file, pon_file): import h5py with file_transaction(data, out_file) as tx_out_file: with h5py.File(pon_file, "r") as f: with open(tx_out_file, "w") as out_handle: intervals = f["original_data"]["intervals"] for i in range(len(intervals["transposed_index_start_end"][0])): chrom = intervals["indexed_contig_names"][intervals["transposed_index_start_end"][0][i]] if isinstance(chrom, bytes): chrom = chrom.decode("utf-8") start = int(intervals["transposed_index_start_end"][1][i]) - 1 end = int(intervals["transposed_index_start_end"][2][i]) out_handle.write("%s\t%s\t%s\n" % (chrom, start, end)) return out_file def prepare_intervals(data, region_file, work_dir): """Prepare interval regions for targeted and gene based regions. 
""" target_file = os.path.join(work_dir, "%s-target.interval_list" % dd.get_sample_name(data)) if not utils.file_uptodate(target_file, region_file): with file_transaction(data, target_file) as tx_out_file: params = ["-T", "PreprocessIntervals", "-R", dd.get_ref_file(data), "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file] if dd.get_coverage_interval(data) == "genome": params += ["--bin-length", "1000", "--padding", "0"] else: params += ["-L", region_file, "--bin-length", "0", "--padding", "250"] _run_with_memory_scaling(params, tx_out_file, data) return target_file def annotate_intervals(target_file, data): """Provide GC annotated intervals for error correction during panels and denoising. TODO: include mappability and segmentation duplication inputs """ out_file = "%s-gcannotated.tsv" % utils.splitext_plus(target_file)[0] if not utils.file_uptodate(out_file, target_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "AnnotateIntervals", "-R", dd.get_ref_file(data), "-L", target_file, "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file def collect_read_counts(data, work_dir): """Count reads in defined bins using CollectReadCounts. """ out_file = os.path.join(work_dir, "%s-target-coverage.hdf5" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CollectReadCounts", "-I", dd.get_align_bam(data), "-L", tz.get_in(["regions", "bins", "target"], data), "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file, "--format", "HDF5"] _run_with_memory_scaling(params, tx_out_file, data) return out_file def heterogzygote_counts(paired): """Provide tumor/normal counts at population heterozyogte sites with CollectAllelicCounts. """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(paired.tumor_data), "structural", "counts")) key = "germline_het_pon" het_bed = tz.get_in(["genome_resources", "variation", key], paired.tumor_data) vr = bedutils.population_variant_regions([x for x in [paired.tumor_data, paired.normal_data] if x]) cur_het_bed = bedutils.intersect_two(het_bed, vr, work_dir, paired.tumor_data) tumor_counts = _run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.tumor_data) normal_counts = (_run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.normal_data) if paired.normal_data else None) if normal_counts: tumor_counts, normal_counts = _filter_by_normal(tumor_counts, normal_counts, paired.tumor_data) return tumor_counts, normal_counts def _filter_by_normal(tumor_counts, normal_counts, data): """Filter count files based on normal frequency and median depth, avoiding high depth regions. 
For frequency, restricts normal positions to those between 0.4 and 0.65 For depth, matches approach used in AMBER to try and avoid problematic genomic regions with high count in the normal: https://github.com/hartwigmedical/hmftools/tree/master/amber#usage """ from bcbio.heterogeneity import bubbletree fparams = bubbletree.NORMAL_FILTER_PARAMS tumor_out = "%s-normfilter%s" % utils.splitext_plus(tumor_counts) normal_out = "%s-normfilter%s" % utils.splitext_plus(normal_counts) if not utils.file_uptodate(tumor_out, tumor_counts): with file_transaction(data, tumor_out, normal_out) as (tx_tumor_out, tx_normal_out): median_depth = _get_normal_median_depth(normal_counts) min_normal_depth = median_depth * fparams["min_depth_percent"] max_normal_depth = median_depth * fparams["max_depth_percent"] with open(tumor_counts) as tumor_handle: with open(normal_counts) as normal_handle: with open(tx_tumor_out, "w") as tumor_out_handle: with open(tx_normal_out, "w") as normal_out_handle: header = None for t, n in zip(tumor_handle, normal_handle): if header is None: if not n.startswith("@"): header = n.strip().split() tumor_out_handle.write(t) normal_out_handle.write(n) elif (_normal_passes_depth(header, n, min_normal_depth, max_normal_depth) and _normal_passes_freq(header, n, fparams)): tumor_out_handle.write(t) normal_out_handle.write(n) return tumor_out, normal_out def _normal_passes_freq(header, line, fparams): vals = dict(zip(header, line.strip().split())) cur_depth = float(vals["REF_COUNT"]) + int(vals["ALT_COUNT"]) if cur_depth > 0: cur_freq = float(vals["ALT_COUNT"]) / cur_depth else: cur_freq = 0.0 return cur_freq >= fparams["min_freq_narrow"] and cur_freq <= fparams["max_freq_narrow"] def _normal_passes_depth(header, line, min_normal_depth, max_normal_depth): vals = dict(zip(header, line.strip().split())) cur_depth = int(vals["REF_COUNT"]) + int(vals["ALT_COUNT"]) return cur_depth >= min_normal_depth and cur_depth <= max_normal_depth def _get_normal_median_depth(normal_counts): depths = [] with open(normal_counts) as in_handle: header = None for line in in_handle: if header is None and not line.startswith("@"): header = line.strip().split() elif header: n_vals = dict(zip(header, line.strip().split())) depths.append(int(n_vals["REF_COUNT"]) + int(n_vals["ALT_COUNT"])) return np.median(depths) def _run_collect_allelic_counts(pos_file, pos_name, work_dir, data): """Counts by alleles for a specific sample and set of positions. 
""" out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural", "counts")) out_file = os.path.join(out_dir, "%s-%s-counts.tsv" % (dd.get_sample_name(data), pos_name)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CollectAllelicCounts", "-L", pos_file, "-I", dd.get_align_bam(data), "-R", dd.get_ref_file(data), "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file def _run_with_memory_scaling(params, tx_out_file, data, ld_preload=False): num_cores = dd.get_num_cores(data) memscale = {"magnitude": 0.9 * num_cores, "direction": "increase"} if num_cores > 1 else None # Ignore tools_off: [gatk4], since it doesn't apply to GATK CNV calling config = utils.deepish_copy(data["config"]) if "gatk4" in dd.get_tools_off({"config": config}): config["algorithm"]["tools_off"].remove("gatk4") broad_runner = broad.runner_from_config(config) broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale, ld_preload=ld_preload) # ## VCF output def _get_seg_header(in_handle): for line in in_handle: if not line.startswith("@"): break return line.strip().split("\t"), in_handle def _seg_to_vcf(vals): """Convert GATK CNV calls seg output to a VCF line. """ call_to_cn = {"+": 3, "-": 1} call_to_type = {"+": "DUP", "-": "DEL"} if vals["CALL"] not in ["0"]: info = ["FOLD_CHANGE_LOG=%s" % vals["MEAN_LOG2_COPY_RATIO"], "PROBES=%s" % vals["NUM_POINTS_COPY_RATIO"], "SVTYPE=%s" % call_to_type[vals["CALL"]], "SVLEN=%s" % (int(vals["END"]) - int(vals["START"])), "END=%s" % vals["END"], "CN=%s" % call_to_cn[vals["CALL"]]] return [vals["CONTIG"], vals["START"], ".", "N", "<%s>" % call_to_type[vals["CALL"]], ".", ".", ";".join(info), "GT", "0/1"] def _sv_workdir(data): return utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural", dd.get_sample_name(data), "gatk-cnv"))
mit
bemcdonnell/SWMMOutputAPI
swmmoutputapi/swmmbinreader.py
1
24788
""" SWMM Output File Wrapper for the New OutputAPI. Author: Bryant E. McDonnell Date: 1/10/2016 """ from ctypes import * from _toolkitpyswmm import * from datetime import datetime, timedelta import os __author__ = 'Bryant E. McDonnell ([email protected])' __copyright__ = 'Copyright (c) 2016 Bryant E. McDonnell' __license__ = 'BSD2' __version__ = '0.2.1' class _Opaque(Structure): """ Used soley for passing the pointer to the smoapu struct to API """ pass class SWMMBinReader: def __init__(self): """ Instantiate python Wrapper Object and build Wrapper functions. """ def get_pkgpath(): import _toolkitpyswmm as tkp return os.path.dirname(tkp.__file__.replace('\\','/')) try: #Later Check for OS Type dllname = 'outputAPI_winx86.dll' #when platform detection is enabled, dllname can be changed dllLoc = get_pkgpath() + '/data/'+ dllname self.swmmdll = CDLL(dllLoc) except: raise Exception('Failed to Open Linked Library') #### Initializing DLL Function List #Initialize Pointer to smoapi self._initsmoapi = self.swmmdll.SMO_init self._initsmoapi.restype = POINTER(_Opaque) #Open File Function Handle self._openBinFile = self.swmmdll.SMO_open self._free = self.swmmdll.SMO_free self._close = self.swmmdll.SMO_close #Project Data self._getProjectSize = self.swmmdll.SMO_getProjectSize self._getTimes = self.swmmdll.SMO_getTimes self._getStartTime = self.swmmdll.SMO_getStartTime self._getUnits = self.swmmdll.SMO_getUnits #Object ID Function Handles self._getIDs = self.swmmdll.SMO_getElementName #Object Series Function Handles self._getSubcatchSeries = self.swmmdll.SMO_getSubcatchSeries self._getNodeSeries = self.swmmdll.SMO_getNodeSeries self._getLinkSeries = self.swmmdll.SMO_getLinkSeries self._getSystemSeries = self.swmmdll.SMO_getSystemSeries #Object Attribure Function Handles self._getSubcatchAttribute = self.swmmdll.SMO_getSubcatchAttribute self._getNodeAttribute = self.swmmdll.SMO_getNodeAttribute self._getLinkAttribute = self.swmmdll.SMO_getLinkAttribute self._getSystemAttribute = self.swmmdll.SMO_getSystemAttribute #Object Result Function Handles self._getSubcatchResult = self.swmmdll.SMO_getSubcatchResult self._getNodeResult = self.swmmdll.SMO_getNodeResult self._getLinkResult = self.swmmdll.SMO_getLinkResult self._getSystemResult = self.swmmdll.SMO_getSystemResult #Array Builder self._newOutValueArray = self.swmmdll.SMO_newOutValueArray self._newOutValueArray.argtypes = [POINTER(_Opaque), c_int, c_int, POINTER(c_int), POINTER(c_int)] self._newOutValueArray.restype = POINTER(c_float) #Series Builder self._newOutValueSeries = self.swmmdll.SMO_newOutValueSeries self._newOutValueSeries.argtypes = [POINTER(_Opaque), c_int, c_int, POINTER(c_int), POINTER(c_int)] self._newOutValueSeries.restype = POINTER(c_float) #SWMM Date num 2 String self.SWMMdateToStr = self.swmmdll.datetime_dateToStr #SWMM Time num 2 String self.SWMMtimeToStr = self.swmmdll.datetime_timeToStr def OpenBinFile(self, OutLoc): """Opens SWMM5 binary output file. :param str OutLoc: Path to Binary output file :return: None Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") """ self.smoapi = self._initsmoapi() ErrNo = self._openBinFile(self.smoapi,OutLoc) if ErrNo != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo, DLLErrorKeys[ErrNo])) def CloseBinFile(self): """Closes binary output file and cleans up member variables. 
        :returns: None

        Examples:

        >>> OutputFile = SWMMBinReader()
        >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
        >>> OutputFile.CloseBinFile()
        """
        ErrNo = self._close(self.smoapi)
        if hasattr(self, 'SubcatchmentIDs'):
            delattr(self, 'SubcatchmentIDs')
        if hasattr(self, 'NodeIDs'):
            delattr(self, 'NodeIDs')
        if hasattr(self, 'LinkIDs'):
            delattr(self, 'LinkIDs')
        if hasattr(self, 'PollutantIDs'):
            delattr(self, 'PollutantIDs')
        if ErrNo != 0:
            # ErrNo is a plain int returned by the DLL call, so it is used
            # directly (previously this raised a NameError on an undefined
            # ErrNo1 and called .value on an int)
            raise Exception("API ErrNo {0}:{1}".format(ErrNo, DLLErrorKeys[ErrNo]))

    def _get_SubcatchIDs(self):
        """
        Purpose: Generates member Element IDs dictionary for Subcatchments
        """
        self.SubcatchmentIDs = {}
        for i in range(self.get_ProjectSize(subcatchCount)):
            NAME = create_string_buffer(46)
            LEN = c_int(46)
            ErrNo1 = self._getIDs(self.smoapi, SM_subcatch, i, byref(NAME), byref(LEN))
            if ErrNo1 != 0:
                # ErrNo1 is an int, not a c_int; it has no .value attribute
                raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]))
            self.SubcatchmentIDs[str(NAME.value)] = i

    def _get_NodeIDs(self):
        """
        Internal Purpose: Generates member Element IDs dictionary for Nodes
        """
        self.NodeIDs = {}
        for i in range(self.get_ProjectSize(nodeCount)):
            NAME = create_string_buffer(46)
            LEN = c_int(46)
            ErrNo1 = self._getIDs(self.smoapi, SM_node, i, byref(NAME), byref(LEN))
            if ErrNo1 != 0:
                raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]))
            self.NodeIDs[str(NAME.value)] = i

    def _get_LinkIDs(self):
        """
        Internal Purpose: Generates member Element IDs dictionary for Links
        """
        self.LinkIDs = {}
        for i in range(self.get_ProjectSize(linkCount)):
            NAME = create_string_buffer(46)
            LEN = c_int(46)
            ErrNo1 = self._getIDs(self.smoapi, SM_link, i, byref(NAME), byref(LEN))
            if ErrNo1 != 0:
                raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]))
            self.LinkIDs[str(NAME.value)] = i

    def _get_PollutantIDs(self):
        """
        Internal Purpose: Generates member Element IDs dictionary for Pollutants
        """
        self.PollutantIDs = {}
        for i in range(self.get_ProjectSize(pollutantCount)):
            NAME = create_string_buffer(46)
            LEN = c_int(46)
            ErrNo1 = self._getIDs(self.smoapi, SM_sys, i, byref(NAME), byref(LEN))
            if ErrNo1 != 0:
                raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]))
            self.PollutantIDs[str(NAME.value)] = i

    def get_IDs(self, SMO_elementIDType):
        """Returns List Type of Element IDs

        :param int SMO_elementIDType: element ID type :doc:`/keyrefs`
        :return: ordered List of IDs
        :rtype: list

        Examples:

        >>> OutputFile = SWMMBinReader()
        >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out")
        >>> Test.get_IDs(SM_subcatch)
        >>> ['S3', 'S2', 'S1']
        >>> Test.get_IDs(SM_node)
        >>> ['J4', 'J1', 'J2', 'J3']
        >>> Test.get_IDs(SM_link)
        >>> ['C3', 'C2', 'C1']
        """
        # Compare against the SM_* element type constants used by the sibling
        # branches and by the examples above (the original first branch tested
        # the unrelated subcatchCount project-size constant)
        if SMO_elementIDType == SM_subcatch:
            if not hasattr(self, 'SubcatchmentIDs'):
                self._get_SubcatchIDs()
            IDlist = self.SubcatchmentIDs.keys()
        elif SMO_elementIDType == SM_node:
            if not hasattr(self, 'NodeIDs'):
                self._get_NodeIDs()
            IDlist = self.NodeIDs.keys()
        elif SMO_elementIDType == SM_link:
            if not hasattr(self, 'LinkIDs'):
                self._get_LinkIDs()
            IDlist = self.LinkIDs.keys()
        elif SMO_elementIDType == SM_sys:
            if not hasattr(self, 'PollutantIDs'):
                self._get_PollutantIDs()
            IDlist = self.PollutantIDs.keys()
        else:
            raise Exception("SMO_elementIDType: {} Outside Valid Types".format(SMO_elementIDType))
        # Do not sort lists
        return IDlist

    def get_Units(self, SMO_unit):
        """Returns flow units and concentration units

        :param int SMO_unit: unit type :doc:`/keyrefs`
        :return: Unit Type
:rtype: str Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") >>> OutputFile.get_Units(flow_rate) >>> 'CFS' """ FlowUnitsType = ['CFS','GPM', 'MGD','CMS', 'LPS', 'MLD'] # cubic feet per second # gallons per minute # million gallons per day # cubic meters per second # liters per second # million liters per day ConcUnitsType = ['mg','ug','COUNT'] # Milligrams / L # Micrograms / L # Counts / L x = c_int() ErrNo1 = self._getUnits(self.smoapi, SMO_unit, byref(x)) if ErrNo1 != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]) ) if SMO_unit == flow_rate: return FlowUnitsType[x.value] elif SMO_unit == concentration: return ConcUnitsType[x.value] else: raise Exception("SMO_unit: {} Outside Valid Types".format(SMO_unit)) def get_Times(self, SMO_timeElementType): """Returns report and simulation time related parameters. :param int SMO_timeElementType: element ID type :doc:`/keyrefs` :return: Report Step (seconds) or Number of Time Steps :rtype: int Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") >>> OutputFile.get_Times(reportStep) >>> 300 """ timeElement = c_int() ErrNo1 = self._getTimes(self.smoapi, SMO_timeElementType, byref(timeElement)) if ErrNo1 != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]) ) return timeElement.value def _get_StartTimeSWMM(self): """ Internal Purpose: Returns the simulation start datetime as double. """ StartTime = c_double() ErrNo1 = self._getStartTime(self.smoapi, byref(StartTime)) if ErrNo1 != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo1, DLLErrorKeys[ErrNo1]) ) return StartTime.value def get_StartTime(self): """Uses SWMM5 Conversion Functions to Pull DateTime String and converts to Python datetime format :return: Simulation start time. :rtype: datetime Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") >>> OutputFile.get_StartTime() >>> datetime.datetime(2016,10,4,12,4,0) """ _StartTime = self._get_StartTimeSWMM() _date = int(_StartTime) _time = _StartTime - _date #Pull Date String DateStr = create_string_buffer(50) self.SWMMdateToStr(c_double(_date), byref(DateStr)) DATE = DateStr.value #Pull Time String TimeStr = create_string_buffer(50) self.SWMMtimeToStr(c_double(_time), byref(TimeStr)) TIME = TimeStr.value DTime = datetime.strptime(DATE+' '+TIME,'%Y-%b-%d %H:%M:%S') return DTime def get_TimeSeries(self): """ Gets simulation start time and builds timeseries array based on the reportStep :return: Simulation time series. :rtype: list of datetime Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") >>> OutputFile.get_TimeSeries() >>> [datetime.datetime(2015, 11, 29, 14, 0), datetime.datetime(2015, 11, 29, 14, 1), ..., datetime.datetime(2015, 11, 29, 14, 9)] """ return [self.get_StartTime() + timedelta(seconds = ind*self.get_Times(reportStep))\ for ind in range(self.get_Times(numPeriods))] def get_ProjectSize(self, SMO_elementCount): """Returns number of elements of a specific element type. 
:param int SMO_elementCount: element ID type :doc:`/keyrefs` :return: Number of Objects :rtype: int Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") >>> OutputFile.get_ProjectSize(nodeCount) >>> 10 """ numel = c_int() ErrNo1 = self._getProjectSize(self.smoapi, SMO_elementCount, byref(numel)) if ErrNo1 != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo1,DLLErrorKeys[ErrNo1]) ) return numel.value def get_Series(self, SMO_elementType, SMO_Attribute, IDName = None, TimeStartInd = 0, TimeEndInd = -1): """Get time series results for particular attribute for an object. Specify series start and length using TimeStartInd and TimeEndInd respectively. :param int SMO_elementType: Element type :doc:`/keyrefs`. :param int SMO_Attribute: Attribute Type :doc:`/keyrefs`. :param str IDName: Element ID name (Default is None for to reach sys variables) (ID Names are case sensitive). :param int TimeStartInd: Starting index for the time series data period (default is 0). :param int TimeEndInd: Array index for the time series data period (defualt is -1 for end). :return: data series :rtype: list Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") >>> OutputFile.get_Series(SM_subcatch, runoff_rate, 'S3', 0, 50) >>> [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, .... 0.0] >>> OutputFile.get_Series(SM_node, invert_depth, 'J1', 0, 50) >>> [3.908519983291626, 4.6215434074401855, 4.594745635986328, 4.595311641693115, ..., 4.595311641693115] >>> OutputFile.get_Series(SM_link, rainfall_subcatch, 'C2', 0, 50) >>> [10.2869873046875, 10.04793643951416, 9.997148513793945, 10.000744819641113, ..., 10.011372566223145] >>> OutputFile.get_Series(SM_sys, rainfall_system, TimeStartInd = 0, TimeEndInd = 50) >>> [0.017500000074505806, 0.017500000074505806, 0.017500000074505806, 0.017500000074505806, ..., 0.017500000074505806] """ if TimeEndInd > self.get_Times(numPeriods): raise Exception("Outside Number of TimeSteps") elif TimeEndInd == -1: TimeEndInd = self.get_Times(numPeriods) + 1 - TimeEndInd sLength = c_int() ErrNo1 = c_int() SeriesPtr = self._newOutValueSeries(self.smoapi, TimeStartInd,\ TimeEndInd, byref(sLength), byref(ErrNo1)) if ErrNo1.value != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value,DLLErrorKeys[ErrNo1.value]) ) if SMO_elementType == SM_subcatch: if not hasattr(self, 'SubcatchmentIDs'): self._get_SubcatchIDs() ErrNo2 = self._getSubcatchSeries(self.smoapi, self.SubcatchmentIDs[IDName], SMO_Attribute, \ TimeStartInd, sLength.value, SeriesPtr) elif SMO_elementType == SM_node: if not hasattr(self, 'NodeIDs'): self._get_NodeIDs() ErrNo2 = self._getNodeSeries(self.smoapi, self.NodeIDs[IDName], SMO_Attribute, \ TimeStartInd, sLength.value, SeriesPtr) elif SMO_elementType == SM_link: if not hasattr(self, 'LinkIDs'): self._get_LinkIDs() ErrNo2 = self._getLinkSeries(self.smoapi, self.LinkIDs[IDName], SMO_Attribute, \ TimeStartInd, sLength.value, SeriesPtr) ## Add Pollutants Later elif SMO_elementType == SM_sys: ErrNo2 = self._getSystemSeries(self.smoapi, SMO_Attribute, \ TimeStartInd, sLength.value, SeriesPtr) else: raise Exception("SMO_elementType: {} Outside Valid Types".format(SMO_elementType)) if ErrNo2 != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo2,DLLErrorKeys[ErrNo2]) ) BldArray = [SeriesPtr[i] for i in range(sLength.value)] self._free(SeriesPtr) return BldArray def get_Attribute(self, SMO_elementType, SMO_Attribute, TimeInd): """Get results for 
particular attribute for all elements at a specific time index. :param int SMO_elementType: Element type :doc:`/keyrefs`. :param int SMO_Attribute: Attribute Type :doc:`/keyrefs`. :param int TimeInd: TimeInd :return: data list in order of the IDs of the SMO_elementType :rtype: list Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") >>> OutputFile.get_Attribute(SM_subcatch, rainfall_subcatch, 0) >>> [0.017500000074505806, 0.017500000074505806, 0.017500000074505806] >>> OutputFile.get_Attribute(SM_node, invert_depth, 10) >>> [4.596884250640869, 0.720202624797821, 0.6315776705741882, 0.6312257051467896] >>> OutputFile.get_Attribute(SM_link, flow_rate_link, 50) >>> [9.00419807434082, 10.011459350585938, 11.020767211914062] """ if TimeInd > self.get_Times(numPeriods)-1: raise Exception("Outside Number of TimeSteps") aLength = c_int() ErrNo1 = c_int() ValArrayPtr = self._newOutValueArray(self.smoapi, getAttribute,\ SMO_elementType, byref(aLength), byref(ErrNo1)) if ErrNo1.value != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo1.value,DLLErrorKeys[ErrNo1.value]) ) if SMO_elementType == SM_subcatch: ErrNo2 = self._getSubcatchAttribute(self.smoapi, TimeInd, SMO_Attribute, ValArrayPtr) elif SMO_elementType == SM_link: ErrNo2 = self._getLinkAttribute(self.smoapi, TimeInd, SMO_Attribute, ValArrayPtr) elif SMO_elementType == SM_node: ErrNo2 = self._getNodeAttribute(self.smoapi, TimeInd, SMO_Attribute, ValArrayPtr) ## Add Pollutants Later else: raise Exception("SMO_elementType: {} Outside Valid Types".format(SMO_elementType)) if ErrNo2 != 0: raise Exception("API ErrNo {0}:{1}".format(ErrNo2,DLLErrorKeys[ErrNo2]) ) BldArray = [ValArrayPtr[i] for i in range(aLength.value)] self._free(ValArrayPtr) return BldArray def get_Result(self, SMO_elementType, TimeInd, IDName = None): """For a element ID at given time, get all attributes :param int SMO_elementType: Element type :doc:`/keyrefs`. 
:param int TimeInd: Time Index :param int IDName: IDName (default None for System Variables) Examples: >>> OutputFile = SWMMBinReader() >>> OutputFile.OpenBinFile(r"C:\\PROJECTCODE\\SWMMOutputAPI\\testing\\outputfile.out") >>> OutputFile.get_Result(SM_subcatch,3000,'S3') >>> [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] >>> OutputFile.get_Result(SM_node,3000,'J1') >>> [4.594789505004883, 25.322790145874023, 0.0, 9.000000953674316, 9.000000953674316, 0.0] >>> OutputFile.get_Result(SM_link,9000,'C3') >>> [11.0, 0.6312892436981201, 12.93112564086914, 185.72474670410156, 0.270773708820343] >>> OutputFile.get_Result(SM_sys,3000,'S3') >>> [70.0, 0.0, 0.0, 0.0, 0.0, 8.0, 0.0, 0.0, 3.0, 11.0, 0.0, 11.000021934509277, 532.2583618164062, 0.0, 0.0] """ if TimeInd > self.get_Times(numPeriods)-1: raise Exception("Outside Number of TimeSteps") alength = c_int() ErrNo1 = c_int() ValArrayPtr = self._newOutValueArray(self.smoapi, getResult,\ SMO_elementType, byref(alength), byref(ErrNo1)) if SMO_elementType == SM_subcatch: if not hasattr(self, 'SubcatchmentIDs'): self._get_SubcatchIDs() ErrNo2 = self._getSubcatchResult(self.smoapi, TimeInd, self.SubcatchmentIDs[IDName], ValArrayPtr) elif SMO_elementType == SM_node: if not hasattr(self, 'NodeIDs'): self._get_NodeIDs() ErrNo2 = self._getNodeResult(self.smoapi, TimeInd, self.NodeIDs[IDName], ValArrayPtr) elif SMO_elementType == SM_link: if not hasattr(self, 'LinkIDs'): self._get_LinkIDs() ErrNo2 = self._getLinkResult(self.smoapi, TimeInd, self.LinkIDs[IDName], ValArrayPtr) ## Add Pollutants Later elif SMO_elementType == SM_sys: ErrNo2 = self._getSystemResult(self.smoapi, TimeInd, ValArrayPtr) else: raise Exception("SMO_elementType: {} Outside Valid Types".format(SMO_elementType)) BldArray = [ValArrayPtr[i] for i in range(alength.value)] self._free(ValArrayPtr) return BldArray if __name__ in "__main__": ## Run Tests ## Open Test = SWMMBinReader() Test.OpenBinFile(r"C:\PROJECTCODE\SWMMOutputAPI\testing\OutputTestModel522_SHORT.out") ## Get IDs print("\nProject Element ID Info") print(Test.get_IDs(SM_subcatch)) print(Test.get_IDs(SM_node)) print(Test.get_IDs(SM_link)) print("\nGet Units") print('flow_rate: {}'.format(Test.get_Units(flow_rate))) print('concentration: {}'.format(Test.get_Units(concentration))) ## Get Project Size print("\nProject Size Info") print("Subcatchments: {}".format(Test.get_ProjectSize(subcatchCount))) print("Nodes: {}".format(Test.get_ProjectSize(nodeCount))) print("Links: {}".format(Test.get_ProjectSize(linkCount))) print("Pollutants: {}".format(Test.get_ProjectSize(pollutantCount))) ## Project Time Steps print("\nProject Time Info") print("Report Step: {}".format(Test.get_Times(reportStep))) print("Periods: {}".format(Test.get_Times(numPeriods))) ## Get Time Series print("\nGet Time Series") TimeSeries = Test.get_TimeSeries() print(TimeSeries[:10]) ## Get Series print("\nSeries Tests") SubcSeries = Test.get_Series(SM_subcatch, runoff_rate, 'S3', 0, 50) print(SubcSeries) NodeSeries = Test.get_Series(SM_node, invert_depth, 'J1', 0, 50) print(NodeSeries) LinkSeries = Test.get_Series(SM_link, rainfall_subcatch, 'C2', 0, 50) print(LinkSeries) SystSeries = Test.get_Series(SM_sys, rainfall_system, TimeStartInd = 0, TimeEndInd = 50) print(SystSeries) ## Get Attributes print("\nAttributes Tests") SubcAttributes = Test.get_Attribute(SM_subcatch, rainfall_subcatch, 0) #<- Check Values.. 
Might be issue here print(SubcAttributes) NodeAttributes = Test.get_Attribute(SM_node, invert_depth, 10) print(NodeAttributes) LinkAttributes = Test.get_Attribute(SM_link, flow_rate_link, 50) print(LinkAttributes) ## Get Results print("\nResult Tests") SubcResults = Test.get_Result(SM_subcatch,3000,'S3') print(SubcResults) NodeResults = Test.get_Result(SM_node,3000,'J1') print(NodeResults) LinkResults = Test.get_Result(SM_link,9000,'C3') print(LinkResults) SystResults = Test.get_Result(SM_sys,3000,'S3') print(SystResults) ## Close Output File Test.CloseBinFile() help(SWMMBinReader)
bsd-2-clause
geokala/cloudify-plugins-common
cloudify/workflows/tasks.py
2
24871
######## # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import sys import time import uuid import Queue from cloudify import exceptions from cloudify.workflows import api INFINITE_TOTAL_RETRIES = -1 DEFAULT_TOTAL_RETRIES = INFINITE_TOTAL_RETRIES DEFAULT_RETRY_INTERVAL = 30 DEFAULT_SEND_TASK_EVENTS = True TASK_PENDING = 'pending' TASK_SENDING = 'sending' TASK_SENT = 'sent' TASK_STARTED = 'started' TASK_RESCHEDULED = 'rescheduled' TASK_SUCCEEDED = 'succeeded' TASK_FAILED = 'failed' TERMINATED_STATES = [TASK_RESCHEDULED, TASK_SUCCEEDED, TASK_FAILED] def retry_failure_handler(task): """Basic on_success/on_failure handler that always returns retry""" return HandlerResult.retry() class WorkflowTask(object): """A base class for workflow tasks""" def __init__(self, workflow_context, task_id=None, info=None, on_success=None, on_failure=None, total_retries=DEFAULT_TOTAL_RETRIES, retry_interval=DEFAULT_RETRY_INTERVAL, send_task_events=DEFAULT_SEND_TASK_EVENTS): """ :param task_id: The id of this task (generated if none is provided) :param info: A short description of this task (for logging) :param on_success: A handler called when the task's execution terminates successfully. Expected to return one of [HandlerResult.retry(), HandlerResult.cont()] to indicate whether this task should be re-executed. :param on_failure: A handler called when the task's execution fails. Expected to return one of [HandlerResult.retry(), HandlerResult.ignore(), HandlerResult.fail()] to indicate whether this task should be re-executed, cause the engine to terminate workflow execution immediately or simply ignore this task failure and move on. :param total_retries: Maximum retry attempt for this task, in case the handlers return a retry attempt. 
:param retry_interval: Number of seconds to wait between retries :param workflow_context: the CloudifyWorkflowContext instance """ self.id = task_id or str(uuid.uuid4()) self._state = TASK_PENDING self.async_result = None self.on_success = on_success self.on_failure = on_failure self.info = info self.error = None self.total_retries = total_retries self.retry_interval = retry_interval self.terminated = Queue.Queue(maxsize=1) self.is_terminated = False self.workflow_context = workflow_context self.send_task_events = send_task_events self.current_retries = 0 # timestamp for which the task should not be executed # by the task graph before reached, overridden by the task # graph during retries self.execute_after = time.time() def dump(self): return { 'id': self.id, 'state': self.get_state(), 'info': self.info, 'error': self.error, 'current_retries': self.current_retries, 'cloudify_context': self.cloudify_context } def is_remote(self): """ :return: Is this a remote task """ return not self.is_local() def is_local(self): """ :return: Is this a local task """ raise NotImplementedError('Implemented by subclasses') def is_nop(self): """ :return: Is this a NOP task """ return False def get_state(self): """ Get the task state :return: The task state [pending, sending, sent, started, rescheduled, succeeded, failed] """ return self._state def set_state(self, state): """ Set the task state :param state: The state to set [pending, sending, sent, started, rescheduled, succeeded, failed] """ if state not in [TASK_PENDING, TASK_SENDING, TASK_SENT, TASK_STARTED, TASK_RESCHEDULED, TASK_SUCCEEDED, TASK_FAILED]: raise RuntimeError('Illegal state set on task: {0} ' '[task={1}]'.format(state, str(self))) self._state = state if state in TERMINATED_STATES: self.is_terminated = True self.terminated.put_nowait(True) def wait_for_terminated(self, timeout=None): if self.is_terminated: return self.terminated.get(timeout=timeout) def handle_task_terminated(self): if self.get_state() in (TASK_FAILED, TASK_RESCHEDULED): handler_result = self._handle_task_not_succeeded() else: handler_result = self._handle_task_succeeded() if handler_result.action == HandlerResult.HANDLER_RETRY: if any([self.total_retries == INFINITE_TOTAL_RETRIES, self.current_retries < self.total_retries, handler_result.ignore_total_retries]): if handler_result.retry_after is None: handler_result.retry_after = self.retry_interval new_task = self.duplicate_for_retry( time.time() + handler_result.retry_after) handler_result.retried_task = new_task else: handler_result.action = HandlerResult.HANDLER_FAIL return handler_result def _handle_task_succeeded(self): """Call handler for task success""" if self.on_success: return self.on_success(self) else: return HandlerResult.cont() def _handle_task_not_succeeded(self): """ Call handler for task which hasn't ended in 'succeeded' state (i.e. has either failed or been rescheduled) """ try: exception = self.async_result.result except Exception as e: exception = exceptions.NonRecoverableError( 'Could not de-serialize ' 'exception of task {0} --> {1}: {2}' .format(self.name, type(e).__name__, str(e))) if isinstance(exception, exceptions.OperationRetry): # operation explicitly requested a retry, so we ignore # the handler set on the task. 
handler_result = HandlerResult.retry() elif self.on_failure: handler_result = self.on_failure(self) else: handler_result = HandlerResult.retry() if handler_result.action == HandlerResult.HANDLER_RETRY: if isinstance(exception, exceptions.NonRecoverableError): handler_result = HandlerResult.fail() elif isinstance(exception, exceptions.RecoverableError): handler_result.retry_after = exception.retry_after return handler_result def __str__(self): suffix = self.info if self.info is not None else '' return '{0}({1})'.format(self.name, suffix) def duplicate_for_retry(self, execute_after): """ :return: A new instance of this task with a new task id """ dup = self._duplicate() dup.execute_after = execute_after dup.current_retries = self.current_retries + 1 if dup.cloudify_context and 'operation' in dup.cloudify_context: op_ctx = dup.cloudify_context['operation'] op_ctx['retry_number'] = dup.current_retries return dup def _duplicate(self): raise NotImplementedError('Implemented by subclasses') @property def cloudify_context(self): raise NotImplementedError('Implemented by subclasses') @property def name(self): """ :return: The task name """ raise NotImplementedError('Implemented by subclasses') class RemoteWorkflowTask(WorkflowTask): """A WorkflowTask wrapping a celery based task""" # cache for registered tasks queries to celery workers cache = {} def __init__(self, task, cloudify_context, workflow_context, task_id=None, info=None, on_success=None, on_failure=retry_failure_handler, total_retries=DEFAULT_TOTAL_RETRIES, retry_interval=DEFAULT_RETRY_INTERVAL, send_task_events=DEFAULT_SEND_TASK_EVENTS): """ :param task: The celery task :param cloudify_context: the cloudify context dict :param task_id: The id of this task (generated if none is provided) :param info: A short description of this task (for logging) :param on_success: A handler called when the task's execution terminates successfully. Expected to return one of [HandlerResult.retry(), HandlerResult.cont()] to indicate whether this task should be re-executed. :param on_failure: A handler called when the task's execution fails. Expected to return one of [HandlerResult.retry(), HandlerResult.ignore(), HandlerResult.fail()] to indicate whether this task should be re-executed, cause the engine to terminate workflow execution immediately or simply ignore this task failure and move on. :param total_retries: Maximum retry attempt for this task, in case the handlers return a retry attempt. :param retry_interval: Number of seconds to wait between retries :param workflow_context: the CloudifyWorkflowContext instance """ super(RemoteWorkflowTask, self).__init__( workflow_context, task_id, info=info, on_success=on_success, on_failure=on_failure, total_retries=total_retries, retry_interval=retry_interval, send_task_events=send_task_events) self.task = task self._cloudify_context = cloudify_context def apply_async(self): """ Call the underlying celery tasks apply_async. Verify the task is registered and send an event before doing so. 
:return: a RemoteWorkflowTaskResult instance wrapping the celery async result """ try: self._verify_task_registered() self.workflow_context.internal.send_task_event(TASK_SENDING, self) self.set_state(TASK_SENT) async_result = self.task.apply_async(task_id=self.id) self.async_result = RemoteWorkflowTaskResult(self, async_result) except exceptions.NonRecoverableError as e: self.set_state(TASK_FAILED) self.workflow_context.internal\ .send_task_event(TASK_FAILED, self, {'exception': e}) self.error = e self.async_result = RemoteWorkflowNotExistTaskResult(self) return self.async_result def is_local(self): return False def _duplicate(self): dup = RemoteWorkflowTask(task=self.task, cloudify_context=self.cloudify_context, workflow_context=self.workflow_context, task_id=None, # we want a new task id info=self.info, on_success=self.on_success, on_failure=self.on_failure, total_retries=self.total_retries, retry_interval=self.retry_interval, send_task_events=self.send_task_events) dup.cloudify_context['task_id'] = dup.id return dup @property def name(self): """The task name""" return self.cloudify_context['task_name'] @property def cloudify_context(self): return self._cloudify_context @property def target(self): """The task target (queue name)""" return self.cloudify_context['task_target'] def _verify_task_registered(self): verify_task_registered(self.name, self.target, self._get_registered) def _get_registered(self): # import here because this only applies in remote execution # environments from cloudify.celery import celery worker_name = 'celery@{0}'.format(self.target) inspect = celery.control.inspect(destination=[worker_name]) registered = inspect.registered() or {} result = registered.get(worker_name, set()) return set(result) class LocalWorkflowTask(WorkflowTask): """A WorkflowTask wrapping a local callable""" def __init__(self, local_task, workflow_context, node=None, info=None, on_success=None, on_failure=retry_failure_handler, total_retries=DEFAULT_TOTAL_RETRIES, retry_interval=DEFAULT_RETRY_INTERVAL, send_task_events=DEFAULT_SEND_TASK_EVENTS, kwargs=None, task_id=None, name=None): """ :param local_task: A callable :param workflow_context: the CloudifyWorkflowContext instance :param node: The CloudifyWorkflowNode instance (if in node context) :param info: A short description of this task (for logging) :param on_success: A handler called when the task's execution terminates successfully. Expected to return one of [HandlerResult.retry(), HandlerResult.cont()] to indicate whether this task should be re-executed. :param on_failure: A handler called when the task's execution fails. Expected to return one of [HandlerResult.retry(), HandlerResult.ignore(), HandlerResult.fail()] to indicate whether this task should be re-executed, cause the engine to terminate workflow execution immediately or simply ignore this task failure and move on. :param total_retries: Maximum retry attempt for this task, in case the handlers return a retry attempt. 
:param retry_interval: Number of seconds to wait between retries :param kwargs: Local task keyword arguments :param name: optional parameter (default: local_task.__name__) """ super(LocalWorkflowTask, self).__init__( info=info, on_success=on_success, on_failure=on_failure, total_retries=total_retries, retry_interval=retry_interval, task_id=task_id, workflow_context=workflow_context, send_task_events=send_task_events) self.local_task = local_task self.node = node self.kwargs = kwargs or {} self._name = name or local_task.__name__ def dump(self): super_dump = super(LocalWorkflowTask, self).dump() super_dump.update({ 'name': self._name }) return super_dump def apply_async(self): """ Execute the task in the local task thread pool :return: A wrapper for the task result """ def local_task_wrapper(): try: self.workflow_context.internal.send_task_event(TASK_STARTED, self) result = self.local_task(**self.kwargs) self.workflow_context.internal.send_task_event( TASK_SUCCEEDED, self, event={'result': str(result)}) self.async_result._holder.result = result self.set_state(TASK_SUCCEEDED) except BaseException as e: new_task_state = TASK_RESCHEDULED if isinstance( e, exceptions.OperationRetry) else TASK_FAILED exc_type, exception, tb = sys.exc_info() self.workflow_context.internal.send_task_event( new_task_state, self, event={'exception': str(exception)}) self.async_result._holder.error = (exception, tb) self.set_state(new_task_state) self.async_result = LocalWorkflowTaskResult(self) self.workflow_context.internal.send_task_event(TASK_SENDING, self) self.set_state(TASK_SENT) self.workflow_context.internal.add_local_task(local_task_wrapper) return self.async_result def is_local(self): return True def _duplicate(self): dup = LocalWorkflowTask(local_task=self.local_task, workflow_context=self.workflow_context, node=self.node, info=self.info, on_success=self.on_success, on_failure=self.on_failure, total_retries=self.total_retries, retry_interval=self.retry_interval, send_task_events=self.send_task_events, kwargs=self.kwargs, name=self.name) return dup @property def name(self): """The task name""" return self._name @property def cloudify_context(self): return self.kwargs.get('__cloudify_context') # NOP tasks class class NOPLocalWorkflowTask(LocalWorkflowTask): def __init__(self, workflow_context): super(NOPLocalWorkflowTask, self).__init__(lambda: None, workflow_context) @property def name(self): """The task name""" return 'NOP' def apply_async(self): self.set_state(TASK_SUCCEEDED) return LocalWorkflowTaskResult(self) def is_nop(self): return True class WorkflowTaskResult(object): """A base wrapper for workflow task results""" def __init__(self, task): self.task = task def _process(self, retry_on_failure): if self.task.workflow_context.internal.graph_mode: return self._get() task_graph = self.task.workflow_context.internal.task_graph while True: self._wait_for_task_terminated() handler_result = self.task.handle_task_terminated() task_graph.remove_task(self.task) try: result = self._get() if handler_result.action != HandlerResult.HANDLER_RETRY: return result except: if (not retry_on_failure or handler_result.action == HandlerResult.HANDLER_FAIL): raise self._sleep(handler_result.retry_after) self.task = handler_result.retried_task task_graph.add_task(self.task) self._check_execution_cancelled() self.task.apply_async() self._refresh_state() @staticmethod def _check_execution_cancelled(): if api.has_cancel_request(): raise api.ExecutionCancelled() def _wait_for_task_terminated(self): while True: 
self._check_execution_cancelled() try: self.task.wait_for_terminated(timeout=1) break except Queue.Empty: continue def _sleep(self, seconds): while seconds > 0: self._check_execution_cancelled() sleep_time = 1 if seconds > 1 else seconds time.sleep(sleep_time) seconds -= sleep_time def get(self, retry_on_failure=True): """ Get the task result. Will block until the task execution ends. :return: The task result """ return self._process(retry_on_failure) def _get(self): raise NotImplementedError('Implemented by subclasses') def _refresh_state(self): raise NotImplementedError('Implemented by subclasses') class RemoteWorkflowNotExistTaskResult(WorkflowTaskResult): def __init__(self, task): super(RemoteWorkflowNotExistTaskResult, self).__init__(task) self.task = task def _get(self): raise self.task.error @property def result(self): return self.task.error class RemoteWorkflowTaskResult(WorkflowTaskResult): """A wrapper for celery's AsyncResult""" def __init__(self, task, async_result): super(RemoteWorkflowTaskResult, self).__init__(task) self.async_result = async_result def _get(self): return self.async_result.get() def _refresh_state(self): self.async_result = self.task.async_result.async_result @property def result(self): return self.async_result.result class LocalWorkflowTaskResult(WorkflowTaskResult): """A wrapper for local workflow task results""" class ResultHolder(object): def __init__(self, result=None, error=None): self.result = result self.error = error def __init__(self, task): """ :param task: The LocalWorkflowTask instance """ super(LocalWorkflowTaskResult, self).__init__(task) self._holder = self.ResultHolder() def _get(self): if self._holder.error is not None: exception, traceback = self._holder.error raise exception, None, traceback return self._holder.result def _refresh_state(self): self._holder = self.task.async_result._holder @property def result(self): if self._holder.error: return self._holder.error[0] else: return self._holder.result class HandlerResult(object): HANDLER_RETRY = 'handler_retry' HANDLER_FAIL = 'handler_fail' HANDLER_IGNORE = 'handler_ignore' HANDLER_CONTINUE = 'handler_continue' def __init__(self, action, ignore_total_retries=False, retry_after=None): self.action = action self.ignore_total_retries = ignore_total_retries self.retry_after = retry_after # this field is filled by handle_terminated_task() below after # duplicating the task and updating the relevant task fields self.retried_task = None @classmethod def retry(cls, ignore_total_retries=False, retry_after=None): return HandlerResult(cls.HANDLER_RETRY, ignore_total_retries=ignore_total_retries, retry_after=retry_after) @classmethod def fail(cls): return HandlerResult(cls.HANDLER_FAIL) @classmethod def cont(cls): return HandlerResult(cls.HANDLER_CONTINUE) @classmethod def ignore(cls): return HandlerResult(cls.HANDLER_IGNORE) def verify_task_registered(name, target, get_registered): cache = RemoteWorkflowTask.cache registered = cache.get(target, set()) if name not in registered: registered = get_registered() cache[target] = registered if name not in registered: raise exceptions.NonRecoverableError( 'Missing task: {0} in worker celery.{1} \n' 'Registered tasks are: {2}' .format(name, target, registered))
apache-2.0
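The retry semantics encoded in handle_task_terminated() are easiest to see from a handler's point of view. A minimal sketch, assuming the module above is importable as cloudify.workflows.tasks; the handler name is made up:

# Illustrative only: an on_failure handler that caps retries itself.
from cloudify.workflows.tasks import HandlerResult  # assumed import path

def fail_after_two_attempts(task):
    # duplicate_for_retry() bumps current_retries on every retried copy
    if task.current_retries >= 2:
        return HandlerResult.fail()
    # leaving retry_after unset makes handle_task_terminated() fall back
    # to the task's own retry_interval
    return HandlerResult.retry()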
akintolga/superdesk-aap
server/aap/commands/import_text_archive.py
2
15489
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license

from bson import ObjectId
import superdesk
import urllib3
import urllib.parse
import xml.etree.ElementTree as etree
import pytz
from pytz import NonExistentTimeError, AmbiguousTimeError
from superdesk import config
from superdesk.io.iptc import subject_codes
from datetime import datetime
import time
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, ITEM_STATE, CONTENT_STATE, FORMAT, FORMATS
from superdesk.io.commands.update_ingest import process_iptc_codes
from superdesk.etree import get_text_word_count
from apps.archive.common import generate_unique_id_and_name
import json
from eve.utils import ParsedRequest

# The older content does not contain an anpa category, so we derive it from the
# publication name
pubnames = {
    'International Sport': 's',
    'Racing': 'r',
    'Parliamentary Press Releases': 'p',
    'Features': 'c',
    'Financial News': 'f',
    'General': 'a',
    'aap Features': 'c',
    'aap International News': 'i',
    'aap Australian Sport': 't',
    'Australian General News': 'a',
    'Asia Pulse Full': 'i',
    'AFR Summary': 'a',
    'Australian Sport': 't',
    'PR Releases': 'j',
    'Entertainment News': 'e',
    'Special Events': 'y',
    'Asia Pulse': 'i',
    'aap International Sport': 's',
    'Emergency Services': 'a',
    'BRW Summary': 'a',
    'FBM Summary': 'a',
    'aap Australian General News': 'a',
    'International News': 'i',
    'aap Financial News': 'f',
    'Asia Pulse Basic': 'i',
    'Political News': 'p',
    'Advisories': 'v'
}


class AppImportTextArchiveCommand(superdesk.Command):
    option_list = (
        superdesk.Option('--start', '-strt', dest='start_id', required=True),
        superdesk.Option('--user', '-usr', dest='user', required=True),
        superdesk.Option('--password', '-pwd', dest='password', required=True),
        superdesk.Option('--url_root', '-url', dest='url', required=True),
        superdesk.Option('--query', '-qry', dest='query', required=True),
        superdesk.Option('--count', '-c', dest='limit', required=False),
        superdesk.Option('--direction', '-d', dest='direction', required=False)
    )

    BATCH_SIZE = 500

    def run(self, start_id, user, password, url, query, limit, direction):
        print('Starting text archive import at {}'.format(start_id))
        self._user = user
        self._password = password
        self._id = int(start_id)
        self._url_root = url
        self._query = urllib.parse.quote(query)

        # direction True is forwards
        self._direction = True
        if direction is not None:
            if direction.lower()[0] == 'r':
                self._direction = False

        if limit is not None:
            self._limit = int(limit)
        else:
            self._limit = None

        self._api_login()

        x = self._get_bunch(self._id)
        while x:
            self._process_bunch(x)
            if self._limit is not None and self._limit <= 0:
                break
            # _get_bunch returns None once the archive is exhausted
            x = self._get_bunch(self._id)

        print('finished text archive import')

    def _api_login(self):
        self._http = urllib3.PoolManager()
        credentials = '?login[username]={}&login[password]={}'.format(self._user, self._password)
        url = self._url_root + credentials
        r = self._http.urlopen('GET', url, headers={'Content-Type': 'application/xml'})
        self._headers = {'cookie': r.getheader('set-cookie')}

        self._anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')

    def _get_bunch(self, id):
        url = self._url_root
        if self._direction:
            d = '>'
        else:
            d = '<'
        url += 'archives/txtarch?search_docs[struct_query]=(DCDATA_ID{0}{1})&search_docs[query]='.format(d, id)
        url += self._query
        url += '&search_docs[format]=full&search_docs[pagesize]={0}&search_docs[page]=1'.format(self.BATCH_SIZE)
        if self._direction:
            url += '&search_docs[sortorder]=DCDATA_ID%20ASC'
        else:
            url += '&search_docs[sortorder]=DCDATA_ID%20DESC'
        print('Getting batch from DC url [{0}]'.format(url))
        retries = 3
        while retries > 0:
            s = time.time()
            r = self._http.request('GET', url, headers=self._headers)
            print('DC returned in {:.2f} seconds'.format(time.time() - s))
            if r.status == 200:
                e = etree.fromstring(r.data)
                # print(str(r.data))
                count = int(e.find('doc_count').text)
                if count > 0:
                    print('count : {}'.format(count))
                    return e
                # an empty batch means the end of the archive was reached;
                # returning None lets run() terminate cleanly instead of
                # re-requesting the same empty page forever
                print('Complete')
                return None
            else:
                self._api_login()
                retries -= 1
        return None

    def _get_head_value(self, doc, field):
        el = doc.find('dcdossier/document/head/' + field)
        if el is not None:
            return el.text
        return None

    def _addkeywords(self, key, doc, item):
        code = self._get_head_value(doc, key)
        if code:
            if 'keywords' not in item:
                item['keywords'] = []
            item['keywords'].append(code)

    def _process_bunch(self, x):
        # x.findall('dc_rest_docs/dc_rest_doc')[0].get('href')
        items = []
        for doc in x.findall('dc_rest_docs/dc_rest_doc'):
            try:
                # print(doc.get('href'))
                id = doc.find('dcdossier').get('id')
                if self._direction:
                    if int(id) > self._id:
                        self._id = int(id)
                else:
                    if int(id) < self._id:
                        self._id = int(id)
                item = {}
                item['guid'] = doc.find('dcdossier').get('guid')
                item[ITEM_TYPE] = CONTENT_TYPE.TEXT
                format = self._get_head_value(doc, 'Format')
                if format == 't':
                    item[FORMAT] = FORMATS.PRESERVED
                else:
                    item[FORMAT] = FORMATS.HTML

                # if the item has been modified in the archive then it is due to a kill
                # there is an argument that this item should not be imported at all
                if doc.find('dcdossier').get('created') != doc.find('dcdossier').get('modified'):
                    # item[ITEM_STATE] = CONTENT_STATE.KILLED
                    continue
                else:
                    item[ITEM_STATE] = CONTENT_STATE.PUBLISHED

                value = datetime.strptime(self._get_head_value(doc, 'PublicationDate'), '%Y%m%d%H%M%S')
                local_tz = pytz.timezone('Australia/Sydney')
                try:
                    aus_dt = local_tz.localize(value, is_dst=None)
                except NonExistentTimeError:
                    aus_dt = local_tz.localize(value, is_dst=True)
                except AmbiguousTimeError:
                    aus_dt = local_tz.localize(value, is_dst=False)

                item['firstcreated'] = aus_dt.astimezone(pytz.utc)
                item['versioncreated'] = item['firstcreated']

                generate_unique_id_and_name(item)
                item['ingest_id'] = id

                last_line = None
                el = doc.find('dcdossier/document/body/BodyText')
                if el is not None:
                    story = el.text
                    lines = story.split('\n')
                    if len(lines) > 0:
                        last_line = lines[-1]
                    if item.get(FORMAT) == FORMATS.HTML:
                        story = story.replace('\n ', '<p></p>')
                        story = story.replace('\n', '<br>')
                        item['body_html'] = '<p>' + story + '</p>'
                    else:
                        item['body_html'] = '<pre>' + story + '</pre>'
                    try:
                        item['word_count'] = get_text_word_count(item['body_html'])
                    except:
                        pass
                else:
                    # Items with no body are ignored
                    continue

                item['source'] = self._get_head_value(doc, 'Agency')
                # if the source document contains no agency then by definition it is unknown
                if item['source'] is None:
                    item['source'] = 'UNKNOWN'
                else:
                    # check if the source of the document was Newscentre
                    dc_unique = doc.find('dcdossier').get('unique')
                    if dc_unique.startswith('NC.') and last_line is not None:
                        # The AFR summary articles all have agency values 25 chars long
                        if len(item['source']) == 25:
                            item['source'] = 'AAP'
                        # is it a numeric Agency
                        elif self._get_head_value(doc, 'Agency').isdigit():
                            sign_off = last_line.split(' ')
                            # str.split() always yields at least one element,
                            # so take the first token as the sign-off
                            item['source'] = sign_off[0].upper()
                            # clean up what we have extracted
                            if item['source'].startswith('AAP'):
                                item['source'] = 'AAP'
                            else:
                                # make sure it is one of the known values
                                if item['source'] not in {'AAP', 'AP', 'REUT', 'Asia Pulse', 'DPA', 'AFP', 'RAW',
                                                          'NZA', 'NZPA', 'KRT', 'PA', 'PAA', 'SNI', 'REUTERS'}:
                                    print('Source : {}'.format(item['source']))
                                    item['source'] = 'UNKNOWN'

                # self._addkeywords('AsiaPulseCodes', doc, item)

                byline = self._get_head_value(doc, 'Byline')
                if byline:
                    item['byline'] = byline

                # item['service'] = self._get_head_value(doc,'Service')

                category = self._get_head_value(doc, 'Category')
                if not category:
                    publication_name = self._get_head_value(doc, 'PublicationName')
                    if publication_name in pubnames:
                        category = pubnames[publication_name]
                if category:
                    anpacategory = {}
                    anpacategory['qcode'] = category
                    for anpa_category in self._anpa_categories['items']:
                        if anpacategory['qcode'].lower() == anpa_category['qcode'].lower():
                            anpacategory = {'qcode': anpacategory['qcode'], 'name': anpa_category['name']}
                            break
                    item['anpa_category'] = [anpacategory]

                self._addkeywords('CompanyCodes', doc, item)

                item['keyword'] = self._get_head_value(doc, 'Keyword')
                item['ingest_provider_sequence'] = self._get_head_value(doc, 'Sequence')

                original_source = self._get_head_value(doc, 'Author')
                if original_source:
                    item['original_source'] = original_source

                item['headline'] = self._get_head_value(doc, 'Headline')

                code = self._get_head_value(doc, 'SubjectRefNum')
                if code and len(code) == 7:
                    code = '0' + code
                if code and code in subject_codes:
                    item['subject'] = []
                    item['subject'].append({'qcode': code, 'name': subject_codes[code]})
                    try:
                        process_iptc_codes(item, None)
                    except:
                        pass

                slug = self._get_head_value(doc, 'SLUG')
                if slug:
                    item['slugline'] = slug
                else:
                    item['slugline'] = self._get_head_value(doc, 'Keyword')

                take_key = self._get_head_value(doc, 'Takekey')
                if take_key:
                    item['anpa_take_key'] = take_key

                self._addkeywords('Topic', doc, item)

                # self._addkeywords('Selectors', doc, item)

                item['pubstatus'] = 'usable'
                # this is required for the archived service additional lookup
                item['item_id'] = item['guid']
                item[config.VERSION] = 1
                item['flags'] = {'marked_archived_only': True}

                # item['_id'] = ObjectId(id.rjust(24,'0'))
                item['_id'] = ObjectId()
                items.append(item)

                if self._limit:
                    self._limit -= 1
                # print(item)
            except Exception as ex:
                print('Exception parsing DC document {}'.format(id))
                pass

        try:
            res = superdesk.get_resource_service('archived')
            s = time.time()
            res.post(items)
            print('Posted batch to Superdesk in {:.2f} seconds'.format(time.time() - s))
        except Exception as ex:
            if ex.code == 409:
                print('Key clash exception detected')
                # create a list of the guids we tried to post
                guids = [g['guid'] for g in items]
                # create a query for all those id's
                query = {
                    'size': self.BATCH_SIZE,
                    'query': {
                        'filtered': {
                            'filter': {
                                'terms': {
                                    # guids is already a list; nesting it in
                                    # another list would break the terms filter
                                    'guid': guids
                                }
                            }
                        }
                    }
                }

                req = ParsedRequest()
                repos = 'archived'
                req.args = {'source': json.dumps(query), 'repo': repos}

                search_res = superdesk.get_resource_service('search')
                existing = search_res.get(req=req, lookup=None)
                existing_guids = [e['guid'] for e in existing]
                not_existing = [g for g in guids if g not in existing_guids]
                for missing_guid in not_existing:
                    i = [m for m in items if m['guid'] == missing_guid]
                    original = res.find_one(req=None, guid=i[0]['guid'])
                    if not original:
                        try:
                            s = time.time()
                            res.post(i)
                            print('Post single item to Superdesk in {:.2f} seconds'.format(time.time() - s))
                        except Exception as ex:
print('Exception posting single item') else: print('Exception posting batch') superdesk.command('app:import_text_archive', AppImportTextArchiveCommand())
agpl-3.0
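The three-way pytz localization inside _process_bunch is the standard pattern for turning naive archive timestamps into UTC without tripping over DST transitions. A self-contained sketch of just that step (the function name is illustrative):

import pytz
from pytz import NonExistentTimeError, AmbiguousTimeError

def sydney_to_utc(naive_dt):
    tz = pytz.timezone('Australia/Sydney')
    try:
        # is_dst=None forces pytz to raise on gap/overlap times
        local_dt = tz.localize(naive_dt, is_dst=None)
    except NonExistentTimeError:
        local_dt = tz.localize(naive_dt, is_dst=True)   # spring-forward gap
    except AmbiguousTimeError:
        local_dt = tz.localize(naive_dt, is_dst=False)  # fall-back overlap
    return local_dt.astimezone(pytz.utc)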
updateing/android_kernel_sony_msm8960
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
2058
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    print_error_totals()

def raw_syscalls__sys_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, ret):
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid):
        return

    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            syscalls[common_comm][common_pid][id][ret] = 1

def print_error_totals():
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print "  syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print "    err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
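The increment-or-initialise pattern in raw_syscalls__sys_exit leans on autodict from perf's Core module. Assuming autodict behaves like a self-nesting defaultdict, the same trick works standalone:

import collections

def autodict():
    # each missing key materialises another nested dict
    return collections.defaultdict(autodict)

counts = autodict()
try:
    # adding 1 to a fresh leaf (an empty nested dict) raises TypeError...
    counts['cat'][1234][2][-2] += 1
except TypeError:
    # ...so the first hit initialises the counter instead
    counts['cat'][1234][2][-2] = 1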
dawran6/zulip
zerver/management/commands/generate_invite_links.py
22
2688
from __future__ import absolute_import from __future__ import print_function from typing import Any from argparse import ArgumentParser from django.core.management.base import BaseCommand from confirmation.models import Confirmation from zerver.models import UserProfile, PreregistrationUser, \ get_user_profile_by_email, get_realm, email_allowed_for_realm class Command(BaseCommand): help = "Generate activation links for users and print them to stdout." def add_arguments(self, parser): # type: (ArgumentParser) -> None parser.add_argument('--realm', dest='string_id', type=str, help='The realm in which to generate the invites (use for open realms).') parser.add_argument('--force', dest='force', action="store_true", default=False, help='Override that the domain is restricted to external users.') parser.add_argument('emails', metavar='<email>', type=str, nargs='*', help='email of user to generate an activation link for') def handle(self, *args, **options): # type: (*Any, **Any) -> None duplicates = False for email in options['emails']: try: get_user_profile_by_email(email) print(email + ": There is already a user registered with that address.") duplicates = True continue except UserProfile.DoesNotExist: pass if duplicates: return realm = None string_id = options["string_id"] if string_id: realm = get_realm(string_id) if not realm: print("The realm %s doesn't exist yet, please create it first." % (string_id,)) print("Don't forget default streams!") exit(1) for email in options['emails']: if realm: if not email_allowed_for_realm(email, realm) and not options["force"]: print("You've asked to add an external user (%s) to a closed realm (%s)." % ( email, string_id)) print("Are you sure? To do this, pass --force.") exit(1) else: prereg_user = PreregistrationUser(email=email, realm=realm) else: prereg_user = PreregistrationUser(email=email) prereg_user.save() print(email + ": " + Confirmation.objects.get_link_for_object(prereg_user, host=realm.host))
apache-2.0
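For context, a hypothetical way to drive the command above through Django's management API; the realm string and addresses are made up, and --realm is supplied because the final print dereferences realm.host:

from django.core.management import call_command

# argparse dest names ('string_id', 'force') become keyword arguments
call_command('generate_invite_links', '[email protected]', '[email protected]',
             string_id='example.com', force=False)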
igemsoftware/SYSU-Software2013
project/Python27_32/Lib/test/test_anydbm.py
93
2288
#! /usr/bin/env python """Test script for the anydbm module based on testdumbdbm.py """ import os import unittest import glob from test import test_support _fname = test_support.TESTFN # Silence Py3k warning anydbm = test_support.import_module('anydbm', deprecated=True) def _delete_files(): # we don't know the precise name the underlying database uses # so we use glob to locate all names for f in glob.glob(_fname + "*"): try: os.unlink(f) except OSError: pass class AnyDBMTestCase(unittest.TestCase): _dict = {'0': '', 'a': 'Python:', 'b': 'Programming', 'c': 'the', 'd': 'way', 'f': 'Guido', 'g': 'intended' } def __init__(self, *args): unittest.TestCase.__init__(self, *args) def test_anydbm_creation(self): f = anydbm.open(_fname, 'c') self.assertEqual(f.keys(), []) for key in self._dict: f[key] = self._dict[key] self.read_helper(f) f.close() def test_anydbm_modification(self): self.init_db() f = anydbm.open(_fname, 'c') self._dict['g'] = f['g'] = "indented" self.read_helper(f) f.close() def test_anydbm_read(self): self.init_db() f = anydbm.open(_fname, 'r') self.read_helper(f) f.close() def test_anydbm_keys(self): self.init_db() f = anydbm.open(_fname, 'r') keys = self.keys_helper(f) f.close() def read_helper(self, f): keys = self.keys_helper(f) for key in self._dict: self.assertEqual(self._dict[key], f[key]) def init_db(self): f = anydbm.open(_fname, 'n') for k in self._dict: f[k] = self._dict[k] f.close() def keys_helper(self, f): keys = f.keys() keys.sort() dkeys = self._dict.keys() dkeys.sort() self.assertEqual(keys, dkeys) return keys def tearDown(self): _delete_files() def setUp(self): _delete_files() def test_main(): try: test_support.run_unittest(AnyDBMTestCase) finally: _delete_files() if __name__ == "__main__": test_main()
mit
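A quick illustration of the Python 2 anydbm API these tests exercise; the path is arbitrary and anydbm delegates to whichever dbm backend is available:

import anydbm

db = anydbm.open('/tmp/example_db', 'c')  # 'c' creates the file if needed
db['language'] = 'Python'                 # keys and values are plain strings
assert db['language'] == 'Python'
print db.keys()
db.close()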
stevemao/brackets-shell
gyp/pylib/gyp/generator/msvs.py
17
118480
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import copy import ntpath import os import posixpath import re import subprocess import sys import gyp.common import gyp.easy_xml as easy_xml import gyp.MSVSNew as MSVSNew import gyp.MSVSProject as MSVSProject import gyp.MSVSSettings as MSVSSettings import gyp.MSVSToolFile as MSVSToolFile import gyp.MSVSUserFile as MSVSUserFile import gyp.MSVSVersion as MSVSVersion from gyp.common import GypError # Regular expression for validating Visual Studio GUIDs. If the GUID # contains lowercase hex letters, MSVS will be fine. However, # IncrediBuild BuildConsole will parse the solution file, but then # silently skip building the target causing hard to track down errors. # Note that this only happens with the BuildConsole, and does not occur # if IncrediBuild is executed from inside Visual Studio. This regex # validates that the string looks like a GUID with all uppercase hex # letters. VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$') generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '.exe', 'STATIC_LIB_PREFIX': '', 'SHARED_LIB_PREFIX': '', 'STATIC_LIB_SUFFIX': '.lib', 'SHARED_LIB_SUFFIX': '.dll', 'INTERMEDIATE_DIR': '$(IntDir)', 'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate', 'OS': 'win', 'PRODUCT_DIR': '$(OutDir)', 'LIB_DIR': '$(OutDir)lib', 'RULE_INPUT_ROOT': '$(InputName)', 'RULE_INPUT_DIRNAME': '$(InputDir)', 'RULE_INPUT_EXT': '$(InputExt)', 'RULE_INPUT_NAME': '$(InputFileName)', 'RULE_INPUT_PATH': '$(InputPath)', 'CONFIGURATION_NAME': '$(ConfigurationName)', } # The msvs specific sections that hold paths generator_additional_path_sections = [ 'msvs_cygwin_dirs', 'msvs_props', ] generator_additional_non_configuration_keys = [ 'msvs_cygwin_dirs', 'msvs_cygwin_shell', 'msvs_shard', ] # List of precompiled header related keys. precomp_keys = [ 'msvs_precompiled_header', 'msvs_precompiled_source', ] cached_username = None cached_domain = None # TODO(gspencer): Switch the os.environ calls to be # win32api.GetDomainName() and win32api.GetUserName() once the # python version in depot_tools has been updated to work on Vista # 64-bit. def _GetDomainAndUserName(): if sys.platform not in ('win32', 'cygwin'): return ('DOMAIN', 'USERNAME') global cached_username global cached_domain if not cached_domain or not cached_username: domain = os.environ.get('USERDOMAIN') username = os.environ.get('USERNAME') if not domain or not username: call = subprocess.Popen(['net', 'config', 'Workstation'], stdout=subprocess.PIPE) config = call.communicate()[0] username_re = re.compile('^User name\s+(\S+)', re.MULTILINE) username_match = username_re.search(config) if username_match: username = username_match.group(1) domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE) domain_match = domain_re.search(config) if domain_match: domain = domain_match.group(1) cached_domain = domain cached_username = username return (cached_domain, cached_username) fixpath_prefix = None def _NormalizedSource(source): """Normalize the path. But not if that gets rid of a variable, as this may expand to something larger than one directory. Arguments: source: The path to be normalize.d Returns: The normalized path. """ normalized = os.path.normpath(source) if source.count('$') == normalized.count('$'): source = normalized return source def _FixPath(path): """Convert paths to a form that will make sense in a vcproj file. 
Arguments: path: The path to convert, may contain / etc. Returns: The path with all slashes made into backslashes. """ if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$': path = os.path.join(fixpath_prefix, path) path = path.replace('/', '\\') path = _NormalizedSource(path) if path and path[-1] == '\\': path = path[:-1] return path def _FixPaths(paths): """Fix each of the paths of the list.""" return [_FixPath(i) for i in paths] def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None, list_excluded=True): """Converts a list split source file paths into a vcproj folder hierarchy. Arguments: sources: A list of source file paths split. prefix: A list of source file path layers meant to apply to each of sources. excluded: A set of excluded files. Returns: A hierarchy of filenames and MSVSProject.Filter objects that matches the layout of the source tree. For example: _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']], prefix=['joe']) --> [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']), MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])] """ if not prefix: prefix = [] result = [] excluded_result = [] folders = dict() # Gather files into the final result, excluded, or folders. for s in sources: if len(s) == 1: filename = _NormalizedSource('\\'.join(prefix + s)) if filename in excluded: excluded_result.append(filename) else: result.append(filename) else: if not folders.get(s[0]): folders[s[0]] = [] folders[s[0]].append(s[1:]) # Add a folder for excluded files. if excluded_result and list_excluded: excluded_folder = MSVSProject.Filter('_excluded_files', contents=excluded_result) result.append(excluded_folder) # Populate all the folders. for f in folders: contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f], excluded=excluded, list_excluded=list_excluded) contents = MSVSProject.Filter(f, contents=contents) result.append(contents) return result def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False): if not value: return # TODO(bradnelson): ugly hack, fix this more generally!!! if 'Directories' in setting or 'Dependencies' in setting: if type(value) == str: value = value.replace('/', '\\') else: value = [i.replace('/', '\\') for i in value] if not tools.get(tool_name): tools[tool_name] = dict() tool = tools[tool_name] if tool.get(setting): if only_if_unset: return if type(tool[setting]) == list: tool[setting] += value else: raise TypeError( 'Appending "%s" to a non-list setting "%s" for tool "%s" is ' 'not allowed, previous value: %s' % ( value, setting, tool_name, str(tool[setting]))) else: tool[setting] = value def _ConfigPlatform(config_data): return config_data.get('msvs_configuration_platform', 'Win32') def _ConfigBaseName(config_name, platform_name): if config_name.endswith('_' + platform_name): return config_name[0:-len(platform_name)-1] else: return config_name def _ConfigFullName(config_name, config_data): platform_name = _ConfigPlatform(config_data) return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name) def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path, quote_cmd, do_setup_env): if [x for x in cmd if '$(InputDir)' in x]: input_dir_preamble = ( 'set INPUTDIR=$(InputDir)\n' 'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n' 'set INPUTDIR=%INPUTDIR:~0,-1%\n' ) else: input_dir_preamble = '' if cygwin_shell: # Find path to cygwin. cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0]) # Prepare command. 
direct_cmd = cmd direct_cmd = [i.replace('$(IntDir)', '`cygpath -m "${INTDIR}"`') for i in direct_cmd] direct_cmd = [i.replace('$(OutDir)', '`cygpath -m "${OUTDIR}"`') for i in direct_cmd] direct_cmd = [i.replace('$(InputDir)', '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd] if has_input_path: direct_cmd = [i.replace('$(InputPath)', '`cygpath -m "${INPUTPATH}"`') for i in direct_cmd] direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd] #direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd) direct_cmd = ' '.join(direct_cmd) # TODO(quote): regularize quoting path names throughout the module cmd = '' if do_setup_env: cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && ' cmd += 'set CYGWIN=nontsec&& ' if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0: cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& ' if direct_cmd.find('INTDIR') >= 0: cmd += 'set INTDIR=$(IntDir)&& ' if direct_cmd.find('OUTDIR') >= 0: cmd += 'set OUTDIR=$(OutDir)&& ' if has_input_path and direct_cmd.find('INPUTPATH') >= 0: cmd += 'set INPUTPATH=$(InputPath) && ' cmd += 'bash -c "%(cmd)s"' cmd = cmd % {'cygwin_dir': cygwin_dir, 'cmd': direct_cmd} return input_dir_preamble + cmd else: # Convert cat --> type to mimic unix. if cmd[0] == 'cat': command = ['type'] else: command = [cmd[0].replace('/', '\\')] # Add call before command to ensure that commands can be tied together one # after the other without aborting in Incredibuild, since IB makes a bat # file out of the raw command string, and some commands (like python) are # actually batch files themselves. command.insert(0, 'call') # Fix the paths # TODO(quote): This is a really ugly heuristic, and will miss path fixing # for arguments like "--arg=path" or "/opt:path". # If the argument starts with a slash or dash, it's probably a command line # switch arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]] arguments = [i.replace('$(InputDir)','%INPUTDIR%') for i in arguments] arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments] if quote_cmd: # Support a mode for using cmd directly. # Convert any paths to native form (first element is used directly). # TODO(quote): regularize quoting path names throughout the module arguments = ['"%s"' % i for i in arguments] # Collapse into a single command. return input_dir_preamble + ' '.join(command + arguments) def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env): # Currently this weird argument munging is used to duplicate the way a # python script would need to be run as part of the chrome tree. # Eventually we should add some sort of rule_default option to set this # per project. For now the behavior chrome needs is the default. mcs = rule.get('msvs_cygwin_shell') if mcs is None: mcs = int(spec.get('msvs_cygwin_shell', 1)) elif isinstance(mcs, str): mcs = int(mcs) quote_cmd = int(rule.get('msvs_quote_cmd', 1)) return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path, quote_cmd, do_setup_env=do_setup_env) def _AddActionStep(actions_dict, inputs, outputs, description, command): """Merge action into an existing list of actions. Care must be taken so that actions which have overlapping inputs either don't get assigned to the same input, or get collapsed into one. Arguments: actions_dict: dictionary keyed on input name, which maps to a list of dicts describing the actions attached to that input file. 
inputs: list of inputs outputs: list of outputs description: description of the action command: command line to execute """ # Require there to be at least one input (call sites will ensure this). assert inputs action = { 'inputs': inputs, 'outputs': outputs, 'description': description, 'command': command, } # Pick where to stick this action. # While less than optimal in terms of build time, attach them to the first # input for now. chosen_input = inputs[0] # Add it there. if chosen_input not in actions_dict: actions_dict[chosen_input] = [] actions_dict[chosen_input].append(action) def _AddCustomBuildToolForMSVS(p, spec, primary_input, inputs, outputs, description, cmd): """Add a custom build tool to execute something. Arguments: p: the target project spec: the target project dict primary_input: input file to attach the build tool to inputs: list of inputs outputs: list of outputs description: description of the action cmd: command line to execute """ inputs = _FixPaths(inputs) outputs = _FixPaths(outputs) tool = MSVSProject.Tool( 'VCCustomBuildTool', {'Description': description, 'AdditionalDependencies': ';'.join(inputs), 'Outputs': ';'.join(outputs), 'CommandLine': cmd, }) # Add to the properties of primary input for each config. for config_name, c_data in spec['configurations'].iteritems(): p.AddFileConfig(_FixPath(primary_input), _ConfigFullName(config_name, c_data), tools=[tool]) def _AddAccumulatedActionsToMSVS(p, spec, actions_dict): """Add actions accumulated into an actions_dict, merging as needed. Arguments: p: the target project spec: the target project dict actions_dict: dictionary keyed on input name, which maps to a list of dicts describing the actions attached to that input file. """ for primary_input in actions_dict: inputs = set() outputs = set() descriptions = [] commands = [] for action in actions_dict[primary_input]: inputs.update(set(action['inputs'])) outputs.update(set(action['outputs'])) descriptions.append(action['description']) commands.append(action['command']) # Add the custom build step for one input file. description = ', and also '.join(descriptions) command = '\r\n'.join(commands) _AddCustomBuildToolForMSVS(p, spec, primary_input=primary_input, inputs=inputs, outputs=outputs, description=description, cmd=command) def _RuleExpandPath(path, input_file): """Given the input file to which a rule applied, string substitute a path. Arguments: path: a path to string expand input_file: the file to which the rule applied. Returns: The string substituted path. """ path = path.replace('$(InputName)', os.path.splitext(os.path.split(input_file)[1])[0]) path = path.replace('$(InputDir)', os.path.dirname(input_file)) path = path.replace('$(InputExt)', os.path.splitext(os.path.split(input_file)[1])[1]) path = path.replace('$(InputFileName)', os.path.split(input_file)[1]) path = path.replace('$(InputPath)', input_file) return path def _FindRuleTriggerFiles(rule, sources): """Find the list of files which a particular rule applies to. Arguments: rule: the rule in question sources: the set of all known source files for this project Returns: The list of sources that trigger a particular rule. """ rule_ext = rule['extension'] return [s for s in sources if s.endswith('.' + rule_ext)] def _RuleInputsAndOutputs(rule, trigger_file): """Find the inputs and outputs generated by a rule. Arguments: rule: the rule in question. trigger_file: the main trigger for this rule. Returns: The pair of (inputs, outputs) involved in this rule. 
""" raw_inputs = _FixPaths(rule.get('inputs', [])) raw_outputs = _FixPaths(rule.get('outputs', [])) inputs = set() outputs = set() inputs.add(trigger_file) for i in raw_inputs: inputs.add(_RuleExpandPath(i, trigger_file)) for o in raw_outputs: outputs.add(_RuleExpandPath(o, trigger_file)) return (inputs, outputs) def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options): """Generate a native rules file. Arguments: p: the target project rules: the set of rules to include output_dir: the directory in which the project/gyp resides spec: the project dict options: global generator options """ rules_filename = '%s%s.rules' % (spec['target_name'], options.suffix) rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename), spec['target_name']) # Add each rule. for r in rules: rule_name = r['rule_name'] rule_ext = r['extension'] inputs = _FixPaths(r.get('inputs', [])) outputs = _FixPaths(r.get('outputs', [])) # Skip a rule with no action and no inputs. if 'action' not in r and not r.get('rule_sources', []): continue cmd = _BuildCommandLineForRule(spec, r, has_input_path=True, do_setup_env=True) rules_file.AddCustomBuildRule(name=rule_name, description=r.get('message', rule_name), extensions=[rule_ext], additional_dependencies=inputs, outputs=outputs, cmd=cmd) # Write out rules file. rules_file.WriteIfChanged() # Add rules file to project. p.AddToolFile(rules_filename) def _Cygwinify(path): path = path.replace('$(OutDir)', '$(OutDirCygwin)') path = path.replace('$(IntDir)', '$(IntDirCygwin)') return path def _GenerateExternalRules(rules, output_dir, spec, sources, options, actions_to_add): """Generate an external makefile to do a set of rules. Arguments: rules: the list of rules to include output_dir: path containing project and gyp files spec: project specification data sources: set of sources known options: global generator options actions_to_add: The list of actions we will add to. """ filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix) mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename)) # Find cygwin style versions of some paths. mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n') mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n') # Gather stuff needed to emit all: target. all_inputs = set() all_outputs = set() all_output_dirs = set() first_outputs = [] for rule in rules: trigger_files = _FindRuleTriggerFiles(rule, sources) for tf in trigger_files: inputs, outputs = _RuleInputsAndOutputs(rule, tf) all_inputs.update(set(inputs)) all_outputs.update(set(outputs)) # Only use one target from each rule as the dependency for # 'all' so we don't try to build each rule multiple times. first_outputs.append(list(outputs)[0]) # Get the unique output directories for this rule. output_dirs = [os.path.split(i)[0] for i in outputs] for od in output_dirs: all_output_dirs.add(od) first_outputs_cyg = [_Cygwinify(i) for i in first_outputs] # Write out all: target, including mkdir for each output directory. mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg)) for od in all_output_dirs: if od: mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od) mk_file.write('\n') # Define how each output is generated. for rule in rules: trigger_files = _FindRuleTriggerFiles(rule, sources) for tf in trigger_files: # Get all the inputs and outputs for this rule for this trigger file. 
inputs, outputs = _RuleInputsAndOutputs(rule, tf) inputs = [_Cygwinify(i) for i in inputs] outputs = [_Cygwinify(i) for i in outputs] # Prepare the command line for this rule. cmd = [_RuleExpandPath(c, tf) for c in rule['action']] cmd = ['"%s"' % i for i in cmd] cmd = ' '.join(cmd) # Add it to the makefile. mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs))) mk_file.write('\t%s\n\n' % cmd) # Close up the file. mk_file.close() # Add makefile to list of sources. sources.add(filename) # Add a build action to call makefile. cmd = ['make', 'OutDir=$(OutDir)', 'IntDir=$(IntDir)', '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}', '-f', filename] cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True) # Insert makefile as 0'th input, so it gets the action attached there, # as this is easier to understand from in the IDE. all_inputs = list(all_inputs) all_inputs.insert(0, filename) _AddActionStep(actions_to_add, inputs=_FixPaths(all_inputs), outputs=_FixPaths(all_outputs), description='Running external rules for %s' % spec['target_name'], command=cmd) def _EscapeEnvironmentVariableExpansion(s): """Escapes % characters. Escapes any % characters so that Windows-style environment variable expansions will leave them alone. See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile to understand why we have to do this. Args: s: The string to be escaped. Returns: The escaped string. """ s = s.replace('%', '%%') return s quote_replacer_regex = re.compile(r'(\\*)"') def _EscapeCommandLineArgumentForMSVS(s): """Escapes a Windows command-line argument. So that the Win32 CommandLineToArgv function will turn the escaped result back into the original string. See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx ("Parsing C++ Command-Line Arguments") to understand why we have to do this. Args: s: the string to be escaped. Returns: the escaped string. """ def _Replace(match): # For a literal quote, CommandLineToArgv requires an odd number of # backslashes preceding it, and it produces half as many literal backslashes # (rounded down). So we need to produce 2n+1 backslashes. return 2 * match.group(1) + '\\"' # Escape all quotes so that they are interpreted literally. s = quote_replacer_regex.sub(_Replace, s) # Now add unescaped quotes so that any whitespace is interpreted literally. s = '"' + s + '"' return s delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)') def _EscapeVCProjCommandLineArgListItem(s): """Escapes command line arguments for MSVS. The VCProj format stores string lists in a single string using commas and semi-colons as separators, which must be quoted if they are to be interpreted literally. However, command-line arguments may already have quotes, and the VCProj parser is ignorant of the backslash escaping convention used by CommandLineToArgv, so the command-line quotes and the VCProj quotes may not be the same quotes. So to store a general command-line argument in a VCProj list, we need to parse the existing quoting according to VCProj's convention and quote any delimiters that are not already quoted by that convention. The quotes that we add will also be seen by CommandLineToArgv, so if backslashes precede them then we also have to escape those backslashes according to the CommandLineToArgv convention. Args: s: the string to be escaped. Returns: the escaped string. 
""" def _Replace(match): # For a non-literal quote, CommandLineToArgv requires an even number of # backslashes preceding it, and it produces half as many literal # backslashes. So we need to produce 2n backslashes. return 2 * match.group(1) + '"' + match.group(2) + '"' segments = s.split('"') # The unquoted segments are at the even-numbered indices. for i in range(0, len(segments), 2): segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i]) # Concatenate back into a single string s = '"'.join(segments) if len(segments) % 2 == 0: # String ends while still quoted according to VCProj's convention. This # means the delimiter and the next list item that follow this one in the # .vcproj file will be misinterpreted as part of this item. There is nothing # we can do about this. Adding an extra quote would correct the problem in # the VCProj but cause the same problem on the final command-line. Moving # the item to the end of the list does works, but that's only possible if # there's only one such item. Let's just warn the user. print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' + 'quotes in ' + s) return s def _EscapeCppDefineForMSVS(s): """Escapes a CPP define so that it will reach the compiler unaltered.""" s = _EscapeEnvironmentVariableExpansion(s) s = _EscapeCommandLineArgumentForMSVS(s) s = _EscapeVCProjCommandLineArgListItem(s) # cl.exe replaces literal # characters with = in preprocesor definitions for # some reason. Octal-encode to work around that. s = s.replace('#', '\\%03o' % ord('#')) return s quote_replacer_regex2 = re.compile(r'(\\+)"') def _EscapeCommandLineArgumentForMSBuild(s): """Escapes a Windows command-line argument for use by MSBuild.""" def _Replace(match): return (len(match.group(1))/2*4)*'\\' + '\\"' # Escape all quotes so that they are interpreted literally. s = quote_replacer_regex2.sub(_Replace, s) return s def _EscapeMSBuildSpecialCharacters(s): escape_dictionary = { '%': '%25', '$': '%24', '@': '%40', "'": '%27', ';': '%3B', '?': '%3F', '*': '%2A' } result = ''.join([escape_dictionary.get(c, c) for c in s]) return result def _EscapeCppDefineForMSBuild(s): """Escapes a CPP define so that it will reach the compiler unaltered.""" s = _EscapeEnvironmentVariableExpansion(s) s = _EscapeCommandLineArgumentForMSBuild(s) s = _EscapeMSBuildSpecialCharacters(s) # cl.exe replaces literal # characters with = in preprocesor definitions for # some reason. Octal-encode to work around that. s = s.replace('#', '\\%03o' % ord('#')) return s def _GenerateRulesForMSVS(p, output_dir, options, spec, sources, excluded_sources, actions_to_add): """Generate all the rules for a particular project. Arguments: p: the project output_dir: directory to emit rules to options: global options passed to the generator spec: the specification for this project sources: the set of all known source files in this project excluded_sources: the set of sources excluded from normal processing actions_to_add: deferred list of actions to add in """ rules = spec.get('rules', []) rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))] rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))] # Handle rules that use a native rules file. if rules_native: _GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options) # Handle external rules (non-native rules). 
if rules_external: _GenerateExternalRules(rules_external, output_dir, spec, sources, options, actions_to_add) _AdjustSourcesForRules(rules, sources, excluded_sources) def _AdjustSourcesForRules(rules, sources, excluded_sources): # Add outputs generated by each rule (if applicable). for rule in rules: # Done if not processing outputs as sources. if int(rule.get('process_outputs_as_sources', False)): # Add in the outputs from this rule. trigger_files = _FindRuleTriggerFiles(rule, sources) for trigger_file in trigger_files: inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file) inputs = set(_FixPaths(inputs)) outputs = set(_FixPaths(outputs)) inputs.remove(_FixPath(trigger_file)) sources.update(inputs) excluded_sources.update(inputs) sources.update(outputs) def _FilterActionsFromExcluded(excluded_sources, actions_to_add): """Take inputs with actions attached out of the list of exclusions. Arguments: excluded_sources: list of source files not to be built. actions_to_add: dict of actions keyed on source file they're attached to. Returns: excluded_sources with files that have actions attached removed. """ must_keep = set(_FixPaths(actions_to_add.keys())) return [s for s in excluded_sources if s not in must_keep] def _GetDefaultConfiguration(spec): return spec['configurations'][spec['default_configuration']] def _GetGuidOfProject(proj_path, spec): """Get the guid for the project. Arguments: proj_path: Path of the vcproj or vcxproj file to generate. spec: The target dictionary containing the properties of the target. Returns: the guid. Raises: ValueError: if the specified GUID is invalid. """ # Pluck out the default configuration. default_config = _GetDefaultConfiguration(spec) # Decide the guid of the project. guid = default_config.get('msvs_guid') if guid: if VALID_MSVS_GUID_CHARS.match(guid) is None: raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' % (guid, VALID_MSVS_GUID_CHARS.pattern)) guid = '{%s}' % guid guid = guid or MSVSNew.MakeGuid(proj_path) return guid def _GetMsbuildToolsetOfProject(proj_path, spec, version): """Get the platform toolset for the project. Arguments: proj_path: Path of the vcproj or vcxproj file to generate. spec: The target dictionary containing the properties of the target. version: The MSVSVersion object. Returns: the platform toolset string or None. """ # Pluck out the default configuration. default_config = _GetDefaultConfiguration(spec) toolset = default_config.get('msbuild_toolset') if not toolset and version.DefaultToolset(): toolset = version.DefaultToolset() return toolset def _GenerateProject(project, options, version, generator_flags): """Generates a vcproj file. Arguments: project: the MSVSProject object. options: global generator options. version: the MSVSVersion object. generator_flags: dict of generator-specific flags. Returns: A list of source files that cannot be found on disk. """ default_config = _GetDefaultConfiguration(project.spec) # Skip emitting anything if told to with msvs_existing_vcproj option. if default_config.get('msvs_existing_vcproj'): return [] if version.UsesVcxproj(): return _GenerateMSBuildProject(project, options, version, generator_flags) else: return _GenerateMSVSProject(project, options, version, generator_flags) def _GenerateMSVSProject(project, options, version, generator_flags): """Generates a .vcproj file. It may create .rules and .user files too. Arguments: project: The project object we will generate the file for. options: Global options passed to the generator. 
    version: The VisualStudioVersion object.
    generator_flags: dict of generator-specific flags.
  """
  spec = project.spec
  vcproj_dir = os.path.dirname(project.path)
  if vcproj_dir and not os.path.exists(vcproj_dir):
    os.makedirs(vcproj_dir)
  platforms = _GetUniquePlatforms(spec)
  p = MSVSProject.Writer(project.path, version, spec['target_name'],
                         project.guid, platforms)

  # Get directory project file is in.
  project_dir = os.path.split(project.path)[0]
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)

  config_type = _GetMSVSConfigurationType(spec, project.build_file)
  for config_name, config in spec['configurations'].iteritems():
    _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)

  # Prepare list of sources and excluded sources.
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)

  # Add rules.
  actions_to_add = {}
  _GenerateRulesForMSVS(p, project_dir, options, spec,
                        sources, excluded_sources,
                        actions_to_add)
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(
          spec, options, project_dir, sources, excluded_sources,
          list_excluded))

  # Add in files.
  missing_sources = _VerifySourcesExist(sources, project_dir)
  p.AddFiles(sources)

  _AddToolFilesToMSVS(p, spec)
  _HandlePreCompiledHeaders(p, sources, spec)
  _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
  _AddCopies(actions_to_add, spec)
  _WriteMSVSUserFile(project.path, version, spec)

  # NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
  excluded_sources = _FilterActionsFromExcluded(
      excluded_sources, actions_to_add)
  _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                              list_excluded)
  _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)

  # Write it out.
  p.WriteIfChanged()

  return missing_sources


def _GetUniquePlatforms(spec):
  """Returns the list of unique platforms for this spec, e.g. ['win32', ...].

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of unique platforms.
  """
  # Gather list of unique platforms.
  platforms = set()
  for configuration in spec['configurations']:
    platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
  platforms = list(platforms)
  return platforms


def _CreateMSVSUserFile(proj_path, version, spec):
  """Generates a .user file for the user running this Gyp program.

  Arguments:
    proj_path: The path of the project file being created.  The .user file
               shares the same path (with an appropriate suffix).
    version: The VisualStudioVersion object.
    spec: The target dictionary containing the properties of the target.
  Returns:
    The MSVSUserFile object created.
  """
  (domain, username) = _GetDomainAndUserName()
  vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
  user_file = MSVSUserFile.Writer(vcuser_filename, version,
                                  spec['target_name'])
  return user_file


def _GetMSVSConfigurationType(spec, build_file):
  """Returns the configuration type for this project.

  It's a number defined by Microsoft.  May raise an exception.

  Args:
    spec: The target dictionary containing the properties of the target.
    build_file: The path of the gyp file.
  Returns:
    An integer, the configuration type.
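    For example, a 'shared_library' target maps to '2' (a .dll), per the
    table below.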
""" try: config_type = { 'executable': '1', # .exe 'shared_library': '2', # .dll 'loadable_module': '2', # .dll 'static_library': '4', # .lib 'none': '10', # Utility type }[spec['type']] except KeyError: if spec.get('type'): raise Exception('Target type %s is not a valid target type for ' 'target %s in %s.' % (spec['type'], spec['target_name'], build_file)) else: raise Exception('Missing type field for target %s in %s.' % (spec['target_name'], build_file)) return config_type def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config): """Adds a configuration to the MSVS project. Many settings in a vcproj file are specific to a configuration. This function the main part of the vcproj file that's configuration specific. Arguments: p: The target project being generated. spec: The target dictionary containing the properties of the target. config_type: The configuration type, a number as defined by Microsoft. config_name: The name of the configuration. config: The dictionnary that defines the special processing to be done for this configuration. """ # Get the information for this configuration include_dirs, resource_include_dirs = _GetIncludeDirs(config) libraries = _GetLibraries(spec) out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False) defines = _GetDefines(config) defines = [_EscapeCppDefineForMSVS(d) for d in defines] disabled_warnings = _GetDisabledWarnings(config) prebuild = config.get('msvs_prebuild') postbuild = config.get('msvs_postbuild') def_file = _GetModuleDefinition(spec) precompiled_header = config.get('msvs_precompiled_header') # Prepare the list of tools as a dictionary. tools = dict() # Add in user specified msvs_settings. msvs_settings = config.get('msvs_settings', {}) MSVSSettings.ValidateMSVSSettings(msvs_settings) for tool in msvs_settings: settings = config['msvs_settings'][tool] for setting in settings: _ToolAppend(tools, tool, setting, settings[setting]) # Add the information to the appropriate tool _ToolAppend(tools, 'VCCLCompilerTool', 'AdditionalIncludeDirectories', include_dirs) _ToolAppend(tools, 'VCResourceCompilerTool', 'AdditionalIncludeDirectories', resource_include_dirs) # Add in libraries. _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries) if out_file: _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True) # Add defines. _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines) _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions', defines) # Change program database directory to prevent collisions. _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName', '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True) # Add disabled warnings. _ToolAppend(tools, 'VCCLCompilerTool', 'DisableSpecificWarnings', disabled_warnings) # Add Pre-build. _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild) # Add Post-build. _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild) # Turn on precompiled headers if appropriate. if precompiled_header: precompiled_header = os.path.split(precompiled_header)[1] _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2') _ToolAppend(tools, 'VCCLCompilerTool', 'PrecompiledHeaderThrough', precompiled_header) _ToolAppend(tools, 'VCCLCompilerTool', 'ForcedIncludeFiles', precompiled_header) # Loadable modules don't generate import libraries; # tell dependent projects to not expect one. 
  if spec['type'] == 'loadable_module':
    _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)

  _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)


def _GetIncludeDirs(config):
  """Returns the list of directories to be used for #include directives.

  Arguments:
    config: The dictionary that defines the special processing to be done
            for this configuration.
  Returns:
    The list of directory paths.
  """
  # TODO(bradnelson): include_dirs should really be flexible enough not to
  #                   require this sort of thing.
  include_dirs = (
      config.get('include_dirs', []) +
      config.get('msvs_system_include_dirs', []))
  resource_include_dirs = config.get('resource_include_dirs', include_dirs)
  include_dirs = _FixPaths(include_dirs)
  resource_include_dirs = _FixPaths(resource_include_dirs)
  return include_dirs, resource_include_dirs


def _GetLibraries(spec):
  """Returns the list of libraries for this configuration.

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of libraries.
  """
  libraries = spec.get('libraries', [])
  # Strip out -l, as it is not used on windows (but is needed so we can pass
  # in libraries that are assumed to be in the default library path).
  # Also remove duplicate entries, leaving only the last duplicate, while
  # preserving order.
  found = set()
  unique_libraries_list = []
  for entry in reversed(libraries):
    library = re.sub('^\-l', '', entry)
    if not os.path.splitext(library)[1]:
      library += '.lib'
    if library not in found:
      found.add(library)
      unique_libraries_list.append(library)
  unique_libraries_list.reverse()
  return unique_libraries_list


def _GetOutputFilePathAndTool(spec, msbuild):
  """Returns the path and tool to use for this target.

  Figures out the path of the file this spec will create and the name of
  the VC tool that will create it.

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    A triple of (file path, name of the vc tool, name of the msbuild tool)
  """
  # Select a name for the output file.
  out_file = ''
  vc_tool = ''
  msbuild_tool = ''
  output_file_map = {
      'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
      'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
      'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
      'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
  }
  output_file_props = output_file_map.get(spec['type'])
  if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
    vc_tool, msbuild_tool, out_dir, suffix = output_file_props
    if spec.get('standalone_static_library', 0):
      out_dir = '$(OutDir)'
    out_dir = spec.get('product_dir', out_dir)
    product_extension = spec.get('product_extension')
    if product_extension:
      suffix = '.' + product_extension
    elif msbuild:
      suffix = '$(TargetExt)'
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    out_file = ntpath.join(out_dir, prefix + product_name + suffix)
  return out_file, vc_tool, msbuild_tool


def _GetDefines(config):
  """Returns the list of preprocessor definitions for this configuration.

  Arguments:
    config: The dictionary that defines the special processing to be done
            for this configuration.
  Returns:
    The list of preprocessor definitions.
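    For example, defines of ['FOO', ['BAR', 1]] become ['FOO', 'BAR=1'].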
""" defines = [] for d in config.get('defines', []): if type(d) == list: fd = '='.join([str(dpart) for dpart in d]) else: fd = str(d) defines.append(fd) return defines def _GetDisabledWarnings(config): return [str(i) for i in config.get('msvs_disabled_warnings', [])] def _GetModuleDefinition(spec): def_file = '' if spec['type'] in ['shared_library', 'loadable_module', 'executable']: def_files = [s for s in spec.get('sources', []) if s.endswith('.def')] if len(def_files) == 1: def_file = _FixPath(def_files[0]) elif def_files: raise ValueError( 'Multiple module definition files in one target, target %s lists ' 'multiple .def files: %s' % ( spec['target_name'], ' '.join(def_files))) return def_file def _ConvertToolsToExpectedForm(tools): """Convert tools to a form expected by Visual Studio. Arguments: tools: A dictionnary of settings; the tool name is the key. Returns: A list of Tool objects. """ tool_list = [] for tool, settings in tools.iteritems(): # Collapse settings with lists. settings_fixed = {} for setting, value in settings.iteritems(): if type(value) == list: if ((tool == 'VCLinkerTool' and setting == 'AdditionalDependencies') or setting == 'AdditionalOptions'): settings_fixed[setting] = ' '.join(value) else: settings_fixed[setting] = ';'.join(value) else: settings_fixed[setting] = value # Add in this tool. tool_list.append(MSVSProject.Tool(tool, settings_fixed)) return tool_list def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name): """Add to the project file the configuration specified by config. Arguments: p: The target project being generated. spec: the target project dict. tools: A dictionnary of settings; the tool name is the key. config: The dictionnary that defines the special processing to be done for this configuration. config_type: The configuration type, a number as defined by Microsoft. config_name: The name of the configuration. """ attributes = _GetMSVSAttributes(spec, config, config_type) # Add in this configuration. tool_list = _ConvertToolsToExpectedForm(tools) p.AddConfig(_ConfigFullName(config_name, config), attrs=attributes, tools=tool_list) def _GetMSVSAttributes(spec, config, config_type): # Prepare configuration attributes. prepared_attrs = {} source_attrs = config.get('msvs_configuration_attributes', {}) for a in source_attrs: prepared_attrs[a] = source_attrs[a] # Add props files. vsprops_dirs = config.get('msvs_props', []) vsprops_dirs = _FixPaths(vsprops_dirs) if vsprops_dirs: prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs) # Set configuration type. prepared_attrs['ConfigurationType'] = config_type output_dir = prepared_attrs.get('OutputDirectory', '$(SolutionDir)$(ConfigurationName)') prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\' if 'IntermediateDirectory' not in prepared_attrs: intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)' prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\' else: intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\' intermediate = MSVSSettings.FixVCMacroSlashes(intermediate) prepared_attrs['IntermediateDirectory'] = intermediate return prepared_attrs def _AddNormalizedSources(sources_set, sources_array): sources = [_NormalizedSource(s) for s in sources_array] sources_set.update(set(sources)) def _PrepareListOfSources(spec, generator_flags, gyp_file): """Prepare list of sources and excluded sources. Besides the sources specified directly in the spec, adds the gyp file so that a change to it will cause a re-compile. 
  Also adds appropriate sources for actions and copies.  Assumes later stage
  will un-exclude files which have custom build steps attached.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    gyp_file: The name of the gyp file.
  Returns:
    A pair of (list of sources, list of excluded sources).
    The sources will be relative to the gyp file.
  """
  sources = set()
  _AddNormalizedSources(sources, spec.get('sources', []))
  excluded_sources = set()
  # Add in the gyp file.
  if not generator_flags.get('standalone'):
    sources.add(gyp_file)

  # Add in 'action' inputs and outputs.
  for a in spec.get('actions', []):
    inputs = a['inputs']
    inputs = [_NormalizedSource(i) for i in inputs]
    # Add all inputs to sources and excluded sources.
    inputs = set(inputs)
    sources.update(inputs)
    excluded_sources.update(inputs)
    if int(a.get('process_outputs_as_sources', False)):
      _AddNormalizedSources(sources, a.get('outputs', []))
  # Add in 'copies' inputs and outputs.
  for cpy in spec.get('copies', []):
    _AddNormalizedSources(sources, cpy.get('files', []))
  return (sources, excluded_sources)


def _AdjustSourcesAndConvertToFilterHierarchy(
    spec, options, gyp_dir, sources, excluded_sources, list_excluded):
  """Adjusts the list of sources and excluded sources.

  Also converts the sets to lists.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    gyp_dir: The path to the gyp file being processed.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
  Returns:
    A trio of (list of sources, list of excluded sources,
               path of excluded IDL file)
  """
  # Exclude excluded sources coming into the generator.
  excluded_sources.update(set(spec.get('sources_excluded', [])))
  # Add excluded sources into sources for good measure.
  sources.update(excluded_sources)
  # Convert to proper windows form.
  # NOTE: sources goes from being a set to a list here.
  # NOTE: excluded_sources goes from being a set to a list here.
  sources = _FixPaths(sources)
  # Convert to proper windows form.
  excluded_sources = _FixPaths(excluded_sources)

  excluded_idl = _IdlFilesHandledNonNatively(spec, sources)

  precompiled_related = _GetPrecompileRelatedFiles(spec)
  # Find the excluded ones, minus the precompiled header related ones.
  fully_excluded = [i for i in excluded_sources
                    if i not in precompiled_related]

  # Convert to folders and the right slashes.
  sources = [i.split('\\') for i in sources]
  sources = _ConvertSourcesToFilterHierarchy(sources,
                                             excluded=fully_excluded,
                                             list_excluded=list_excluded)

  return sources, excluded_sources, excluded_idl


def _IdlFilesHandledNonNatively(spec, sources):
  # If any non-native rules use 'idl' as an extension exclude idl files.
  # Gather a list here to use later.
  using_idl = False
  for rule in spec.get('rules', []):
    if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
      using_idl = True
      break
  if using_idl:
    excluded_idl = [i for i in sources if i.endswith('.idl')]
  else:
    excluded_idl = []
  return excluded_idl


def _GetPrecompileRelatedFiles(spec):
  # Gather a list of precompiled header related sources.
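  # (Added note, an assumption from usage here and in
  # _GetExcludedFilesFromBuild: precomp_keys is defined earlier in this file
  # and names the msvs_precompiled_header / msvs_precompiled_source
  # configuration keys.)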
  precompiled_related = []
  for _, config in spec['configurations'].iteritems():
    for k in precomp_keys:
      f = config.get(k)
      if f:
        precompiled_related.append(_FixPath(f))
  return precompiled_related


def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  for file_name, excluded_configs in exclusions.iteritems():
    if (not list_excluded and
        len(excluded_configs) == len(spec['configurations'])):
      # If we're not listing excluded files, then they won't appear in the
      # project, so don't try to configure them to be excluded.
      pass
    else:
      for config_name, config in excluded_configs:
        p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
                        {'ExcludedFromBuild': 'true'})


def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
  exclusions = {}
  # Exclude excluded sources from being built.
  for f in excluded_sources:
    excluded_configs = []
    for config_name, config in spec['configurations'].iteritems():
      precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
      # Don't do this for ones that are precompiled header related.
      if f not in precomped:
        excluded_configs.append((config_name, config))
    exclusions[f] = excluded_configs
  # If any non-native rules use 'idl' as an extension exclude idl files.
  # Exclude them now.
  for f in excluded_idl:
    excluded_configs = []
    for config_name, config in spec['configurations'].iteritems():
      excluded_configs.append((config_name, config))
    exclusions[f] = excluded_configs
  return exclusions


def _AddToolFilesToMSVS(p, spec):
  # Add in tool files (rules).
  tool_files = set()
  for _, config in spec['configurations'].iteritems():
    for f in config.get('msvs_tool_files', []):
      tool_files.add(f)
  for f in tool_files:
    p.AddToolFile(f)


def _HandlePreCompiledHeaders(p, sources, spec):
  # Pre-compiled header source stubs need a different compiler flag
  # (generate precompiled header) and any source file not of the same
  # kind (i.e. C vs. C++) as the precompiled header source stub needs
  # to have use of precompiled headers disabled.
  extensions_excluded_from_precompile = []
  for config_name, config in spec['configurations'].iteritems():
    source = config.get('msvs_precompiled_source')
    if source:
      source = _FixPath(source)
      # UsePrecompiledHeader=1 if using precompiled headers.
      tool = MSVSProject.Tool('VCCLCompilerTool',
                              {'UsePrecompiledHeader': '1'})
      p.AddFileConfig(source, _ConfigFullName(config_name, config),
                      {}, tools=[tool])
      basename, extension = os.path.splitext(source)
      if extension == '.c':
        extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
      else:
        extensions_excluded_from_precompile = ['.c']

  def DisableForSourceTree(source_tree):
    for source in source_tree:
      if isinstance(source, MSVSProject.Filter):
        DisableForSourceTree(source.contents)
      else:
        basename, extension = os.path.splitext(source)
        if extension in extensions_excluded_from_precompile:
          for config_name, config in spec['configurations'].iteritems():
            tool = MSVSProject.Tool('VCCLCompilerTool',
                                    {'UsePrecompiledHeader': '0',
                                     'ForcedIncludeFiles': '$(NOINHERIT)'})
            p.AddFileConfig(_FixPath(source),
                            _ConfigFullName(config_name, config),
                            {}, tools=[tool])

  # Do nothing if there was no precompiled source.
  if extensions_excluded_from_precompile:
    DisableForSourceTree(sources)


def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
  # Add actions.
  actions = spec.get('actions', [])
  # Don't setup_env every time.  When all the actions are run together in one
  # batch file in VS, the PATH will grow too long.
  # Membership in this set means that the cygwin environment has been set up,
  # and does not need to be set up again.
  have_setup_env = set()
  for a in actions:
    # Attach actions to the gyp file if nothing else is there.
    inputs = a.get('inputs') or [relative_path_of_gyp_file]
    attached_to = inputs[0]
    need_setup_env = attached_to not in have_setup_env
    cmd = _BuildCommandLineForRule(spec, a, has_input_path=False,
                                   do_setup_env=need_setup_env)
    have_setup_env.add(attached_to)
    # Add the action.
    _AddActionStep(actions_to_add,
                   inputs=inputs,
                   outputs=a.get('outputs', []),
                   description=a.get('message', a['action_name']),
                   command=cmd)


def _WriteMSVSUserFile(project_path, version, spec):
  # Add run_as and test targets.
  if 'run_as' in spec:
    run_as = spec['run_as']
    action = run_as.get('action', [])
    environment = run_as.get('environment', [])
    working_directory = run_as.get('working_directory', '.')
  elif int(spec.get('test', 0)):
    action = ['$(TargetPath)', '--gtest_print_time']
    environment = []
    working_directory = '.'
  else:
    return  # Nothing to add
  # Write out the user file.
  user_file = _CreateMSVSUserFile(project_path, version, spec)
  for config_name, c_data in spec['configurations'].iteritems():
    user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
                               action, environment, working_directory)
  user_file.WriteIfChanged()


def _AddCopies(actions_to_add, spec):
  copies = _GetCopies(spec)
  for inputs, outputs, cmd, description in copies:
    _AddActionStep(actions_to_add,
                   inputs=inputs,
                   outputs=outputs,
                   description=description,
                   command=cmd)


def _GetCopies(spec):
  copies = []
  # Add copies.
  for cpy in spec.get('copies', []):
    for src in cpy.get('files', []):
      dst = os.path.join(cpy['destination'], os.path.basename(src))
      # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
      # outputs, so do the same for our generated command line.
      if src.endswith('/'):
        src_bare = src[:-1]
        base_dir = posixpath.split(src_bare)[0]
        outer_dir = posixpath.split(src_bare)[1]
        cmd = 'xcopy /e /f /y "%s\\%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        copies.append(([src], ['dummy_copies', dst], cmd,
                       'Copying %s to %s' % (src, dst)))
      else:
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
        copies.append(([src], [dst], cmd,
                       'Copying %s to %s' % (src, dst)))
  return copies


def _GetPathDict(root, path):
  # |path| will eventually be empty (in the recursive calls) if it was
  # initially relative; otherwise it will eventually end up as '\', 'D:\',
  # etc.
  if not path or path.endswith(os.sep):
    return root
  parent, folder = os.path.split(path)
  parent_dict = _GetPathDict(root, parent)
  if folder not in parent_dict:
    parent_dict[folder] = dict()
  return parent_dict[folder]


def _DictsToFolders(base_path, bucket, flat):
  # Convert to folders recursively.
  children = []
  for folder, contents in bucket.iteritems():
    if type(contents) == dict:
      folder_children = _DictsToFolders(os.path.join(base_path, folder),
                                        contents, flat)
      if flat:
        children += folder_children
      else:
        folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
                                             name='(' + folder + ')',
                                             entries=folder_children)
        children.append(folder_children)
    else:
      children.append(contents)
  return children


def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
  # the sole item in a folder which has the same name as the project.  Bring
  # such projects up one level.
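  # (Sketch of the effect, not original gyp commentary: a tree like
  # {'base': {'base.vcproj': project}} collapses to {'base': project}, since
  # the 'base' folder contains only the project of the same name.)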
  if (type(node) == dict and
      len(node) == 1 and
      node.keys()[0] == parent + '.vcproj'):
    return node[node.keys()[0]]
  if type(node) != dict:
    return node
  for child in node:
    node[child] = _CollapseSingles(child, node[child])
  return node


def _GatherSolutionFolders(sln_projects, project_objects, flat):
  root = {}
  # Convert into a tree of dicts on path.
  for p in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
    gyp_dir = os.path.dirname(gyp_file)
    path_dict = _GetPathDict(root, gyp_dir)
    path_dict[target + '.vcproj'] = project_objects[p]
  # Walk down from the top until we hit a folder that has more than one entry.
  # In practice, this strips the top-level "src/" dir from the hierarchy in
  # the solution.
  while len(root) == 1 and type(root[root.keys()[0]]) == dict:
    root = root[root.keys()[0]]
  # Collapse singles.
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
  return _DictsToFolders('', root, flat)


def _GetPathOfProject(qualified_target, spec, options, msvs_version):
  default_config = _GetDefaultConfiguration(spec)
  proj_filename = default_config.get('msvs_existing_vcproj')
  if not proj_filename:
    proj_filename = (spec['target_name'] + options.suffix +
                     msvs_version.ProjectExtension())

  build_file = gyp.common.BuildFile(qualified_target)
  proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
  fix_prefix = None
  if options.generator_output:
    project_dir_path = os.path.dirname(os.path.abspath(proj_path))
    proj_path = os.path.join(options.generator_output, proj_path)
    fix_prefix = gyp.common.RelativePath(project_dir_path,
                                         os.path.dirname(proj_path))
  return proj_path, fix_prefix


def _GetPlatformOverridesOfProject(spec):
  # Prepare a dict indicating which project configurations are used for which
  # solution configurations for this target.
  config_platform_overrides = {}
  for config_name, c in spec['configurations'].iteritems():
    config_fullname = _ConfigFullName(config_name, c)
    platform = c.get('msvs_target_platform', _ConfigPlatform(c))
    fixed_config_fullname = '%s|%s' % (
        _ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
    config_platform_overrides[config_fullname] = fixed_config_fullname
  return config_platform_overrides


def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
  """Create a MSVSProject object for the targets found in target list.

  Arguments:
    target_list: the list of targets to generate project objects for.
    target_dicts: the dictionary of specifications.
    options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A set of created projects, keyed by target.
  """
  global fixpath_prefix
  # Generate each project.
  projects = {}
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise Exception(
          'Multiple toolsets not supported in msvs build (target %s)' %
          qualified_target)
    proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
                                                  options, msvs_version)
    guid = _GetGuidOfProject(proj_path, spec)
    overrides = _GetPlatformOverridesOfProject(spec)
    build_file = gyp.common.BuildFile(qualified_target)
    # Create object for this project.
    obj = MSVSNew.MSVSProject(
        proj_path,
        name=spec['target_name'],
        guid=guid,
        spec=spec,
        build_file=build_file,
        config_platform_overrides=overrides,
        fixpath_prefix=fixpath_prefix)
    # Set project toolset if any (MS build only)
    if msvs_version.UsesVcxproj():
      obj.set_msbuild_toolset(
          _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
    projects[qualified_target] = obj
  # Set all the dependencies
  for project in projects.values():
    deps = project.spec.get('dependencies', [])
    deps = [projects[d] for d in deps]
    project.set_dependencies(deps)
  return projects


def CalculateVariables(default_variables, params):
  """Generated variables that require params to be known."""

  generator_flags = params.get('generator_flags', {})

  # Select project file format version (if unset, default to auto detecting).
  msvs_version = MSVSVersion.SelectVisualStudioVersion(
      generator_flags.get('msvs_version', 'auto'))
  # Stash msvs_version for later (so we don't have to probe the system twice).
  params['msvs_version'] = msvs_version

  # Set a variable so conditions can be based on msvs_version.
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()

  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running thru WOW64).
  if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
      os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
    default_variables['MSVS_OS_BITS'] = 64
  else:
    default_variables['MSVS_OS_BITS'] = 32


def _ShardName(name, number):
  """Add a shard number to the end of a target.

  Arguments:
    name: name of the target (foo#target)
    number: shard number
  Returns:
    Target name with shard added (foo_1#target)
  """
  parts = name.rsplit('#', 1)
  parts[0] = '%s_%d' % (parts[0], number)
  return '#'.join(parts)


def _ShardTargets(target_list, target_dicts):
  """Shard some targets apart to work around the linker's limits.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  Returns:
    Tuple of the new sharded versions of the inputs.
  """
  # Gather the targets to shard, and how many pieces.
  targets_to_shard = {}
  for t in target_dicts:
    shards = int(target_dicts[t].get('msvs_shard', 0))
    if shards:
      targets_to_shard[t] = shards
  # Shard target_list.
  new_target_list = []
  for t in target_list:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        new_target_list.append(_ShardName(t, i))
    else:
      new_target_list.append(t)
  # Shard target_dict.
  new_target_dicts = {}
  for t in target_dicts:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        name = _ShardName(t, i)
        new_target_dicts[name] = copy.copy(target_dicts[t])
        new_target_dicts[name]['target_name'] = _ShardName(
            new_target_dicts[name]['target_name'], i)
        sources = new_target_dicts[name].get('sources', [])
        new_sources = []
        for pos in range(i, len(sources), targets_to_shard[t]):
          new_sources.append(sources[pos])
        new_target_dicts[name]['sources'] = new_sources
    else:
      new_target_dicts[t] = target_dicts[t]
  # Shard dependencies.
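  # (Illustration, assuming a target 'lib#target' with 'msvs_shard': 2: it is
  # split into 'lib_0#target' and 'lib_1#target' above, with the source list
  # dealt round-robin between the shards; the loop below then rewrites any
  # dependency on the original target to depend on every shard.)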
  for t in new_target_dicts:
    dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
    new_dependencies = []
    for d in dependencies:
      if d in targets_to_shard:
        for i in range(targets_to_shard[d]):
          new_dependencies.append(_ShardName(d, i))
      else:
        new_dependencies.append(d)
    new_target_dicts[t]['dependencies'] = new_dependencies

  return (new_target_list, new_target_dicts)


def PerformBuild(data, configurations, params):
  options = params['options']
  msvs_version = params['msvs_version']
  devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')

  for build_file, build_file_dict in data.iteritems():
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    sln_path = build_file_root + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)

  for config in configurations:
    arguments = [devenv, sln_path, '/Build', config]
    print 'Building [%s]: %s' % (config, arguments)
    rtn = subprocess.check_call(arguments)


def GenerateOutput(target_list, target_dicts, data, params):
  """Generate .sln and .vcproj files.

  This is the entry point for this generator.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
  """
  global fixpath_prefix

  options = params['options']

  # Get the project file format version back out of where we stashed it in
  # GeneratorCalculatedVariables.
  msvs_version = params['msvs_version']

  generator_flags = params.get('generator_flags', {})

  # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
  (target_list, target_dicts) = _ShardTargets(target_list, target_dicts)

  # Prepare the set of configurations.
  configs = set()
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    for config_name, config in spec['configurations'].iteritems():
      configs.add(_ConfigFullName(config_name, config))
  configs = list(configs)

  # Figure out all the projects that will be generated and their guids
  project_objects = _CreateProjectObjects(target_list, target_dicts,
                                          options, msvs_version)

  # Generate each project.
  missing_sources = []
  for project in project_objects.values():
    fixpath_prefix = project.fixpath_prefix
    missing_sources.extend(_GenerateProject(project, options, msvs_version,
                                            generator_flags))
  fixpath_prefix = None

  for build_file in data:
    # Validate build_file extension
    if not build_file.endswith('.gyp'):
      continue
    sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    # Get projects in the solution, and their dependents.
    sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
    sln_projects += gyp.common.DeepDependencyTargets(target_dicts,
                                                     sln_projects)
    # Create folder hierarchy.
    root_entries = _GatherSolutionFolders(
        sln_projects, project_objects, flat=msvs_version.FlatSolution())
    # Create solution.
    sln = MSVSNew.MSVSSolution(sln_path,
                               entries=root_entries,
                               variants=configs,
                               websiteProperties=False,
                               version=msvs_version)
    sln.Write()

  if missing_sources:
    error_message = "Missing input files:\n" + \
                    '\n'.join(set(missing_sources))
    if generator_flags.get('msvs_error_on_missing_sources', False):
      raise Exception(error_message)
    else:
      print >> sys.stdout, "Warning: " + error_message


def _GenerateMSBuildFiltersFile(filters_path, source_files,
                                extension_to_rule_name):
  """Generate the filters file.
  This file is used by Visual Studio to organize the presentation of source
  files into folders.

  Arguments:
    filters_path: The path of the file to be created.
    source_files: The hierarchical structure of all the sources.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
  """
  filter_group = []
  source_group = []
  _AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
                           filter_group, source_group)
  if filter_group:
    content = ['Project',
               {'ToolsVersion': '4.0',
                'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
               },
               ['ItemGroup'] + filter_group,
               ['ItemGroup'] + source_group
              ]
    easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
  elif os.path.exists(filters_path):
    # We don't need this filter anymore.  Delete the old filter file.
    os.unlink(filters_path)


def _AppendFiltersForMSBuild(parent_filter_name, sources,
                             extension_to_rule_name,
                             filter_group, source_group):
  """Creates the list of filters and sources to be added in the filter file.

  Args:
    parent_filter_name: The name of the filter under which the sources are
        found.
    sources: The hierarchy of filters and sources to process.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
  """
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # We have a sub-filter.  Create the name of that sub-filter.
      if not parent_filter_name:
        filter_name = source.name
      else:
        filter_name = '%s\\%s' % (parent_filter_name, source.name)
      # Add the filter to the group.
      filter_group.append(
          ['Filter', {'Include': filter_name},
           ['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
      # Recurse and add its dependents.
      _AppendFiltersForMSBuild(filter_name, source.contents,
                               extension_to_rule_name,
                               filter_group, source_group)
    else:
      # It's a source.  Create a source entry.
      _, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
      source_entry = [element, {'Include': source}]
      # Specify the filter it is part of, if any.
      if parent_filter_name:
        source_entry.append(['Filter', parent_filter_name])
      source_group.append(source_entry)


def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
  """Returns the group and element type of the source file.

  Arguments:
    source: The source file name.
    extension_to_rule_name: A dictionary mapping file extensions to rules.

  Returns:
    A pair of (group this file should be part of, the label of element)
  """
  _, ext = os.path.splitext(source)
  if ext in extension_to_rule_name:
    group = 'rule'
    element = extension_to_rule_name[ext]
  elif ext in ['.cc', '.cpp', '.c', '.cxx']:
    group = 'compile'
    element = 'ClCompile'
  elif ext in ['.h', '.hxx']:
    group = 'include'
    element = 'ClInclude'
  elif ext == '.rc':
    group = 'resource'
    element = 'ResourceCompile'
  elif ext == '.idl':
    group = 'midl'
    element = 'Midl'
  else:
    group = 'none'
    element = 'None'
  return (group, element)


def _GenerateRulesForMSBuild(output_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, extension_to_rule_name):
  # MSBuild rules are implemented using three files: an XML file, a .targets
  # file and a .props file.
  # See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
  # for more details.
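  # (Layout sketch, assuming an empty --suffix: a target 'foo' with native
  # MSBuild rules gets foo.props, foo.targets and foo.xml generated next to
  # the project, wired in by the three _GenerateMSBuildRule*File calls below.)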
  rules = spec.get('rules', [])
  rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
  rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]

  msbuild_rules = []
  for rule in rules_native:
    # Skip a rule with no action and no inputs.
    if 'action' not in rule and not rule.get('rule_sources', []):
      continue
    msbuild_rule = MSBuildRule(rule, spec)
    msbuild_rules.append(msbuild_rule)
    extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
  if msbuild_rules:
    base = spec['target_name'] + options.suffix
    props_name = base + '.props'
    targets_name = base + '.targets'
    xml_name = base + '.xml'

    props_files_of_rules.add(props_name)
    targets_files_of_rules.add(targets_name)

    props_path = os.path.join(output_dir, props_name)
    targets_path = os.path.join(output_dir, targets_name)
    xml_path = os.path.join(output_dir, xml_name)

    _GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
    _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
    _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)

  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(rules, sources, excluded_sources)


class MSBuildRule(object):
  """Used to store information used to generate an MSBuild rule.

  Attributes:
    rule_name: The rule name, sanitized to use in XML.
    target_name: The name of the target.
    after_targets: The name of the AfterTargets element.
    before_targets: The name of the BeforeTargets element.
    depends_on: The name of the DependsOn element.
    compute_output: The name of the ComputeOutput element.
    dirs_to_make: The name of the DirsToMake element.
    inputs: The name of the _inputs element.
    tlog: The name of the _tlog element.
    extension: The extension this rule applies to.
    description: The message displayed when this rule is invoked.
    additional_dependencies: A string listing additional dependencies.
    outputs: The outputs of this rule.
    command: The command used to run the rule.
  """

  def __init__(self, rule, spec):
    self.display_name = rule['rule_name']
    # Assure that the rule name is only characters and numbers
    self.rule_name = re.sub(r'\W', '_', self.display_name)
    # Create the various element names, following the example set by the
    # Visual Studio 2008 to 2010 conversion.  I don't know if VS2010
    # is sensitive to the exact names.
    self.target_name = '_' + self.rule_name
    self.after_targets = self.rule_name + 'AfterTargets'
    self.before_targets = self.rule_name + 'BeforeTargets'
    self.depends_on = self.rule_name + 'DependsOn'
    self.compute_output = 'Compute%sOutput' % self.rule_name
    self.dirs_to_make = self.rule_name + 'DirsToMake'
    self.inputs = self.rule_name + '_inputs'
    self.tlog = self.rule_name + '_tlog'
    self.extension = rule['extension']
    if not self.extension.startswith('.'):
      self.extension = '.' + self.extension

    self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
        rule.get('message', self.rule_name))
    old_additional_dependencies = _FixPaths(rule.get('inputs', []))
    self.additional_dependencies = (
        ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
                  for i in old_additional_dependencies]))
    old_outputs = _FixPaths(rule.get('outputs', []))
    self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
                             for i in old_outputs])
    old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
                                           do_setup_env=True)
    self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)


def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
  """Generate the .props file."""
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
  for rule in msbuild_rules:
    content.extend([
        ['PropertyGroup',
         {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
                       "'$(ConfigurationType)' != 'Makefile'" % (
                           rule.before_targets, rule.after_targets)
         },
         [rule.before_targets, 'Midl'],
         [rule.after_targets, 'CustomBuild'],
        ],
        ['PropertyGroup',
         [rule.depends_on,
          {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
          '_SelectedFiles;$(%s)' % rule.depends_on
         ],
        ],
        ['ItemDefinitionGroup',
         [rule.rule_name,
          ['CommandLineTemplate', rule.command],
          ['Outputs', rule.outputs],
          ['ExecutionDescription', rule.description],
          ['AdditionalDependencies', rule.additional_dependencies],
         ],
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)


def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
  """Generate the .targets file."""
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             }
            ]
  item_group = [
      'ItemGroup',
      ['PropertyPageSchema',
       {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
      ]
    ]
  for rule in msbuild_rules:
    item_group.append(
        ['AvailableItemName',
         {'Include': rule.rule_name},
         ['Targets', rule.target_name],
        ])
  content.append(item_group)
  for rule in msbuild_rules:
    content.append(
        ['UsingTask',
         {'TaskName': rule.rule_name,
          'TaskFactory': 'XamlTaskFactory',
          'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
         },
         ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
        ])
  for rule in msbuild_rules:
    rule_name = rule.rule_name
    target_outputs = '%%(%s.Outputs)' % rule_name
    target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
                     '$(MSBuildProjectFile)') % (rule_name, rule_name)
    rule_inputs = '%%(%s.Identity)' % rule_name
    extension_condition = ("'%(Extension)'=='.obj' or "
                           "'%(Extension)'=='.res' or "
                           "'%(Extension)'=='.rsc' or "
                           "'%(Extension)'=='.lib'")
    remove_section = [
        'ItemGroup',
        {'Condition': "'@(SelectedFiles)' != ''"},
        [rule_name,
         {'Remove': '@(%s)' % rule_name,
          'Condition': "'%(Identity)' != '@(SelectedFiles)'"
         }
        ]
    ]
    inputs_section = [
        'ItemGroup',
        [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
    ]
    logging_section = [
        'ItemGroup',
        [rule.tlog,
         {'Include': '%%(%s.Outputs)' % rule_name,
          'Condition': ("'%%(%s.Outputs)' != '' and "
                        "'%%(%s.ExcludedFromBuild)' != 'true'" %
                        (rule_name, rule_name))
         },
         ['Source', "@(%s, '|')" % rule_name],
         ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
        ],
    ]
    message_section = [
        'Message',
        {'Importance': 'High',
         'Text': '%%(%s.ExecutionDescription)' % rule_name
        }
    ]
    write_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).write.1.tlog',
         'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
                                                            rule.tlog)
        }
    ]
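    # (Context note, not original gyp commentary: the .write.1.tlog and
    # .read.1.tlog files emitted by these WriteLinesToFile sections are
    # MSBuild tracking logs; Visual Studio consults them for its up-to-date
    # check, so recording each rule's inputs and outputs keeps incremental
    # builds honest.)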
    read_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).read.1.tlog',
         'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
        }
    ]
    command_and_input_section = [
        rule_name,
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule_name, rule_name),
         'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
         'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
         'Inputs': rule_inputs
        }
    ]
    content.extend([
        ['Target',
         {'Name': rule.target_name,
          'BeforeTargets': '$(%s)' % rule.before_targets,
          'AfterTargets': '$(%s)' % rule.after_targets,
          'Condition': "'@(%s)' != ''" % rule_name,
          'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
                                            rule.compute_output),
          'Outputs': target_outputs,
          'Inputs': target_inputs
         },
         remove_section,
         inputs_section,
         logging_section,
         message_section,
         write_tlog_section,
         read_tlog_section,
         command_and_input_section,
        ],
        ['PropertyGroup',
         ['ComputeLinkInputsTargets',
          '$(ComputeLinkInputsTargets);',
          '%s;' % rule.compute_output
         ],
         ['ComputeLibInputsTargets',
          '$(ComputeLibInputsTargets);',
          '%s;' % rule.compute_output
         ],
        ],
        ['Target',
         {'Name': rule.compute_output,
          'Condition': "'@(%s)' != ''" % rule_name
         },
         ['ItemGroup',
          [rule.dirs_to_make,
           {'Condition': "'@(%s)' != '' and "
            "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
            'Include': '%%(%s.Outputs)' % rule_name
           }
          ],
          ['Link',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['Lib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['ImpLib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
         ],
         ['MakeDir',
          {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
                           rule.dirs_to_make)
          }
         ]
        ],
    ])
  easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)


def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  # Generate the .xml file
  content = [
      'ProjectSchemaDefinitions',
      {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
                 'assembly=Microsoft.Build.Framework'),
       'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
       'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
       'xmlns:transformCallback':
           'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
      }
  ]
  for rule in msbuild_rules:
    content.extend([
        ['Rule',
         {'Name': rule.rule_name,
          'PageTemplate': 'tool',
          'DisplayName': rule.display_name,
          'Order': '200'
         },
         ['Rule.DataSource',
          ['DataSource',
           {'Persistence': 'ProjectFile',
            'ItemType': rule.rule_name
           }
          ]
         ],
         ['Rule.Categories',
          ['Category',
           {'Name': 'General'},
           ['Category.DisplayName',
            ['sys:String', 'General'],
           ],
          ],
          ['Category',
           {'Name': 'Command Line',
            'Subtype': 'CommandLine'
           },
           ['Category.DisplayName',
            ['sys:String', 'Command Line'],
           ],
          ],
         ],
         ['StringListProperty',
          {'Name': 'Inputs',
           'Category': 'Command Line',
           'IsRequired': 'true',
           'Switch': ' '
          },
          ['StringListProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': rule.rule_name,
             'SourceType': 'Item'
            }
           ]
          ],
         ],
         ['StringProperty',
          {'Name': 'CommandLineTemplate',
           'DisplayName': 'Command Line',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['DynamicEnumProperty',
          {'Name': rule.before_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute Before'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', 'Specifies the targets for the build customization'
                          ' to run before.'
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.before_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['DynamicEnumProperty',
          {'Name': rule.after_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute After'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', ('Specifies the targets for the build customization'
                           ' to run after.')
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.after_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': '',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['StringListProperty',
          {'Name': 'Outputs',
           'DisplayName': 'Outputs',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringProperty',
          {'Name': 'ExecutionDescription',
           'DisplayName': 'Execution Description',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringListProperty',
          {'Name': 'AdditionalDependencies',
           'DisplayName': 'Additional Dependencies',
           'IncludeInCommandLine': 'False',
           'Visible': 'false'
          }
         ],
         ['StringProperty',
          {'Subtype': 'AdditionalOptions',
           'Name': 'AdditionalOptions',
           'Category': 'Command Line'
          },
          ['StringProperty.DisplayName',
           ['sys:String', 'Additional Options'],
          ],
          ['StringProperty.Description',
           ['sys:String', 'Additional Options'],
          ],
         ],
        ],
        ['ItemType',
         {'Name': rule.rule_name,
          'DisplayName': rule.display_name
         }
        ],
        ['FileExtension',
         {'Name': '*' + rule.extension,
          'ContentType': rule.rule_name
         }
        ],
        ['ContentType',
         {'Name': rule.rule_name,
          'DisplayName': '',
          'ItemType': rule.rule_name
         }
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)


def _GetConfigurationAndPlatform(name, settings):
  configuration = name.rsplit('_', 1)[0]
  platform = settings.get('msvs_configuration_platform', 'Win32')
  return (configuration, platform)


def _GetConfigurationCondition(name, settings):
  return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
          _GetConfigurationAndPlatform(name, settings))


def _GetMSBuildProjectConfigurations(configurations):
  group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
  for (name, settings) in sorted(configurations.iteritems()):
    configuration, platform = _GetConfigurationAndPlatform(name, settings)
    designation = '%s|%s' % (configuration, platform)
    group.append(
        ['ProjectConfiguration', {'Include': designation},
         ['Configuration', configuration],
         ['Platform', platform]])
  return [group]


def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
  namespace = os.path.splitext(gyp_file_name)[0]
  return [
      ['PropertyGroup', {'Label': 'Globals'},
       ['ProjectGuid', guid],
       ['Keyword', 'Win32Proj'],
       ['RootNamespace', namespace],
      ]
  ]


def _GetMSBuildConfigurationDetails(spec, build_file):
  properties = {}
  for name, settings in spec['configurations'].iteritems():
    msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
    condition = _GetConfigurationCondition(name, settings)
    character_set = msbuild_attributes.get('CharacterSet')
    _AddConditionalProperty(properties, condition, 'ConfigurationType',
                            msbuild_attributes['ConfigurationType'])
    if character_set:
      _AddConditionalProperty(properties, condition, 'CharacterSet',
                              character_set)
  return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)


def _GetMSBuildLocalProperties(msbuild_toolset):
  # Currently the only local property we support is PlatformToolset
  properties = {}
  if msbuild_toolset:
    properties = [
        ['PropertyGroup', {'Label': 'Locals'},
          ['PlatformToolset', msbuild_toolset],
        ]
      ]
  return properties


def _GetMSBuildPropertySheets(configurations):
  user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
  additional_props = {}
  props_specified = False
  for name, settings in sorted(configurations.iteritems()):
    configuration = _GetConfigurationCondition(name, settings)
    if settings.has_key('msbuild_props'):
      additional_props[configuration] = _FixPaths(settings['msbuild_props'])
      props_specified = True
    else:
      additional_props[configuration] = ''

  if not props_specified:
    return [
        ['ImportGroup',
         {'Label': 'PropertySheets'},
         ['Import',
          {'Project': user_props,
           'Condition': "exists('%s')" % user_props,
           'Label': 'LocalAppDataPlatform'
          }
         ]
        ]
    ]
  else:
    sheets = []
    for condition, props in additional_props.iteritems():
      import_group = [
          'ImportGroup',
          {'Label': 'PropertySheets',
           'Condition': condition
          },
          ['Import',
           {'Project': user_props,
            'Condition': "exists('%s')" % user_props,
            'Label': 'LocalAppDataPlatform'
           }
          ]
      ]
      for props_file in props:
        import_group.append(['Import', {'Project': props_file}])
      sheets.append(import_group)
    return sheets


def _ConvertMSVSBuildAttributes(spec, config, build_file):
  config_type = _GetMSVSConfigurationType(spec, build_file)
  msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
  msbuild_attributes = {}
  for a in msvs_attributes:
    if a in ['IntermediateDirectory', 'OutputDirectory']:
      directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
      if not directory.endswith('\\'):
        directory += '\\'
      msbuild_attributes[a] = directory
    elif a == 'CharacterSet':
      msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
    elif a == 'ConfigurationType':
      msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
    else:
      print 'Warning: Do not know how to convert MSVS attribute ' + a
  return msbuild_attributes


def _ConvertMSVSCharacterSet(char_set):
  if char_set.isdigit():
    char_set = {
        '0': 'MultiByte',
        '1': 'Unicode',
        '2': 'MultiByte',
    }[char_set]
  return char_set


def _ConvertMSVSConfigurationType(config_type):
  if config_type.isdigit():
    config_type = {
        '1': 'Application',
        '2': 'DynamicLibrary',
        '4': 'StaticLibrary',
        '10': 'Utility'
    }[config_type]
  return config_type


def _GetMSBuildAttributes(spec, config, build_file):
  if 'msbuild_configuration_attributes' not in config:
    msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
  else:
    config_type = _GetMSVSConfigurationType(spec, build_file)
    config_type = _ConvertMSVSConfigurationType(config_type)
    msbuild_attributes = config.get('msbuild_configuration_attributes', {})
    msbuild_attributes.setdefault('ConfigurationType', config_type)
    output_dir = msbuild_attributes.get('OutputDirectory',
                                        '$(SolutionDir)$(Configuration)')
    msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
    if 'IntermediateDirectory' not in msbuild_attributes:
      intermediate = _FixPath('$(Configuration)') + '\\'
      msbuild_attributes['IntermediateDirectory'] = intermediate
    if 'CharacterSet' in msbuild_attributes:
      msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
          msbuild_attributes['CharacterSet'])
  if 'TargetName' not in msbuild_attributes:
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    target_name = prefix + product_name
    msbuild_attributes['TargetName'] = target_name

  # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
  # (depending on the tool used) to avoid MSB8012 warning.
  msbuild_tool_map = {
      'executable': 'Link',
      'shared_library': 'Link',
      'loadable_module': 'Link',
      'static_library': 'Lib',
  }
  msbuild_tool = msbuild_tool_map.get(spec['type'])
  if msbuild_tool:
    msbuild_settings = config['finalized_msbuild_settings']
    out_file = msbuild_settings[msbuild_tool].get('OutputFile')
    if out_file:
      msbuild_attributes['TargetPath'] = _FixPath(out_file)

  return msbuild_attributes


def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
  # TODO(jeanluc) We could optimize out the following and do it only if
  # there are actions.
  # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
  new_paths = []
  cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
  if cygwin_dirs:
    cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
    new_paths.append(cyg_path)
    # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
    # python_dir.
    python_path = cyg_path.replace('cygwin\\bin', 'python_26')
    new_paths.append(python_path)
    if new_paths:
      new_paths = '$(ExecutablePath);' + ';'.join(new_paths)

  properties = {}
  for (name, configuration) in sorted(configurations.iteritems()):
    condition = _GetConfigurationCondition(name, configuration)
    attributes = _GetMSBuildAttributes(spec, configuration, build_file)
    msbuild_settings = configuration['finalized_msbuild_settings']
    _AddConditionalProperty(properties, condition, 'IntDir',
                            attributes['IntermediateDirectory'])
    _AddConditionalProperty(properties, condition, 'OutDir',
                            attributes['OutputDirectory'])
    _AddConditionalProperty(properties, condition, 'TargetName',
                            attributes['TargetName'])
    if attributes.get('TargetPath'):
      _AddConditionalProperty(properties, condition, 'TargetPath',
                              attributes['TargetPath'])

    if new_paths:
      _AddConditionalProperty(properties, condition, 'ExecutablePath',
                              new_paths)
    tool_settings = msbuild_settings.get('', {})
    for name, value in sorted(tool_settings.iteritems()):
      formatted_value = _GetValueFormattedForMSBuild('', name, value)
      _AddConditionalProperty(properties, condition, name, formatted_value)
  return _GetMSBuildPropertyGroup(spec, None, properties)


def _AddConditionalProperty(properties, condition, name, value):
  """Adds a property / conditional value pair to a dictionary.

  Arguments:
    properties: The dictionary to be modified.  The key is the name of the
        property.  The value is itself a dictionary; its key is the value and
        the value a list of conditions for which this value is true.
    condition: The condition under which the named property has the value.
    name: The name of the property.
    value: The value of the property.
  """
  if name not in properties:
    properties[name] = {}
  values = properties[name]
  if value not in values:
    values[value] = []
  conditions = values[value]
  conditions.append(condition)


# Regex for msvs variable references ( i.e. $(FOO) ).
MSVS_VARIABLE_REFERENCE = re.compile('\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')


def _GetMSBuildPropertyGroup(spec, label, properties):
  """Returns a PropertyGroup definition for the specified properties.

  Arguments:
    spec: The target project dict.
    label: An optional label for the PropertyGroup.
    properties: The dictionary to be converted.  The key is the name of the
        property.  The value is itself a dictionary; its key is the value and
        the value a list of conditions for which this value is true.
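
  A value shared by every configuration is emitted as a single unconditional
  element; otherwise one conditional element is emitted per configuration
  (see the loop below).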
""" group = ['PropertyGroup'] if label: group.append({'Label': label}) num_configurations = len(spec['configurations']) def GetEdges(node): # Use a definition of edges such that user_of_variable -> used_varible. # This happens to be easier in this case, since a variable's # definition contains all variables it references in a single string. edges = set() for value in sorted(properties[node].keys()): # Add to edges all $(...) references to variables. # # Variable references that refer to names not in properties are excluded # These can exist for instance to refer built in definitions like # $(SolutionDir). # # Self references are ignored. Self reference is used in a few places to # append to the default value. I.e. PATH=$(PATH);other_path edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value) if v in properties and v != node])) return edges properties_ordered = gyp.common.TopologicallySorted( properties.keys(), GetEdges) # Walk properties in the reverse of a topological sort on # user_of_variable -> used_variable as this ensures variables are # defined before they are used. # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG)) for name in reversed(properties_ordered): values = properties[name] for value, conditions in sorted(values.iteritems()): if len(conditions) == num_configurations: # If the value is the same all configurations, # just add one unconditional entry. group.append([name, value]) else: for condition in conditions: group.append([name, {'Condition': condition}, value]) return [group] def _GetMSBuildToolSettingsSections(spec, configurations): groups = [] for (name, configuration) in sorted(configurations.iteritems()): msbuild_settings = configuration['finalized_msbuild_settings'] group = ['ItemDefinitionGroup', {'Condition': _GetConfigurationCondition(name, configuration)} ] for tool_name, tool_settings in sorted(msbuild_settings.iteritems()): # Skip the tool named '' which is a holder of global settings handled # by _GetMSBuildConfigurationGlobalProperties. if tool_name: if tool_settings: tool = [tool_name] for name, value in sorted(tool_settings.iteritems()): formatted_value = _GetValueFormattedForMSBuild(tool_name, name, value) tool.append([name, formatted_value]) group.append(tool) groups.append(group) return groups def _FinalizeMSBuildSettings(spec, configuration): if 'msbuild_settings' in configuration: converted = False msbuild_settings = configuration['msbuild_settings'] MSVSSettings.ValidateMSBuildSettings(msbuild_settings) else: converted = True msvs_settings = configuration.get('msvs_settings', {}) msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings) include_dirs, resource_include_dirs = _GetIncludeDirs(configuration) libraries = _GetLibraries(spec) out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True) defines = _GetDefines(configuration) if converted: # Visual Studio 2010 has TR1 defines = [d for d in defines if d != '_HAS_TR1=0'] # Warn of ignored settings ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files'] for ignored_setting in ignored_settings: value = configuration.get(ignored_setting) if value: print ('Warning: The automatic conversion to MSBuild does not handle ' '%s. 
Ignoring setting of %s' % (ignored_setting, str(value))) defines = [_EscapeCppDefineForMSBuild(d) for d in defines] disabled_warnings = _GetDisabledWarnings(configuration) # TODO(jeanluc) Validate & warn that we don't translate # prebuild = configuration.get('msvs_prebuild') # postbuild = configuration.get('msvs_postbuild') def_file = _GetModuleDefinition(spec) precompiled_header = configuration.get('msvs_precompiled_header') # Add the information to the appropriate tool # TODO(jeanluc) We could optimize and generate these settings only if # the corresponding files are found, e.g. don't generate ResourceCompile # if you don't have any resources. _ToolAppend(msbuild_settings, 'ClCompile', 'AdditionalIncludeDirectories', include_dirs) _ToolAppend(msbuild_settings, 'ResourceCompile', 'AdditionalIncludeDirectories', resource_include_dirs) # Add in libraries. _ToolAppend(msbuild_settings, 'Link', 'AdditionalDependencies', libraries) if out_file: _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file, only_if_unset=True) # Add defines. _ToolAppend(msbuild_settings, 'ClCompile', 'PreprocessorDefinitions', defines) _ToolAppend(msbuild_settings, 'ResourceCompile', 'PreprocessorDefinitions', defines) # Add disabled warnings. _ToolAppend(msbuild_settings, 'ClCompile', 'DisableSpecificWarnings', disabled_warnings) # Turn on precompiled headers if appropriate. if precompiled_header: precompiled_header = os.path.split(precompiled_header)[1] _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use') _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeaderFile', precompiled_header) _ToolAppend(msbuild_settings, 'ClCompile', 'ForcedIncludeFiles', precompiled_header) # Loadable modules don't generate import libraries; # tell dependent projects to not expect one. if spec['type'] == 'loadable_module': _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true') # Set the module definition file if any. if def_file: _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file) configuration['finalized_msbuild_settings'] = msbuild_settings def _GetValueFormattedForMSBuild(tool_name, name, value): if type(value) == list: # For some settings, VS2010 does not automatically extends the settings # TODO(jeanluc) Is this what we want? if name in ['AdditionalDependencies', 'AdditionalIncludeDirectories', 'AdditionalLibraryDirectories', 'AdditionalOptions', 'DelayLoadDLLs', 'DisableSpecificWarnings', 'PreprocessorDefinitions']: value.append('%%(%s)' % name) # For most tools, entries in a list should be separated with ';' but some # settings use a space. Check for those first. exceptions = { 'ClCompile': ['AdditionalOptions'], 'Link': ['AdditionalOptions'], 'Lib': ['AdditionalOptions']} if tool_name in exceptions and name in exceptions[tool_name]: char = ' ' else: char = ';' formatted_value = char.join( [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value]) else: formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value) return formatted_value def _VerifySourcesExist(sources, root_dir): """Verifies that all source files exist on disk. Checks that all regular source files, i.e. not created at run time, exist on disk. Missing files cause needless recompilation but no otherwise visible errors. Arguments: sources: A recursive list of Filter/file names. root_dir: The root directory for the relative path names. Returns: A list of source files that cannot be found on disk. 
""" missing_sources = [] for source in sources: if isinstance(source, MSVSProject.Filter): missing_sources.extend(_VerifySourcesExist(source.contents, root_dir)) else: if '$' not in source: full_path = os.path.join(root_dir, source) if not os.path.exists(full_path): missing_sources.append(full_path) return missing_sources def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name, actions_spec, sources_handled_by_action, list_excluded): groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule'] grouped_sources = {} for g in groups: grouped_sources[g] = [] _AddSources2(spec, sources, exclusions, grouped_sources, extension_to_rule_name, sources_handled_by_action, list_excluded) sources = [] for g in groups: if grouped_sources[g]: sources.append(['ItemGroup'] + grouped_sources[g]) if actions_spec: sources.append(['ItemGroup'] + actions_spec) return sources def _AddSources2(spec, sources, exclusions, grouped_sources, extension_to_rule_name, sources_handled_by_action, list_excluded): extensions_excluded_from_precompile = [] for source in sources: if isinstance(source, MSVSProject.Filter): _AddSources2(spec, source.contents, exclusions, grouped_sources, extension_to_rule_name, sources_handled_by_action, list_excluded) else: if not source in sources_handled_by_action: detail = [] excluded_configurations = exclusions.get(source, []) if len(excluded_configurations) == len(spec['configurations']): detail.append(['ExcludedFromBuild', 'true']) else: for config_name, configuration in sorted(excluded_configurations): condition = _GetConfigurationCondition(config_name, configuration) detail.append(['ExcludedFromBuild', {'Condition': condition}, 'true']) # Add precompile if needed for config_name, configuration in spec['configurations'].iteritems(): precompiled_source = configuration.get('msvs_precompiled_source', '') if precompiled_source != '': precompiled_source = _FixPath(precompiled_source) if not extensions_excluded_from_precompile: # If the precompiled header is generated by a C source, we must # not try to use it for C++ sources, and vice versa. basename, extension = os.path.splitext(precompiled_source) if extension == '.c': extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx'] else: extensions_excluded_from_precompile = ['.c'] if precompiled_source == source: condition = _GetConfigurationCondition(config_name, configuration) detail.append(['PrecompiledHeader', {'Condition': condition}, 'Create' ]) else: # Turn off precompiled header usage for source files of a # different type than the file that generated the # precompiled header. for extension in extensions_excluded_from_precompile: if source.endswith(extension): detail.append(['PrecompiledHeader', '']) detail.append(['ForcedIncludeFiles', '']) group, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name) grouped_sources[group].append([element, {'Include': source}] + detail) def _GetMSBuildProjectReferences(project): references = [] if project.dependencies: group = ['ItemGroup'] for dependency in project.dependencies: guid = dependency.guid project_dir = os.path.split(project.path)[0] relative_path = gyp.common.RelativePath(dependency.path, project_dir) project_ref = ['ProjectReference', {'Include': relative_path}, ['Project', guid], ['ReferenceOutputAssembly', 'false'] ] for config in dependency.spec.get('configurations', {}).itervalues(): # If it's disabled in any config, turn it off in the reference. 
if config.get('msvs_2010_disable_uldi_when_referenced', 0): project_ref.append(['UseLibraryDependencyInputs', 'false']) break group.append(project_ref) references.append(group) return references def _GenerateMSBuildProject(project, options, version, generator_flags): spec = project.spec configurations = spec['configurations'] project_dir, project_file_name = os.path.split(project.path) msbuildproj_dir = os.path.dirname(project.path) if msbuildproj_dir and not os.path.exists(msbuildproj_dir): os.makedirs(msbuildproj_dir) # Prepare list of sources and excluded sources. gyp_path = _NormalizedSource(project.build_file) relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir) gyp_file = os.path.split(project.build_file)[1] sources, excluded_sources = _PrepareListOfSources(spec, generator_flags, gyp_file) # Add rules. actions_to_add = {} props_files_of_rules = set() targets_files_of_rules = set() extension_to_rule_name = {} list_excluded = generator_flags.get('msvs_list_excluded_files', True) _GenerateRulesForMSBuild(project_dir, options, spec, sources, excluded_sources, props_files_of_rules, targets_files_of_rules, actions_to_add, extension_to_rule_name) sources, excluded_sources, excluded_idl = ( _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir, sources, excluded_sources, list_excluded)) _AddActions(actions_to_add, spec, project.build_file) _AddCopies(actions_to_add, spec) # NOTE: this stanza must appear after all actions have been decided. # Don't excluded sources with actions attached, or they won't run. excluded_sources = _FilterActionsFromExcluded( excluded_sources, actions_to_add) exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl) actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild( spec, actions_to_add) _GenerateMSBuildFiltersFile(project.path + '.filters', sources, extension_to_rule_name) missing_sources = _VerifySourcesExist(sources, project_dir) for configuration in configurations.itervalues(): _FinalizeMSBuildSettings(spec, configuration) # Add attributes to root element import_default_section = [ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]] import_cpp_props_section = [ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]] import_cpp_targets_section = [ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]] macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]] content = [ 'Project', {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003', 'ToolsVersion': version.ProjectVersion(), 'DefaultTargets': 'Build' }] content += _GetMSBuildProjectConfigurations(configurations) content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name) content += import_default_section content += _GetMSBuildConfigurationDetails(spec, project.build_file) content += _GetMSBuildLocalProperties(project.msbuild_toolset) content += import_cpp_props_section content += _GetMSBuildExtensions(props_files_of_rules) content += _GetMSBuildPropertySheets(configurations) content += macro_section content += _GetMSBuildConfigurationGlobalProperties(spec, configurations, project.build_file) content += _GetMSBuildToolSettingsSections(spec, configurations) content += _GetMSBuildSources( spec, sources, exclusions, extension_to_rule_name, actions_spec, sources_handled_by_action, list_excluded) content += _GetMSBuildProjectReferences(project) content += import_cpp_targets_section content += _GetMSBuildExtensionTargets(targets_files_of_rules) # TODO(jeanluc) File a 
bug to get rid of runas. We had in MSVS: # has_run_as = _WriteMSVSUserFile(project.path, version, spec) easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True) return missing_sources def _GetMSBuildExtensions(props_files_of_rules): extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}] for props_file in props_files_of_rules: extensions.append(['Import', {'Project': props_file}]) return [extensions] def _GetMSBuildExtensionTargets(targets_files_of_rules): targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}] for targets_file in sorted(targets_files_of_rules): targets_node.append(['Import', {'Project': targets_file}]) return [targets_node] def _GenerateActionsForMSBuild(spec, actions_to_add): """Add actions accumulated into an actions_to_add, merging as needed. Arguments: spec: the target project dict actions_to_add: dictionary keyed on input name, which maps to a list of dicts describing the actions attached to that input file. Returns: A pair of (action specification, the sources handled by this action). """ sources_handled_by_action = set() actions_spec = [] for primary_input, actions in actions_to_add.iteritems(): inputs = set() outputs = set() descriptions = [] commands = [] for action in actions: inputs.update(set(action['inputs'])) outputs.update(set(action['outputs'])) descriptions.append(action['description']) cmd = action['command'] # For most actions, add 'call' so that actions that invoke batch files # return and continue executing. msbuild_use_call provides a way to # disable this but I have not seen any adverse effect from doing that # for everything. if action.get('msbuild_use_call', True): cmd = 'call ' + cmd commands.append(cmd) # Add the custom build action for one input file. description = ', and also '.join(descriptions) # We can't join the commands simply with && because the command line will # get too long. See also _AddActions: cygwin's setup_env mustn't be called # for every invocation or the command that sets the PATH will grow too # long. command = ( '\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\n'.join(commands)) _AddMSBuildAction(spec, primary_input, inputs, outputs, command, description, sources_handled_by_action, actions_spec) return actions_spec, sources_handled_by_action def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description, sources_handled_by_action, actions_spec): command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd) primary_input = _FixPath(primary_input) inputs_array = _FixPaths(inputs) outputs_array = _FixPaths(outputs) additional_inputs = ';'.join([i for i in inputs_array if i != primary_input]) outputs = ';'.join(outputs_array) sources_handled_by_action.add(primary_input) action_spec = ['CustomBuild', {'Include': primary_input}] action_spec.extend( # TODO(jeanluc) 'Document' for all or just if as_sources? [['FileType', 'Document'], ['Command', command], ['Message', description], ['Outputs', outputs] ]) if additional_inputs: action_spec.append(['AdditionalInputs', additional_inputs]) actions_spec.append(action_spec)
mit
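A minimal, self-contained sketch (Python 2, matching the generator code above) of the ordering idea behind _GetMSBuildPropertyGroup: a property that references another property via $(Name) must be emitted after the property it uses, so the generator topologically sorts on the user_of_variable -> used_variable edges found by MSVS_VARIABLE_REFERENCE. The tiny depth-first sort and the sample properties below are illustrative stand-ins, not gyp.common.TopologicallySorted itself.

import re

MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')

def topo_sort(nodes, get_edges):
    # Depth-first post-order: a node is appended only after everything it
    # depends on, so used variables come out before their users. Assumes
    # the property graph is acyclic, as _GetMSBuildPropertyGroup does.
    visited, order = set(), []
    def visit(node):
        if node in visited:
            return
        visited.add(node)
        for dep in get_edges(node):
            visit(dep)
        order.append(node)
    for node in nodes:
        visit(node)
    return order

# Hypothetical property values; only names defined here count as edges.
properties = {
    'TargetName': 'my_app',
    'OutDir': '$(SolutionDir)$(Configuration)\\',
    'TargetPath': '$(OutDir)$(TargetName).exe',
}

def get_edges(name):
    # References to undefined names (e.g. $(SolutionDir)) and self
    # references (PATH=$(PATH);more) are ignored, as in GetEdges above.
    return [v for v in MSVS_VARIABLE_REFERENCE.findall(properties[name])
            if v in properties and v != name]

for name in topo_sort(sorted(properties), get_edges):
    print name, '=', properties[name]
# Prints OutDir and TargetName before TargetPath, so every $(...) is
# defined before it is used.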
CyanogenMod/android_kernel_motorola_msm8974
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python

"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2

This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.

Usage:
    Be sure that you have CONFIG_FUNCTION_TRACER
    # mount -t debugfs nodev /sys/kernel/debug
    # echo function > /sys/kernel/debug/tracing/current_tracer
    $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
    Wait some time, but not too much: the script is a bit slow.
    Break the pipe (Ctrl + Z)
    $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
    Then you have your drawn trace in draw_functrace
"""


import sys, re


class CallTree:
    """ This class provides a tree representation of the functions
        call stack. If a function has no parent in the kernel (interrupt,
        syscall, kernel thread...) then it is attached to a virtual parent
        called ROOT.
    """
    ROOT = None

    def __init__(self, func, time=None, parent=None):
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
            into the tree at the appropriate place.
            @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
            has the name given by func. If this function is not
            on a parent, then create it as a new child of root.
            @return: A reference to the parent.
        """
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)

        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch + " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch + " |", True)
            i += 1
        return s


class BrokenLineException(Exception):
    """If the last line is not complete because of the pipe breakage,
       we want to stop the processing and ignore this line.
    """
    pass


class CommentLineException(Exception):
    """ If the line is a comment (as in the beginning of the trace file),
        just ignore it.
    """
    pass


def parseLine(line):
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(1), m.group(2), m.group(3))


def main():
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT

    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            break
        except CommentLineException:
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)

    print CallTree.ROOT


if __name__ == "__main__":
    main()
gpl-2.0
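A short standalone demonstration (Python 2) of the trace-line layout that parseLine() above expects from the function tracer; the sample line is invented but follows ftrace's "task-pid [cpu] timestamp: callee <-caller" format.

import re

line = 'bash-1994  [000]  4154.655897: mutex_unlock <-tracing_open'
m = re.match(r'[^]]+?\] +([0-9.]+): (\w+) <-(\w+)', line)
# The groups are (call time, callee, caller), the same triple that
# parseLine() returns to main().
print m.groups()   # ('4154.655897', 'mutex_unlock', 'tracing_open')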
christian-bromann/electron
script/create-dist.py
65
5723
#!/usr/bin/env python

import os
import re
import shutil
import subprocess
import sys
import stat

from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
                       get_target_arch, get_chromedriver_version
from lib.util import scoped_cwd, rm_rf, get_atom_shell_version, make_zip, \
                     execute, atom_gyp


ATOM_SHELL_VERSION = get_atom_shell_version()

SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                            'download', 'libchromiumcontent', 'static_library')

PROJECT_NAME = atom_gyp()['project_name%']
PRODUCT_NAME = atom_gyp()['product_name%']

TARGET_BINARIES = {
  'darwin': [
  ],
  'win32': [
    '{0}.exe'.format(PROJECT_NAME),  # 'electron.exe'
    'content_shell.pak',
    'd3dcompiler_47.dll',
    'icudtl.dat',
    'libEGL.dll',
    'libGLESv2.dll',
    'msvcp120.dll',
    'msvcr120.dll',
    'node.dll',
    'pdf.dll',
    'content_resources_200_percent.pak',
    'ui_resources_200_percent.pak',
    'xinput1_3.dll',
    'natives_blob.bin',
    'snapshot_blob.bin',
    'vccorlib120.dll',
  ],
  'linux': [
    PROJECT_NAME,  # 'electron'
    'content_shell.pak',
    'icudtl.dat',
    'libnode.so',
    'natives_blob.bin',
    'snapshot_blob.bin',
  ],
}
TARGET_DIRECTORIES = {
  'darwin': [
    '{0}.app'.format(PRODUCT_NAME),
  ],
  'win32': [
    'resources',
    'locales',
  ],
  'linux': [
    'resources',
    'locales',
  ],
}

SYSTEM_LIBRARIES = [
  'libgcrypt.so',
  'libnotify.so',
]


def main():
  rm_rf(DIST_DIR)
  os.makedirs(DIST_DIR)

  target_arch = get_target_arch()

  force_build()
  create_symbols()
  copy_binaries()
  copy_chrome_binary('chromedriver')
  copy_chrome_binary('mksnapshot')
  copy_license()

  if PLATFORM == 'linux':
    strip_binaries()
    if target_arch != 'arm':
      copy_system_libraries()

  create_version()
  create_dist_zip()
  create_chrome_binary_zip('chromedriver', get_chromedriver_version())
  create_chrome_binary_zip('mksnapshot', ATOM_SHELL_VERSION)
  create_symbols_zip()


def force_build():
  build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
  execute([sys.executable, build, '-c', 'Release'])


def copy_binaries():
  for binary in TARGET_BINARIES[PLATFORM]:
    shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR)

  for directory in TARGET_DIRECTORIES[PLATFORM]:
    shutil.copytree(os.path.join(OUT_DIR, directory),
                    os.path.join(DIST_DIR, directory),
                    symlinks=True)


def copy_chrome_binary(binary):
  if PLATFORM == 'win32':
    binary += '.exe'
  src = os.path.join(CHROMIUM_DIR, binary)
  dest = os.path.join(DIST_DIR, binary)

  # Copy file and keep the executable bit.
  shutil.copyfile(src, dest)
  os.chmod(dest, os.stat(dest).st_mode | stat.S_IEXEC)


def copy_license():
  shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR)


def strip_binaries():
  if get_target_arch() == 'arm':
    strip = 'arm-linux-gnueabihf-strip'
  else:
    strip = 'strip'
  for binary in TARGET_BINARIES[PLATFORM]:
    if binary.endswith('.so') or '.' not in binary:
      execute([strip, os.path.join(DIST_DIR, binary)])


def copy_system_libraries():
  executable_path = os.path.join(OUT_DIR, PROJECT_NAME)  # out/R/electron
  ldd = execute(['ldd', executable_path])
  lib_re = re.compile('\t(.*) => (.+) \(.*\)$')
  for line in ldd.splitlines():
    m = lib_re.match(line)
    if not m:
      continue
    for i, library in enumerate(SYSTEM_LIBRARIES):
      real_library = m.group(1)
      if real_library.startswith(library):
        shutil.copyfile(m.group(2), os.path.join(DIST_DIR, real_library))
        SYSTEM_LIBRARIES[i] = real_library


def create_version():
  version_path = os.path.join(SOURCE_ROOT, 'dist', 'version')
  with open(version_path, 'w') as version_file:
    version_file.write(ATOM_SHELL_VERSION)


def create_symbols():
  destination = os.path.join(DIST_DIR,
                             '{0}.breakpad.syms'.format(PROJECT_NAME))
  dump_symbols = os.path.join(SOURCE_ROOT, 'script', 'dump-symbols.py')
  execute([sys.executable, dump_symbols, destination])


def create_dist_zip():
  dist_name = '{0}-{1}-{2}-{3}.zip'.format(PROJECT_NAME, ATOM_SHELL_VERSION,
                                           PLATFORM, get_target_arch())
  zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)

  with scoped_cwd(DIST_DIR):
    files = TARGET_BINARIES[PLATFORM] + ['LICENSE', 'version']
    if PLATFORM == 'linux':
      files += [lib for lib in SYSTEM_LIBRARIES if os.path.exists(lib)]
    dirs = TARGET_DIRECTORIES[PLATFORM]
    make_zip(zip_file, files, dirs)


def create_chrome_binary_zip(binary, version):
  dist_name = '{0}-{1}-{2}-{3}.zip'.format(binary, version, PLATFORM,
                                           get_target_arch())
  zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)

  with scoped_cwd(DIST_DIR):
    files = ['LICENSE']
    if PLATFORM == 'win32':
      files += [binary + '.exe']
    else:
      files += [binary]
    make_zip(zip_file, files, [])


def create_symbols_zip():
  dist_name = '{0}-{1}-{2}-{3}-symbols.zip'.format(PROJECT_NAME,
                                                   ATOM_SHELL_VERSION,
                                                   PLATFORM,
                                                   get_target_arch())
  zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)

  with scoped_cwd(DIST_DIR):
    files = ['LICENSE', 'version']
    dirs = ['{0}.breakpad.syms'.format(PROJECT_NAME)]
    make_zip(zip_file, files, dirs)


if __name__ == '__main__':
  sys.exit(main())
mit
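A small illustration (Python 2) of the ldd-output parsing done by copy_system_libraries() above. The sample line and path are made up; note how the versioned soname reported by ldd (libnotify.so.4) still matches the 'libnotify.so' prefix listed in SYSTEM_LIBRARIES.

import re

lib_re = re.compile('\t(.*) => (.+) \(.*\)$')
sample = '\tlibnotify.so.4 => /usr/lib/libnotify.so.4 (0x00007f6e2e000000)'
m = lib_re.match(sample)
print m.group(1)   # libnotify.so.4 -- copied next to the electron binary
print m.group(2)   # /usr/lib/libnotify.so.4 -- source path on the build host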
dulems/hue
desktop/core/ext-py/configobj/validate.py
42
46768
# validate.py # A Validator object # Copyright (C) 2005 Michael Foord, Mark Andrews, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # mark AT la-la DOT com # nico AT tekNico DOT net # This software is licensed under the terms of the BSD license. # http://www.voidspace.org.uk/python/license.shtml # Basically you're free to copy, modify, distribute and relicense it, # So long as you keep a copy of the license with it. # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. """ The Validator object is used to check that supplied values conform to a specification. The value can be supplied as a string - e.g. from a config file. In this case the check will also *convert* the value to the required type. This allows you to add validation as a transparent layer to access data stored as strings. The validation checks that the data is correct *and* converts it to the expected type. Some standard checks are provided for basic data types. Additional checks are easy to write. They can be provided when the ``Validator`` is instantiated or added afterwards. The standard functions work with the following basic data types : * integers * floats * booleans * strings * ip_addr plus lists of these datatypes Adding additional checks is done through coding simple functions. The full set of standard checks are : * 'integer': matches integer values (including negative) Takes optional 'min' and 'max' arguments : :: integer() integer(3, 9) # any value from 3 to 9 integer(min=0) # any positive value integer(max=9) * 'float': matches float values Has the same parameters as the integer check. * 'boolean': matches boolean values - ``True`` or ``False`` Acceptable string values for True are : true, on, yes, 1 Acceptable string values for False are : false, off, no, 0 Any other value raises an error. * 'ip_addr': matches an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. * 'string': matches any string. Takes optional keyword args 'min' and 'max' to specify min and max lengths of the string. * 'list': matches any list. Takes optional keyword args 'min', and 'max' to specify min and max sizes of the list. (Always returns a list.) * 'tuple': matches any tuple. Takes optional keyword args 'min', and 'max' to specify min and max sizes of the tuple. (Always returns a tuple.) * 'int_list': Matches a list of integers. Takes the same arguments as list. * 'float_list': Matches a list of floats. Takes the same arguments as list. * 'bool_list': Matches a list of boolean values. Takes the same arguments as list. * 'ip_addr_list': Matches a list of IP addresses. Takes the same arguments as list. * 'string_list': Matches a list of strings. Takes the same arguments as list. * 'mixed_list': Matches a list with different types in specific positions. List size must match the number of arguments. Each position can be one of : 'integer', 'float', 'ip_addr', 'string', 'boolean' So to specify a list with two strings followed by two integers, you write the check as : :: mixed_list('string', 'string', 'integer', 'integer') * 'pass': This check matches everything ! It never fails and the value is unchanged. It is also the default if no check is specified. * 'option': This check matches any from a list of options. 
You specify this check with : :: option('option 1', 'option 2', 'option 3') You can supply a default value (returned if no value is supplied) using the default keyword argument. You specify a list argument for default using a list constructor syntax in the check : :: checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3')) A badly formatted set of arguments will raise a ``VdtParamError``. """ __docformat__ = "restructuredtext en" __version__ = '1.0.0' __revision__ = '$Id: validate.py 123 2005-09-08 08:54:28Z fuzzyman $' __all__ = ( '__version__', 'dottedQuadToNum', 'numToDottedQuad', 'ValidateError', 'VdtUnknownCheckError', 'VdtParamError', 'VdtTypeError', 'VdtValueError', 'VdtValueTooSmallError', 'VdtValueTooBigError', 'VdtValueTooShortError', 'VdtValueTooLongError', 'VdtMissingValue', 'Validator', 'is_integer', 'is_float', 'is_boolean', 'is_list', 'is_tuple', 'is_ip_addr', 'is_string', 'is_int_list', 'is_bool_list', 'is_float_list', 'is_string_list', 'is_ip_addr_list', 'is_mixed_list', 'is_option', '__docformat__', ) import re _list_arg = re.compile(r''' (?: ([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\( ( (?: \s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted ) \s*,\s* )* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted )? # last one ) \) ) ''', re.VERBOSE | re.DOTALL) # two groups _list_members = re.compile(r''' ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?) # unquoted ) (?: (?:\s*,\s*)|(?:\s*$) # comma ) ''', re.VERBOSE | re.DOTALL) # one group _paramstring = r''' (?: ( (?: [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\( (?: \s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted ) \s*,\s* )* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted )? # last one \) )| (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?)| # unquoted (?: # keyword argument [a-zA-Z_][a-zA-Z0-9_]*\s*=\s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?) # unquoted ) ) ) ) (?: (?:\s*,\s*)|(?:\s*$) # comma ) ) ''' _matchstring = '^%s*' % _paramstring # Python pre 2.2.1 doesn't have bool try: bool except NameError: def bool(val): """Simple boolean equivalent function. """ if val: return 1 else: return 0 def dottedQuadToNum(ip): """ Convert decimal dotted quad string to long integer >>> int(dottedQuadToNum('1 ')) 1 >>> int(dottedQuadToNum(' 1.2')) 16777218 >>> int(dottedQuadToNum(' 1.2.3 ')) 16908291 >>> int(dottedQuadToNum('1.2.3.4')) 16909060 >>> dottedQuadToNum('1.2.3. 
4') 16909060 >>> dottedQuadToNum('255.255.255.255') 4294967295L >>> dottedQuadToNum('255.255.255.256') Traceback (most recent call last): ValueError: Not a good dotted-quad IP: 255.255.255.256 """ # import here to avoid it when ip_addr values are not used import socket, struct try: return struct.unpack('!L', socket.inet_aton(ip.strip()))[0] except socket.error: # bug in inet_aton, corrected in Python 2.3 if ip.strip() == '255.255.255.255': return 0xFFFFFFFFL else: raise ValueError('Not a good dotted-quad IP: %s' % ip) return def numToDottedQuad(num): """ Convert long int to dotted quad string >>> numToDottedQuad(-1L) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(1L) '0.0.0.1' >>> numToDottedQuad(16777218L) '1.0.0.2' >>> numToDottedQuad(16908291L) '1.2.0.3' >>> numToDottedQuad(16909060L) '1.2.3.4' >>> numToDottedQuad(4294967295L) '255.255.255.255' >>> numToDottedQuad(4294967296L) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296 """ # import here to avoid it when ip_addr values are not used import socket, struct # no need to intercept here, 4294967295L is fine if num > 4294967295L or num < 0: raise ValueError('Not a good numeric IP: %s' % num) try: return socket.inet_ntoa( struct.pack('!L', long(num))) except (socket.error, struct.error, OverflowError): raise ValueError('Not a good numeric IP: %s' % num) class ValidateError(Exception): """ This error indicates that the check failed. It can be the base class for more specific errors. Any check function that fails ought to raise this error. (or a subclass) >>> raise ValidateError Traceback (most recent call last): ValidateError """ class VdtMissingValue(ValidateError): """No value was supplied to a check that needed one.""" class VdtUnknownCheckError(ValidateError): """An unknown check function was requested""" def __init__(self, value): """ >>> raise VdtUnknownCheckError('yoda') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. """ ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,)) class VdtParamError(SyntaxError): """An incorrect parameter was passed""" def __init__(self, name, value): """ >>> raise VdtParamError('yoda', 'jedi') Traceback (most recent call last): VdtParamError: passed an incorrect value "jedi" for parameter "yoda". """ SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name)) class VdtTypeError(ValidateError): """The value supplied was of the wrong type""" def __init__(self, value): """ >>> raise VdtTypeError('jedi') Traceback (most recent call last): VdtTypeError: the value "jedi" is of the wrong type. """ ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,)) class VdtValueError(ValidateError): """The value supplied was of the correct type, but was not an allowed value.""" def __init__(self, value): """ >>> raise VdtValueError('jedi') Traceback (most recent call last): VdtValueError: the value "jedi" is unacceptable. """ ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,)) class VdtValueTooSmallError(VdtValueError): """The value supplied was of the correct type, but was too small.""" def __init__(self, value): """ >>> raise VdtValueTooSmallError('0') Traceback (most recent call last): VdtValueTooSmallError: the value "0" is too small. """ ValidateError.__init__(self, 'the value "%s" is too small.' 
% (value,)) class VdtValueTooBigError(VdtValueError): """The value supplied was of the correct type, but was too big.""" def __init__(self, value): """ >>> raise VdtValueTooBigError('1') Traceback (most recent call last): VdtValueTooBigError: the value "1" is too big. """ ValidateError.__init__(self, 'the value "%s" is too big.' % (value,)) class VdtValueTooShortError(VdtValueError): """The value supplied was of the correct type, but was too short.""" def __init__(self, value): """ >>> raise VdtValueTooShortError('jed') Traceback (most recent call last): VdtValueTooShortError: the value "jed" is too short. """ ValidateError.__init__( self, 'the value "%s" is too short.' % (value,)) class VdtValueTooLongError(VdtValueError): """The value supplied was of the correct type, but was too long.""" def __init__(self, value): """ >>> raise VdtValueTooLongError('jedie') Traceback (most recent call last): VdtValueTooLongError: the value "jedie" is too long. """ ValidateError.__init__(self, 'the value "%s" is too long.' % (value,)) class Validator(object): """ Validator is an object that allows you to register a set of 'checks'. These checks take input and test that it conforms to the check. This can also involve converting the value from a string into the correct datatype. The ``check`` method takes an input string which configures which check is to be used and applies that check to a supplied value. An example input string would be: 'int_range(param1, param2)' You would then provide something like: >>> def int_range_check(value, min, max): ... # turn min and max from strings to integers ... min = int(min) ... max = int(max) ... # check that value is of the correct type. ... # possible valid inputs are integers or strings ... # that represent integers ... if not isinstance(value, (int, long, basestring)): ... raise VdtTypeError(value) ... elif isinstance(value, basestring): ... # if we are given a string ... # attempt to convert to an integer ... try: ... value = int(value) ... except ValueError: ... raise VdtValueError(value) ... # check the value is between our constraints ... if not min <= value: ... raise VdtValueTooSmallError(value) ... if not value <= max: ... raise VdtValueTooBigError(value) ... return value >>> fdict = {'int_range': int_range_check} >>> vtr1 = Validator(fdict) >>> vtr1.check('int_range(20, 40)', '30') 30 >>> vtr1.check('int_range(20, 40)', '60') Traceback (most recent call last): VdtValueTooBigError: the value "60" is too big. New functions can be added with : :: >>> vtr2 = Validator() >>> vtr2.functions['int_range'] = int_range_check Or by passing in a dictionary of functions when Validator is instantiated. Your functions *can* use keyword arguments, but the first argument should always be 'value'. If the function doesn't take additional arguments, the parentheses are optional in the check. It can be written with either of : :: keyword = function_name keyword = function_name() The first program to utilise Validator() was Michael Foord's ConfigObj, an alternative to ConfigParser which supports lists and can validate a config file using a config schema. For more details on using Validator with ConfigObj see: http://www.voidspace.org.uk/python/configobj.html """ # this regex does the initial parsing of the checks _func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL) # this regex takes apart keyword arguments _key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL) # this regex finds keyword=list(....) 
type values _list_arg = _list_arg # this regex takes individual values out of lists - in one pass _list_members = _list_members # These regexes check a set of arguments for validity # and then pull the members out _paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL) _matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL) def __init__(self, functions=None): """ >>> vtri = Validator() """ self.functions = { '': self._pass, 'integer': is_integer, 'float': is_float, 'boolean': is_boolean, 'ip_addr': is_ip_addr, 'string': is_string, 'list': is_list, 'tuple': is_tuple, 'int_list': is_int_list, 'float_list': is_float_list, 'bool_list': is_bool_list, 'ip_addr_list': is_ip_addr_list, 'string_list': is_string_list, 'mixed_list': is_mixed_list, 'pass': self._pass, 'option': is_option, 'force_list': force_list, } if functions is not None: self.functions.update(functions) # tekNico: for use by ConfigObj self.baseErrorClass = ValidateError self._cache = {} def check(self, check, value, missing=False): """ Usage: check(check, value) Arguments: check: string representing check to apply (including arguments) value: object to be checked Returns value, converted to correct type if necessary If the check fails, raises a ``ValidateError`` subclass. >>> vtor.check('yoda', '') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. >>> vtor.check('yoda()', '') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. >>> vtor.check('string(default="")', '', missing=True) '' """ fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check) if missing: if default is None: # no information needed here - to be handled by caller raise VdtMissingValue() value = self._handle_none(default) if value is None: return None return self._check_value(value, fun_name, fun_args, fun_kwargs) def _handle_none(self, value): if value == 'None': value = None elif value in ("'None'", '"None"'): # Special case a quoted None value = self._unquote(value) return value def _parse_with_caching(self, check): if check in self._cache: fun_name, fun_args, fun_kwargs, default = self._cache[check] # We call list and dict below to work with *copies* of the data # rather than the original (which are mutable of course) fun_args = list(fun_args) fun_kwargs = dict(fun_kwargs) else: fun_name, fun_args, fun_kwargs, default = self._parse_check(check) fun_kwargs = dict((str(key), value) for (key, value) in fun_kwargs.items()) self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default return fun_name, fun_args, fun_kwargs, default def _check_value(self, value, fun_name, fun_args, fun_kwargs): try: fun = self.functions[fun_name] except KeyError: raise VdtUnknownCheckError(fun_name) else: return fun(value, *fun_args, **fun_kwargs) def _parse_check(self, check): fun_match = self._func_re.match(check) if fun_match: fun_name = fun_match.group(1) arg_string = fun_match.group(2) arg_match = self._matchfinder.match(arg_string) if arg_match is None: # Bad syntax raise VdtParamError('Bad syntax in check "%s".' 
% check) fun_args = [] fun_kwargs = {} # pull out args of group 2 for arg in self._paramfinder.findall(arg_string): # args may need whitespace removing (before removing quotes) arg = arg.strip() listmatch = self._list_arg.match(arg) if listmatch: key, val = self._list_handle(listmatch) fun_kwargs[key] = val continue keymatch = self._key_arg.match(arg) if keymatch: val = keymatch.group(2) if not val in ("'None'", '"None"'): # Special case a quoted None val = self._unquote(val) fun_kwargs[keymatch.group(1)] = val continue fun_args.append(self._unquote(arg)) else: # allows for function names without (args) return check, (), {}, None # Default must be deleted if the value is specified too, # otherwise the check function will get a spurious "default" keyword arg try: default = fun_kwargs.pop('default', None) except AttributeError: # Python 2.2 compatibility default = None try: default = fun_kwargs['default'] del fun_kwargs['default'] except KeyError: pass return fun_name, fun_args, fun_kwargs, default def _unquote(self, val): """Unquote a value if necessary.""" if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]): val = val[1:-1] return val def _list_handle(self, listmatch): """Take apart a ``keyword=list('val, 'val')`` type string.""" out = [] name = listmatch.group(1) args = listmatch.group(2) for arg in self._list_members.findall(args): out.append(self._unquote(arg)) return name, out def _pass(self, value): """ Dummy check that always passes >>> vtor.check('', 0) 0 >>> vtor.check('', '0') '0' """ return value def get_default_value(self, check): """ Given a check, return the default value for the check (converted to the right type). If the check doesn't specify a default value then a ``KeyError`` will be raised. """ fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check) if default is None: raise KeyError('Check "%s" has no default value.' % check) value = self._handle_none(default) if value is None: return value return self._check_value(value, fun_name, fun_args, fun_kwargs) def _is_num_param(names, values, to_float=False): """ Return numbers from inputs or raise VdtParamError. Lets ``None`` pass through. Pass in keyword argument ``to_float=True`` to use float for the conversion rather than int. >>> _is_num_param(('', ''), (0, 1.0)) [0, 1] >>> _is_num_param(('', ''), (0, 1.0), to_float=True) [0.0, 1.0] >>> _is_num_param(('a'), ('a')) Traceback (most recent call last): VdtParamError: passed an incorrect value "a" for parameter "a". """ fun = to_float and float or int out_params = [] for (name, val) in zip(names, values): if val is None: out_params.append(val) elif isinstance(val, (int, long, float, basestring)): try: out_params.append(fun(val)) except ValueError, e: raise VdtParamError(name, val) else: raise VdtParamError(name, val) return out_params # built in checks # you can override these by setting the appropriate name # in Validator.functions # note: if the params are specified wrongly in your input string, # you will also raise errors. def is_integer(value, min=None, max=None): """ A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. 
>>> vtor.check('integer', '2.2') Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0 """ (min_val, max_val) = _is_num_param(('min', 'max'), (min, max)) if not isinstance(value, (int, long, basestring)): raise VdtTypeError(value) if isinstance(value, basestring): # if it's a string - does it represent an integer ? try: value = int(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value def is_float(value, min=None, max=None): """ A check that tests that a given value is a float (an integer will be accepted), and optionally - that it is between bounds. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. This can accept negative values. >>> vtor.check('float', '2') 2.0 From now on we multiply the value to avoid comparing decimals >>> vtor.check('float', '-6.8') * 10 -68.0 >>> vtor.check('float', '12.2') * 10 122.0 >>> vtor.check('float', 8.4) * 10 84.0 >>> vtor.check('float', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('float(10.1)', '10.2') * 10 102.0 >>> vtor.check('float(max=20.2)', '15.1') * 10 151.0 >>> vtor.check('float(10.0)', '9.0') Traceback (most recent call last): VdtValueTooSmallError: the value "9.0" is too small. >>> vtor.check('float(max=20.0)', '35.0') Traceback (most recent call last): VdtValueTooBigError: the value "35.0" is too big. """ (min_val, max_val) = _is_num_param( ('min', 'max'), (min, max), to_float=True) if not isinstance(value, (int, long, float, basestring)): raise VdtTypeError(value) if not isinstance(value, float): # if it's a string - does it represent a float ? try: value = float(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value bool_dict = { True: True, 'on': True, '1': True, 'true': True, 'yes': True, False: False, 'off': False, '0': False, 'false': False, 'no': False, } def is_boolean(value): """ Check if the value represents a boolean. 
>>> vtor.check('boolean', 0) 0 >>> vtor.check('boolean', False) 0 >>> vtor.check('boolean', '0') 0 >>> vtor.check('boolean', 'off') 0 >>> vtor.check('boolean', 'false') 0 >>> vtor.check('boolean', 'no') 0 >>> vtor.check('boolean', 'nO') 0 >>> vtor.check('boolean', 'NO') 0 >>> vtor.check('boolean', 1) 1 >>> vtor.check('boolean', True) 1 >>> vtor.check('boolean', '1') 1 >>> vtor.check('boolean', 'on') 1 >>> vtor.check('boolean', 'true') 1 >>> vtor.check('boolean', 'yes') 1 >>> vtor.check('boolean', 'Yes') 1 >>> vtor.check('boolean', 'YES') 1 >>> vtor.check('boolean', '') Traceback (most recent call last): VdtTypeError: the value "" is of the wrong type. >>> vtor.check('boolean', 'up') Traceback (most recent call last): VdtTypeError: the value "up" is of the wrong type. """ if isinstance(value, basestring): try: return bool_dict[value.lower()] except KeyError: raise VdtTypeError(value) # we do an equality test rather than an identity test # this ensures Python 2.2 compatibilty # and allows 0 and 1 to represent True and False if value == False: return False elif value == True: return True else: raise VdtTypeError(value) def is_ip_addr(value): """ Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, basestring): raise VdtTypeError(value) value = value.strip() try: dottedQuadToNum(value) except ValueError: raise VdtValueError(value) return value def is_list(value, min=None, max=None): """ Check that the value is a list of values. You can optionally specify the minimum and maximum number of members. It does no check on list members. >>> vtor.check('list', ()) [] >>> vtor.check('list', []) [] >>> vtor.check('list', (1, 2)) [1, 2] >>> vtor.check('list', [1, 2]) [1, 2] >>> vtor.check('list(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) [1, 2, 3, 4] >>> vtor.check('list', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('list', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type. """ (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) if isinstance(value, basestring): raise VdtTypeError(value) try: num_members = len(value) except TypeError: raise VdtTypeError(value) if min_len is not None and num_members < min_len: raise VdtValueTooShortError(value) if max_len is not None and num_members > max_len: raise VdtValueTooLongError(value) return list(value) def is_tuple(value, min=None, max=None): """ Check that the value is a tuple of values. 
    You can optionally specify the minimum and maximum number of members.
    No check is performed on the members themselves.

    >>> vtor.check('tuple', ())
    ()
    >>> vtor.check('tuple', [])
    ()
    >>> vtor.check('tuple', (1, 2))
    (1, 2)
    >>> vtor.check('tuple', [1, 2])
    (1, 2)
    >>> vtor.check('tuple(3)', (1, 2))
    Traceback (most recent call last):
    VdtValueTooShortError: the value "(1, 2)" is too short.
    >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
    Traceback (most recent call last):
    VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
    >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
    (1, 2, 3, 4)
    >>> vtor.check('tuple', 0)
    Traceback (most recent call last):
    VdtTypeError: the value "0" is of the wrong type.
    >>> vtor.check('tuple', '12')
    Traceback (most recent call last):
    VdtTypeError: the value "12" is of the wrong type.
    """
    return tuple(is_list(value, min, max))


def is_string(value, min=None, max=None):
    """
    Check that the supplied value is a string.

    You can optionally specify the minimum and maximum length of the string.

    >>> vtor.check('string', '0')
    '0'
    >>> vtor.check('string', 0)
    Traceback (most recent call last):
    VdtTypeError: the value "0" is of the wrong type.
    >>> vtor.check('string(2)', '12')
    '12'
    >>> vtor.check('string(2)', '1')
    Traceback (most recent call last):
    VdtValueTooShortError: the value "1" is too short.
    >>> vtor.check('string(min=2, max=3)', '123')
    '123'
    >>> vtor.check('string(min=2, max=3)', '1234')
    Traceback (most recent call last):
    VdtValueTooLongError: the value "1234" is too long.
    """
    if not isinstance(value, basestring):
        raise VdtTypeError(value)
    (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
    try:
        num_members = len(value)
    except TypeError:
        raise VdtTypeError(value)
    if min_len is not None and num_members < min_len:
        raise VdtValueTooShortError(value)
    if max_len is not None and num_members > max_len:
        raise VdtValueTooLongError(value)
    return value


def is_int_list(value, min=None, max=None):
    """
    Check that the value is a list of integers.

    You can optionally specify the minimum and maximum number of members.

    Each list member is checked that it is an integer.

    >>> vtor.check('int_list', ())
    []
    >>> vtor.check('int_list', [])
    []
    >>> vtor.check('int_list', (1, 2))
    [1, 2]
    >>> vtor.check('int_list', [1, 2])
    [1, 2]
    >>> vtor.check('int_list', [1, 'a'])
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    """
    return [is_integer(mem) for mem in is_list(value, min, max)]


def is_bool_list(value, min=None, max=None):
    """
    Check that the value is a list of booleans.

    You can optionally specify the minimum and maximum number of members.

    Each list member is checked that it is a boolean.

    >>> vtor.check('bool_list', ())
    []
    >>> vtor.check('bool_list', [])
    []
    >>> check_res = vtor.check('bool_list', (True, False))
    >>> check_res == [True, False]
    1
    >>> check_res = vtor.check('bool_list', [True, False])
    >>> check_res == [True, False]
    1
    >>> vtor.check('bool_list', [True, 'a'])
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    """
    return [is_boolean(mem) for mem in is_list(value, min, max)]


def is_float_list(value, min=None, max=None):
    """
    Check that the value is a list of floats.

    You can optionally specify the minimum and maximum number of members.

    Each list member is checked that it is a float.

    >>> vtor.check('float_list', ())
    []
    >>> vtor.check('float_list', [])
    []
    >>> vtor.check('float_list', (1, 2.0))
    [1.0, 2.0]
    >>> vtor.check('float_list', [1, 2.0])
    [1.0, 2.0]
    >>> vtor.check('float_list', [1, 'a'])
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    """
    return [is_float(mem) for mem in is_list(value, min, max)]


def is_string_list(value, min=None, max=None):
    """
    Check that the value is a list of strings.

    You can optionally specify the minimum and maximum number of members.

    Each list member is checked that it is a string.

    >>> vtor.check('string_list', ())
    []
    >>> vtor.check('string_list', [])
    []
    >>> vtor.check('string_list', ('a', 'b'))
    ['a', 'b']
    >>> vtor.check('string_list', ['a', 1])
    Traceback (most recent call last):
    VdtTypeError: the value "1" is of the wrong type.
    >>> vtor.check('string_list', 'hello')
    Traceback (most recent call last):
    VdtTypeError: the value "hello" is of the wrong type.
    """
    if isinstance(value, basestring):
        raise VdtTypeError(value)
    return [is_string(mem) for mem in is_list(value, min, max)]


def is_ip_addr_list(value, min=None, max=None):
    """
    Check that the value is a list of IP addresses.

    You can optionally specify the minimum and maximum number of members.

    Each list member is checked that it is an IP address.

    >>> vtor.check('ip_addr_list', ())
    []
    >>> vtor.check('ip_addr_list', [])
    []
    >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
    ['1.2.3.4', '5.6.7.8']
    >>> vtor.check('ip_addr_list', ['a'])
    Traceback (most recent call last):
    VdtValueError: the value "a" is unacceptable.
    """
    return [is_ip_addr(mem) for mem in is_list(value, min, max)]


def force_list(value, min=None, max=None):
    """
    Check that a value is a list, coercing strings into
    a list with one member. Useful where users forget the
    trailing comma that turns a single value into a list.

    You can optionally specify the minimum and maximum number of members.
    A minimum of greater than one will fail if the user only supplies a
    string.

    >>> vtor.check('force_list', ())
    []
    >>> vtor.check('force_list', [])
    []
    >>> vtor.check('force_list', 'hello')
    ['hello']
    """
    if not isinstance(value, (list, tuple)):
        value = [value]
    return is_list(value, min, max)


fun_dict = {
    'integer': is_integer,
    'float': is_float,
    'ip_addr': is_ip_addr,
    'string': is_string,
    'boolean': is_boolean,
}


def is_mixed_list(value, *args):
    """
    Check that the value is a list.
    Allow specifying the type of each member.
    It works on lists of specific lengths.

    You specify each member as a positional argument specifying its type.

    Each type should be one of the following strings:
    'integer', 'float', 'ip_addr', 'string', 'boolean'

    So you can specify a list of two strings, followed by
    two integers as:

        mixed_list('string', 'string', 'integer', 'integer')

    The length of the list must match the number of positional
    arguments you supply.

    >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
    >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
    1
    >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
    1
    >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
    Traceback (most recent call last):
    VdtTypeError: the value "b" is of the wrong type.
    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
    Traceback (most recent call last):
    VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
    Traceback (most recent call last):
    VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
    >>> vtor.check(mix_str, 0)
    Traceback (most recent call last):
    VdtTypeError: the value "0" is of the wrong type.

    This test requires an elaborate setup, because of a change in error
    string output from the interpreter between Python 2.2 and 2.3.

    >>> res_seq = (
    ...     'passed an incorrect value "',
    ...     'yoda',
    ...     '" for parameter "mixed_list".',
    ...     )
    >>> res_str = "'".join(res_seq)
    >>> try:
    ...     vtor.check('mixed_list("yoda")', ('a'))
    ... except VdtParamError, err:
    ...     str(err) == res_str
    1
    """
    try:
        length = len(value)
    except TypeError:
        raise VdtTypeError(value)
    if length < len(args):
        raise VdtValueTooShortError(value)
    elif length > len(args):
        raise VdtValueTooLongError(value)
    try:
        return [fun_dict[arg](val) for arg, val in zip(args, value)]
    except KeyError, e:
        raise VdtParamError('mixed_list', e)


def is_option(value, *options):
    """
    This check matches the value to any of a set of options.

    >>> vtor.check('option("yoda", "jedi")', 'yoda')
    'yoda'
    >>> vtor.check('option("yoda", "jedi")', 'jed')
    Traceback (most recent call last):
    VdtValueError: the value "jed" is unacceptable.
    >>> vtor.check('option("yoda", "jedi")', 0)
    Traceback (most recent call last):
    VdtTypeError: the value "0" is of the wrong type.
    """
    if not isinstance(value, basestring):
        raise VdtTypeError(value)
    if value not in options:
        raise VdtValueError(value)
    return value


def _test(value, *args, **keywargs):
    """
    A function that exists for test purposes.

    >>> checks = [
    ...     '3, 6, min=1, max=3, test=list(a, b, c)',
    ...     '3',
    ...     '3, 6',
    ...     '3,',
    ...     'min=1, test="a b c"',
    ...     'min=5, test="a, b, c"',
    ...     'min=1, max=3, test="a, b, c"',
    ...     'min=-100, test=-99',
    ...     'min=1, max=3',
    ...     '3, 6, test="36"',
    ...     '3, 6, test="a, b, c"',
    ...     '3, max=3, test=list("a", "b", "c")',
    ...     '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
    ...     "test='x=fish(3)'",
    ...     ]
    >>> v = Validator({'test': _test})
    >>> for entry in checks:
    ...     print v.check(('test(%s)' % entry), 3)
    (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
    (3, ('3',), {})
    (3, ('3', '6'), {})
    (3, ('3',), {})
    (3, (), {'test': 'a b c', 'min': '1'})
    (3, (), {'test': 'a, b, c', 'min': '5'})
    (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
    (3, (), {'test': '-99', 'min': '-100'})
    (3, (), {'max': '3', 'min': '1'})
    (3, ('3', '6'), {'test': '36'})
    (3, ('3', '6'), {'test': 'a, b, c'})
    (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
    (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
    (3, (), {'test': 'x=fish(3)'})

    >>> v = Validator()
    >>> v.check('integer(default=6)', '3')
    3
    >>> v.check('integer(default=6)', None, True)
    6
    >>> v.get_default_value('integer(default=6)')
    6
    >>> v.get_default_value('float(default=6)')
    6.0
    >>> v.get_default_value('pass(default=None)')
    >>> v.get_default_value("string(default='None')")
    'None'
    >>> v.get_default_value('pass')
    Traceback (most recent call last):
    KeyError: 'Check "pass" has no default value.'
    >>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
    ['1', '2', '3', '4']

    >>> v = Validator()
    >>> v.check("pass(default=None)", None, True)
    >>> v.check("pass(default='None')", None, True)
    'None'
    >>> v.check('pass(default="None")', None, True)
    'None'
    >>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
    ['1', '2', '3', '4']

    Bug test for unicode arguments
    >>> v = Validator()
    >>> v.check(u'string(min=4)', u'test')
    u'test'

    >>> v = Validator()
    >>> v.get_default_value(u'string(min=4, default="1234")')
    u'1234'
    >>> v.check(u'string(min=4, default="1234")', u'test')
    u'test'

    >>> v = Validator()
    >>> default = v.get_default_value('string(default=None)')
    >>> default == None
    1
    """
    return (value, args, keywargs)


def _test2():
    """
    >>>
    >>> v = Validator()
    >>> v.get_default_value('string(default="#ff00dd")')
    '#ff00dd'
    >>> v.get_default_value('integer(default=3) # comment')
    3
    """


def _test3():
    r"""
    >>> vtor.check('string(default="")', '', missing=True)
    ''
    >>> vtor.check('string(default="\n")', '', missing=True)
    '\n'
    >>> print vtor.check('string(default="\n")', '', missing=True),
    <BLANKLINE>
    >>> vtor.check('string()', '\n')
    '\n'
    >>> vtor.check('string(default="\n\n\n")', '', missing=True)
    '\n\n\n'
    >>> vtor.check('string()', 'random \n text goes here\n\n')
    'random \n text goes here\n\n'
    >>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")',
    ... '', missing=True)
    ' \nrandom text\ngoes \n here\n\n '
    >>> vtor.check("string(default='\n\n\n')", '', missing=True)
    '\n\n\n'
    >>> vtor.check("option('\n','a','b',default='\n')", '', missing=True)
    '\n'
    >>> vtor.check("string_list()", ['foo', '\n', 'bar'])
    ['foo', '\n', 'bar']
    >>> vtor.check("string_list(default=list('\n'))", '', missing=True)
    ['\n']
    """


if __name__ == '__main__':
    # run the code tests in doctest format
    import sys
    import doctest
    m = sys.modules.get('__main__')
    globs = m.__dict__.copy()
    globs.update({
        'vtor': Validator(),
    })
    doctest.testmod(m, globs=globs)
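# Usage sketch (not part of the original module): as the _test doctests above
# demonstrate, Validator can be extended with custom check functions. A
# hypothetical 'even' check, reusing the module's own is_integer and
# VdtValueError, might look like this:
#
#     def is_even(value):
#         num = is_integer(value)
#         if num % 2:
#             raise VdtValueError(value)
#         return num
#
#     vtor = Validator({'even': is_even})
#     assert vtor.check('even', '42') == 42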
apache-2.0
wuzhy/autotest
client/tests/linus_stress/linus_stress.py
5
1435
import os
from autotest_lib.client.bin import test, utils


class linus_stress(test.test):
    version = 1

    def setup(self):
        # Copy the C source into the source directory and build the binary.
        os.mkdir(self.srcdir)
        os.chdir(self.bindir)
        utils.system('cp linus_stress.c src/')
        os.chdir(self.srcdir)
        utils.system(utils.get_cc() + ' linus_stress.c -D_POSIX_C_SOURCE=200112 -o linus_stress')

    def initialize(self):
        self.job.require_gcc()

    def run_the_test(self, iterations):
        # Lower the dirty-memory thresholds so writeback kicks in early and
        # the stress program runs under memory pressure.
        utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
        utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')

        cmd = os.path.join(self.srcdir, 'linus_stress')
        # Size the workload relative to total system memory.
        args = "%d" % (utils.memtotal() / 32)
        profilers = self.job.profilers
        if profilers.present():
            profilers.start(self)
        for i in range(iterations):
            utils.system(cmd + ' ' + args)
        if profilers.present():
            profilers.stop(self)
            profilers.report(self)

    def execute(self, iterations = 1):
        # Save the current VM settings and restore them afterwards, no
        # matter how the test run ends.
        dirty_ratio = utils.read_one_line('/proc/sys/vm/dirty_ratio')
        dirty_background_ratio = utils.read_one_line('/proc/sys/vm/dirty_background_ratio')
        try:
            self.run_the_test(iterations)
        finally:
            utils.write_one_line('/proc/sys/vm/dirty_ratio', dirty_ratio)
            utils.write_one_line('/proc/sys/vm/dirty_background_ratio', dirty_background_ratio)
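# Illustrative sketch only (not part of the original test): the save/restore
# pattern used in execute() could also be expressed as a context manager.
# The helper name 'sysctl_override' is hypothetical.
#
#     from contextlib import contextmanager
#
#     @contextmanager
#     def sysctl_override(path, value):
#         old = utils.read_one_line(path)
#         utils.write_one_line(path, value)
#         try:
#             yield
#         finally:
#             utils.write_one_line(path, old)
#
#     # usage: with sysctl_override('/proc/sys/vm/dirty_ratio', '4'): ...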
gpl-2.0
garbear/EventGhost
plugins/RadioSure/__init__.py
1
248696
version="0.2.13" # plugins/RadioSure/__init__.py # # Copyright (C) 2009, 2010, 2011 Pako ([email protected]) # # This file is a plugin for EventGhost. # # EventGhost is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # EventGhost is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EventGhost; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # Changelog (in reverse chronological order): # ------------------------------------------- # 0.2.13 by Pako 2011-04-06 11:07 UTC+1 # - Bug fix - bad name of language file # 0.2.12 by Pako 2011-04-05 17:38 UTC+1 # - Added first version of Favorites manager # - Added "menu browser" # - Added many new actions # 0.2.11 by Pako 2011-03-03 09:08 UTC+1 # - The cursor is changed to indicate the existence of a context menu # - If exists file "contextCursor.cur", used as the cursor where there is a contextual menu # 0.2.10 by Pako 2011-02-12 09:53 UTC+1 # - FixedTimeCtrl replaced by eg.TimeCtrl # 0.2.9 by Pako 2011-01-15 11:50 UTC+1 # - different shape of the cursor on the table of schedules indicate that there is context menu available # 0.2.8 by Pako 2011-01-11 14:25 UTC+1 # - if you turn on logging then into the log file is written whole command line # 0.2.7 by Pako 2011-01-07 18:39 UTC+1 # - fixed bug - the Scheduler window opens although in Scheduler.xml there not the attribute "Position" # (this can happen when you upgrade from version 0.2.0 and lower) # 0.2.6 by Pako 2011-01-07 11:39 UTC+1 # - fixed bug - incorrect reading favorites, when applied a new structure of RadioSure.xml file # 0.2.5 by Pako 2010-12-28 16:02 UTC+1 # - added popup menu and features "Move schedule up/down" # 0.2.4 by Pako 2010-12-24 12:08 UTC+1 # - there is no need to reinstall this plugin, when changing the way the installation (especially the paths) of Radio?Sure! 
# 0.2.3 by Pako 2010-12-24 08:30 UTC+1 # - scheduler dialog opens, even though there is no node "Favorites" in RadioSure.xml # 0.2.2 by Pako 2010-12-19 15:54 UTC+1 # - changed the way of paths settings to the RadioSure.exe and RadioSure.xml # 0.2.1 by Pako 2010-12-19 08:19 UTC+1 # - scheduler dialog remembers its position even after closing EventGhost # - bugfix - "Add schedule" enable buttons, when schedule list is empty # 0.2.0 by Pako 2010-12-14 11:13 UTC+1 # - a comprehensive rework according to the plugin SchedulGhost: # - addded new types of schedule # - changed format of "Scheduler.xml" file # - added ability to affect certain types of schedules according to public holidays # - added option to select the first day of the week (Sunday or Monday) # - scheduler dialog remembers its position # - scheduler dialog is not modal and can be minimized # - added Apply button (scheduler dialog) # - added new actions - "Run schedule immediately" # 0.1.9 by Pako 2010-12-09 13:52 UTC+1 # - correction of previous versions (moreover redefine one pseudo-private method) # 0.1.8 by Pako 2010-12-06 20:10 UTC+1 # - wx.lib.masked.TimeCtrl workaround (see http://trac.wxwidgets.org/ticket/11171) # 0.1.7 by Pako 2010-07-22 20:27 GMT+1 # - bugfix # 0.1.6 by Pako 2010-07-22 10:30 GMT+1 # - added wx.ComboBox for Scheduler actions # 0.1.5 by Pako 2010-07-10 08:21 GMT+1 # - added Scheduler # - added guid attribute # 0.1.4 by Pako 2010-03-23 11:20 GMT+1 # - added action Random favorite # 0.1.3 by Pako 2010-03-22 09:09 GMT+1 # - added actions Start and Stop observation of titlebar #=============================================================================== eg.RegisterPlugin( name = "RadioSure", author = "Pako", version = version, kind = "program", guid = "{84703620-87B4-4982-A9AB-DA1B3F8D22EA}", description = ur'''<rst> Adds actions to control the `Radio?Sure!`_ .. 
_Radio?Sure!: http://www.radiosure.com/ ''', createMacrosOnAdd = True, url = "http://www.eventghost.net/forum/viewtopic.php?f=9&t=2359", icon = ( "iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAADAFBMVEUA//+Gh4ju7/Ds" "7e7q6+zo6evm5+nk5efi4+Xg4ePd3+Db3N7Z2tzW2Nrr7e5+g4bo6erm5+js7e3a3N7Y" "2dt3fYDT1dfp6uzn6Orl5uhIS03V1tjS1NbQ0tTn6Onl5ufi5OXS09XP0dPNz9HR09XP" "0NPMztDKzM7h4+Tf4OLd3uCVmp1OUlQZGhoYGRlLTlCHjZDMzdDJy87Hycve4OHc3t+d" "oqQyNDU3OjtSVlgpKywqLC2IjpHHyMvExsnc3d/Z29xWWlyBh4p2fH9pbnFfZGYsLi9L" "T1HExsjCxMYbHB2XnJ5MUFJKTU9yeHtVWVvBw8a/wcTW19kcHR6UmZypra9RVVeGjI9l" "am0aGxu/wcO9v8JcYWNeY2W5vL6xtLamqqyboKK9vsG7vcA9QEG6vL+5u76LkJPIycyy" "tbddYmRYXV+jqKqDiYy3ubzFx8nDxcfBw8W4ur22uLsAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADQcfgAAAAAAAXQciD0AAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAABAAgAAAAAAAAAAAAAAAAAAAAAAAAAAABGa1gAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAEAAAAAAAAAAAAPAAAAAAEAAAEAAADQckT/C08AAAAAAAAAAAAAAAMAAADf" "BnAAAAAAAAAAAAAAAAAAAAQAAQEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACnZAh6AAAA" "AXRSTlMAQObYZgAAAAlwSFlzAAALEgAACxIB0t1+/AAAAKdJREFUeNpjYGBgRAVoAl9A" "ArwQCbDMayYgg5OTk5GBg4PhPzs7OwNIAESDARsbGwNICxtQKQeQLwSkb4EE1JEMPQfS" "wgMGjNwgANZix8h4UwOqYgdIxdmznGKcIPAMaBVIReARW6DcNW2QimUgAWlGe7DynTY8" "jPOYwNYzs/+//vqB3ANmZrAWVUeg9EsGCcafHIyTQQKGjDZAAUYOJt4/rH0M6N4HAFCJ" "GrcTFgV2AAAAAElFTkSuQmCC" ), ) #=============================================================================== import wx.grid as gridlib import subprocess import wx.calendar as wxCal from wx.lib.masked import EVT_TIMEUPDATE from subprocess import Popen from os import listdir, remove, rename from os.path import abspath, join, dirname, split, isfile, exists from calendar import day_name, month_name, monthrange from wx.lib.mixins.listctrl import CheckListCtrlMixin from _winreg import OpenKey, HKEY_CURRENT_USER, EnumValue, QueryValueEx, CloseKey from time import sleep, mktime, strptime, localtime from datetime import datetime as dt from datetime import timedelta as td from copy import deepcopy as cpy from xml.dom import minidom as miniDom from threading import Timer, Thread, Event from eg.WinApi.Utils import GetMonitorDimensions from eg.WinApi.Dynamic import CreateEvent, SetEvent, PostMessage from eg.WinApi.Dynamic import SendMessage, ShowWindow, RegisterWindowMessage from eg.WinApi import SendMessageTimeout from win32gui import GetWindowText, GetWindow, GetDlgCtrlID, GetMenuItemCount from win32gui import GetWindowPlacement, GetDlgItem, GetClassName, GetSubMenu from win32file import GetFileAttributes from random import randrange from codecs import lookup from codecs import open as openFile from winsound import PlaySound, SND_ASYNC from locale import strxfrm from ctypes import c_long, c_ulong, c_int, byref, sizeof, Structure, c_buffer from ctypes.wintypes import WinDLL _kernel32 = WinDLL("kernel32") _user32 = WinDLL("user32") from sys import getfilesystemencoding FSE = getfilesystemencoding() if eg.Version.base >= "0.4.0": from eg.Classes.MainFrame.TreeCtrl import DropTarget as EventDropTarget IMAGES_DIR = eg.imagesDir else: from eg.Classes.MainFrame.TreeCtrl import EventDropTarget IMAGES_DIR = eg.IMAGES_DIR ARIAL_INFO = "0;-35;0;0;0;700;0;0;0;0;3;2;1;34;Arial" TAHOMA_INFO = "0;-27;0;0;0;400;0;0;0;0;3;2;1;34;Tahoma" PROCESS_TERMINATE = 1 WM_CLOSE = 16 
WM_COMMAND = 273 WM_SYSCOMMAND = 274 TBM_GETPOS = 1024 TBM_SETPOS = 1029 SC_RESTORE = 61728 #SW_HIDE = 0 #SW_MINIMIZE = 6 SW_RESTORE = 9 GW_CHILD = 5 GW_HWNDNEXT = 2 FILE_ATTRIBUTE_HIDDEN = 2 FILE_ATTRIBUTE_SYSTEM = 4 SYS_VSCROLL_X = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X) #=============================================================================== CUR_STRING = ( "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAABnRSTlMA/wBmAADomHeP" "AAAACXBIWXMAAA7EAAAOxAGVKw4bAAAMK0lEQVR42gEgDN/zAQAAAOns8xZ6DQAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAApKDTXa9EAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA////AQEB1tLSKZQuAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAP///youLtds0gAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAD///8qLi7XbNIAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAACAAAAAAAA/wD+AAAA////Ki4u12zSAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAgAAAAAAAP7///8A/gAAAP///youLtds0wAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAIAAAAAAAAAAAD+////AP4AAAD//PwqLi3YbNQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAA" "AAAAAAAAAAAA/gD/APwAAAMD9PjwKS4s2W3UAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAA" "AAABAAH6AQAAAPb69gkHC+ns9CgtLdlt1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAQAC+wLz" "+/P0+/QB+QHw8fEWEwvs6ecoLS/VcN8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA/////vv+9/32AwMDAvsC" "AQkABQEHAAAAAAAAAAAAAQEJ/2b4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAIFAgsIDObm8gYKBwUBBwEB" "AQEBDQEBAQEBCAAA+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAADz9vsjIBicn6UAAAAAABTd2Nj/" "ZgD/Zvn/ZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAABAAAA////8/X6DgwH/2YAAZoA////9fHwDBAR/2YAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAgAAAAAAAA4MB+Pk6wAAAAAAFMnFwgsPEAAEGLtQxAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAQAAAABAQHe3+YcghUAAADk59ocGRL////o4coYGx7/ZgAAAAAAAAAEoBoA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD8YOYAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAE/2YAAAAAAIcaAAAAAAAAG38SAAAQ8u7qFxodAAAAAAAAAAAAAAAAAAAAiodz+Pj4" "DAwMAQEBBQUF/Pz8+Pj47e3tCAgIg4aaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAA" "AAAAAAAAAAAAAAAAAAAAAPz/7wQBASEkOszP2ACXKAAAAAAAAAAAAFBQUCMjIwAAAAAA" "AAAAAAAAAAAAAHJycoeHh4OGmgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAA" "AAAAAAAAAAAAAAADZwH/ZgAAAMYAlygAAAAAAAAAAAAAAAAiIiLMzMwFBQULCwsAAAAB" "AQHn5+cXFxcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeHh46OjoAAAABQUF6enp+fn5CAgI" "BgYGISEhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAM7OzqysrA8PD4WFhYKCgvz8/AgICIKCgouL" "iwMGGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAH/ZgAAAAAAAAAAAAAAAAAAAAAAAAAA" 
"AAAAAAAAAAAAAAAAAAAAAAAEoBr9+uY2NjbKysogICDg4OAAAAAAAAAAAAAAAAADBhr8" "YOYAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAwcHBlZWVAAAA39/fZWVlAAAAysrKnZ2ds7OzAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAD4+PqysrAAAAAAAAJqamgAAAKmpqRMTE0xMTAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAtLS0AAADb29sEBAQAAAAxMTHt7e0AAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAB/2YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABKAa" "/PnlAAAADQ0NCQkJs7OzZmZm0dHRAAAAAAAABAcb/GDmAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMrKygEB" "AfT09Ovr65aWlmRkZAEBAURERAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA2Njb/////////" "///u7u79/f3n5+e8vLwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB/2YAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABKAaAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAA/GDmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAf9mAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADPbTswenz69gAAAABJRU5ErkJggg==" ) #=============================================================================== class ConfigData(eg.PersistentData): pos = None plcmnt = None class Text: label1 = "Radio?Sure! installation folder:" label2 = "RadioSure.xml and Scheduler.xml folder location:" # filemask = "RadioSure.exe|RadioSure.exe|All-Files (*.*)|*.*" text1 = "Couldn't find RadioSure window !" browseTitle = "Selected folder:" toolTipFolder = "Press button and browse to select folder ..." boxTitle = 'Folder "%s" is incorrect' toolTipFile = "Press button and browse to select logfile ..." browseFile = 'Select the logfile' boxMessage1 = 'Missing file %s !' logLabel = "Log scheduler events to following logfile:" # nextRun = "Next run: %s" none = "None" execut = 'Schedule "%s" - execution. Next run: %s' cmdLine = 'Commandline: %s' cancAndDel = 'Schedule "%s" canceled and deleted' cancAndDis = 'Schedule "%s" canceled (disabled)' newSched = 'Schedule "%s" scheduled. Next run: %s' re_Sched = 'Schedule "%s" re-scheduled. New next run: %s' start = 'RadioSure plugin started. All valid schedules will be scheduled:' stop = 'RadioSure plugin stoped. All scheduled schedules will be canceled:' canc = 'Schedule "%s" canceled' launched = "Schedule.Launched" holidButton = "Public holidays ..." managerButton = "Show scheduler" fixBoxLabel = "Fixed public holidays:" varBoxLabel = "Variable public holidays:" ok = "OK" cancel = "Cancel" yes = "Yes" no = "No" add = "Add" delete = "Delete" first_day = "The first day of the week:" xmlComment = "Radio?Sure! scheduler configuration file. Updated at %s." messBoxTit0 = "EventGhost - Radio?Sure! plugin" messBoxTit1 = "Attention !" message2 = """You can not start another instance of Radio?Sure!, because the maximum number of instances %i is exhausted!""" message3 = '''You can not start another instance of Radio?Sure!, because the option "Allow only one instance" is chosen!''' autoClose = "Auto close after %i s" toolTip = "Drag-and-drop an event from the log into the box." popup = ( "Delete item", "Delete all items", ) clear = "Clear all" opened = "Opened" closed = "Closed" root = "Main (root) menu" class OpenManager: dialogTitle = "Radio?Sure! 
Favorites manager %s (plugin for EventGhost)" toolTipDelete = "Delete item(s)" toolTipUp = "Move item(s) up" toolTipDown = "Move item(s) down" moveTop = "Move item(s) top" moveBottom = "Move item(s) bottom" exportSel = "Export selected item(s) to XML file" exportAll = "Export all items to XML file" toolTipExport = "Export selected (if any) or all items to XML file" toolTipImport = "Import from XML file" toolTipImportSR = "Import from Screamer Radio" sort = "Sort alphabetically" play = "Play selected favorite just now !" refresh = "Refresh favorites list from RadioSure.xml" export = "Export" imprt = "Import" importSR = "Import SR" lblSource = "Source:" lblGenre = "Genre:" lblLanguage = "Language:" lblCountry = "Country:" ok = "OK" cancel = "Cancel" apply = "Apply" lblList = "Favorites list:" xmlComment1 = "Radio?Sure! favorites backup file." xmlComment2 = 'Saved at %s by EventGhost.' choose = 'Choose a XML file to be import' save = 'Backup favorites as XML file ...' wildcard = "XML file (*.xml)|*.xml" removeDupl = "Remove duplications" messBoxTit2 = """Attention ! Radio?Sure! is running !""" messBoxTit3 = """Attention ! Recording is in progress !""" messBoxTit5 = "Congratulations!" messBoxTit6 = "Announcement" messBoxTit7 = "Warning" message1 = """Your version of Radio?Sure! allows you to save only the first %i favorite stations ! Other favorites will be ignored.""" message2 = """If you want to save the modified list of favorite stations, must be overwritten file RadioSure.xml. You can not overwrite the file RadioSure.xml, if the Radio?Sure! is currently running. Otherwise, the favorites list is returned to its original condition. Press button %s, if the program Radio?Sure! can be closed. Press button %s, if the program Radio?Sure! can not be closed.""" message3 = "Failed to save data to the file RadioSure.xml !" message4 = 'It is not possible to import because there is a problem.\n\ The file "%s" does not have the expected structure.' message5 = "Your list of favorite stations has been successfully updated!" message6 = "Failed to close Radio?Sure!" message7 = "Your list of favorite stations has not been updated!" message8 = """Your list of favorite stations contain (in sources) duplications! They will be saved only unique items.""" message9 = "Failed to open Radio?Sure!" class OpenScheduler: dialogTitle = "Radio?Sure! Scheduler %s (plugin for EventGhost)" header = ( "Enabled", "Schedule title", "Last run", "Next run", ) sched_type = ( "Only once (or yearly)", "Daily", "Weekly", "Monthly / weekday", "Monthly / day", "Periodically", ) toolTipFile = """Press button and browse to select file ... File type (as .mp3) need not be completed. Will be added automatically.""" browseTitle = "Select a folder and enter file name (without file type):" serial_num = ( "first", "second", "third", "fourth", "fifth", "last" ) the = "The" in_ = "in" buttons = ( "Add new", "Duplicate", "Delete", "OK", "Cancel", "Apply" ) type_label = "Schedule type:" source = "Source URL:" favorite = "Favorite station title:" filename = "Destination file name (optional):" chooseDay = "Choose day" theEvery = "The every" yearly = "Every year on the same day" chooseTime = "Choose start time and duration (00:00 = constantly)" choosePeriod = "Choose period" andThenEvery = "Repeat every" units = ( "hours", "days", "weeks", "months", "years", ) start = "Start time (HH:MM:SS):" length = "Duration (HH:MM):" boxTitle = "Your setup is not properly configured !" 
boxTexts = ( "Schedule title must not be an empty string !", "Schedule title must be unique !", 'Determine the source URL, or set the mode "Do nothing" !', 'Not allowed to set "Do nothing" while also "None" event !', 'Must be chosen Schedule type !', "The span must be shorter than the period !", ) workModeLabel = "Radio?Sure! working mode:" workModes = ( "Playing (audibly)", "Recording (audibly)", "Recording (soundlessly)", "Do nothing" ) windOpenLabel = "Window open:" windOpenChoices =( "Visible", "Hidden" ) triggEvtLabel = "Trigger an event:" triggEvtChoices = ( "None", "Schedule title", "All parameters" ) testButton = "Test now" testRun = 'Schedule "%s" - TEST execution. Possible next run: %s' holidCheck_1 = "Do not trigger events for a chosen day if it happens to be a holiday" holidCheck_2 = "Do also trigger events for a non-chosen day if it happens to be a holiday" popup = ( "Add schedule", "Duplicate schedule", "Delete schedule", "Enable all schedules", "Disable all schedules", "Move schedule up", "Move schedule down", ) #=============================================================================== def my_list2cmdline(seq): """ FIXING subprocess.list2cmdline Workaround, because subprocess.list2cmdline does not work with arguments like: filename="... ...". Ie, when we need quotes inside the string, and somewhere inside is a space character. When you properly prepare all items (including the quotes), it works! There is also done simultaneously filesystemencode encoding (otherwise there UnicodeDecodeError occurs...)""" return ' '.join([arg.encode(FSE) if isinstance(arg, unicode) else arg for arg in seq]) subprocess.list2cmdline = my_list2cmdline #=============================================================================== class MyDirBrowseButton(eg.DirBrowseButton): def GetTextCtrl(self): # now I can make build-in textCtrl return self.textControl # non-editable !!! #=============================================================================== class MyFileBrowseButton(eg.FileBrowseButton): def GetTextCtrl(self): # now I can make build-in textCtrl return self.textControl # non-editable !!! 
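# Illustrative note (not in the original plugin): with the list2cmdline
# monkeypatch above, the caller is expected to pre-quote any argument that
# needs quotes, since my_list2cmdline only joins the items with spaces and
# never escapes anything. Hypothetical values:
#
#     args = [u'RadioSure.exe', u'/record', u'/source="http://example.com/stream"']
#     Popen(args)
#     # command line -> RadioSure.exe /record /source="http://example.com/stream"
#
# The stock subprocess.list2cmdline would instead wrap the whole argument in
# quotes and backslash-escape the embedded ones, which is not what the
# Radio?Sure! command line expects.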
#=============================================================================== class MySpinIntCtrl(eg.SpinIntCtrl): def SetNumCtrlId(self, id): self.numCtrl.SetId(id) #=============================================================================== newEVT_BUTTON_AFTER = wx.NewEventType() EVT_BUTTON_AFTER = wx.PyEventBinder(newEVT_BUTTON_AFTER, 1) newEVT_UPDATE_DIALOG = wx.NewEventType() EVT_UPDATE_DIALOG = wx.PyEventBinder(newEVT_UPDATE_DIALOG, 1) newEVT_CHECKLISTCTRL = wx.NewEventType() EVT_CHECKLISTCTRL = wx.PyEventBinder(newEVT_CHECKLISTCTRL, 1) #=============================================================================== class UserEvent(wx.PyCommandEvent): def __init__(self, evtType, id): wx.PyCommandEvent.__init__(self, evtType, id) self.myVal = None def SetValue(self, val): self.myVal = val def GetValue(self): return self.myVal #=============================================================================== class ExtColourSelectButton(eg.ColourSelectButton): def __init__(self,*args,**kwargs): eg.ColourSelectButton.__init__(self, *args) self.title = kwargs['title'] def OnButton(self, event): colourData = wx.ColourData() colourData.SetChooseFull(True) colourData.SetColour(self.value) for i, colour in enumerate(eg.config.colourPickerCustomColours): colourData.SetCustomColour(i, colour) dialog = wx.ColourDialog(self.GetParent(), colourData) dialog.SetTitle(self.title) if dialog.ShowModal() == wx.ID_OK: colourData = dialog.GetColourData() self.SetValue(colourData.GetColour().Get()) event.Skip() eg.config.colourPickerCustomColours = [ colourData.GetCustomColour(i).Get() for i in range(16) ] dialog.Destroy() evt = UserEvent(newEVT_BUTTON_AFTER, self.GetId()) evt.SetValue(self.GetValue()) self.GetEventHandler().ProcessEvent(evt) #=============================================================================== class ExtFontSelectButton(eg.FontSelectButton): def OnButton(self, event): fontData = wx.FontData() fontData.EnableEffects(False) if self.value is not None: font = wx.FontFromNativeInfoString(self.value) fontData.SetInitialFont(font) else: fontData.SetInitialFont( wx.SystemSettings_GetFont(wx.SYS_ANSI_VAR_FONT) ) dialog = wx.FontDialog(self.GetParent(), fontData) if dialog.ShowModal() == wx.ID_OK: fontData = dialog.GetFontData() font = fontData.GetChosenFont() self.value = font.GetNativeFontInfo().ToString() event.Skip() dialog.Destroy() evt = UserEvent(newEVT_BUTTON_AFTER, self.GetId()) evt.SetValue(self.GetValue()) self.GetEventHandler().ProcessEvent(evt) #=============================================================================== class MessageBoxDialog(wx.Dialog): def __init__( self, parent, message, caption = eg.APP_NAME, flags=wx.OK, time=0, plugin=None, pos=wx.DefaultPosition ): PlaySound('SystemExclamation', SND_ASYNC) if parent is None and eg.document.frame: parent = eg.document.frame dialogStyle = wx.DEFAULT_DIALOG_STYLE if flags & wx.STAY_ON_TOP: dialogStyle |= wx.STAY_ON_TOP wx.Dialog.__init__(self, parent, -1, caption, pos, style=dialogStyle) self.SetTitle(plugin.text.messBoxTit0) self.SetIcon(plugin.info.icon.GetWxIcon()) bttns = [] if flags: art = None if flags & wx.ICON_EXCLAMATION: art = wx.ART_WARNING elif flags & wx.ICON_ERROR: art = wx.ART_ERROR elif flags & wx.ICON_QUESTION: art = wx.ART_QUESTION elif flags & wx.ICON_INFORMATION: art = wx.ART_INFORMATION if art is not None: bmp = wx.ArtProvider.GetBitmap(art, wx.ART_MESSAGE_BOX, (32,32)) icon = wx.StaticBitmap(self, -1, bmp) icon2 = wx.StaticBitmap(self, -1, bmp) else: icon = (32,32) icon2 = (32,32) flag = 
True if flags & wx.YES: default = False if not flags & wx.NO_DEFAULT: default = True flag = False bttns.append((wx.ID_YES, plugin.text.yes, default)) if flags & wx.NO: default = False if flags & wx.NO_DEFAULT: default = True flag = False bttns.append((wx.ID_NO, plugin.text.no, default)) if flags & wx.OK: bttns.append((wx.ID_OK, plugin.text.ok, flag)) if flags & wx.CANCEL: bttns.append((wx.ID_CANCEL, plugin.text.cancel, False)) if not flags & (wx.OK | wx.CANCEL | wx.YES | wx.NO): bttns.append((wx.ID_OK, plugin.text.ok, True)) else: bttns.append((wx.ID_OK, plugin.text.ok, True)) if caption: caption = wx.StaticText(self, -1, caption) # caption.SetFont(wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD)) caption.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) message = wx.StaticText(self, -1, message) line = wx.StaticLine(self, -1, size=(1,-1), style = wx.LI_HORIZONTAL) bottomSizer = wx.BoxSizer(wx.HORIZONTAL) bottomSizer.Add((10, 1)) if time: self.cnt = time txt = plugin.text.autoClose % self.cnt info = wx.StaticText(self, -1, txt) info.Enable(False) bottomSizer.Add(info, 0, wx.TOP, 3) def UpdateInfoLabel(evt): self.cnt -= 1 txt = plugin.text.autoClose % self.cnt info.SetLabel(txt) if not self.cnt: self.Close() self.Bind(wx.EVT_TIMER, UpdateInfoLabel) self.timer = wx.Timer(self) self.timer.Start(1000) else: self.timer = None bottomSizer.Add((5,1),1,wx.EXPAND) for bttn in bttns: b = wx.Button(self, bttn[0], bttn[1]) if bttn[2]: #b.SetDefault() defBtn = b # SetDefault() workaround bottomSizer.Add(b, 0, wx.LEFT, 5) bottomSizer.Add((10, 1)) topSizer = wx.BoxSizer(wx.HORIZONTAL) topSizer.Add(icon,0,wx.LEFT|wx.RIGHT,10) topSizer.Add((1,1),1,wx.EXPAND) topSizer.Add(caption,0,wx.TOP,5) topSizer.Add((1,1),1,wx.EXPAND) topSizer.Add(icon2,0,wx.LEFT|wx.RIGHT,10) mainSizer = wx.BoxSizer(wx.VERTICAL) mainSizer.Add(topSizer, 0, wx.EXPAND|wx.TOP|wx.BOTTOM,10) mainSizer.Add(message, 0, wx.EXPAND|wx.LEFT|wx.RIGHT,10) mainSizer.Add(line, 0, wx.EXPAND|wx.ALL,5) mainSizer.Add(bottomSizer, 0, wx.EXPAND|wx.BOTTOM,5) # SetDefault() workaround: defBtn.SetFocus() def OnButton(evt): self.SetReturnCode(evt.GetId()) self.Close() evt.Skip() wx.EVT_BUTTON(self, -1, OnButton) def onClose(evt): if self.GetReturnCode() not in (wx.ID_OK, wx.ID_CANCEL, wx.ID_YES, wx.ID_NO): self.SetReturnCode(wx.ID_CANCEL) if self.timer: self.timer.Stop() del self.timer self.MakeModal(False) self.GetParent().Raise() self.Destroy() self.Bind(wx.EVT_CLOSE, onClose) self.SetSizer(mainSizer) self.Fit() def MessageBox(parent, message, caption='', flags=0, time = 0, plugin = None): mssgbx = MessageBoxDialog(parent, message, caption, flags, time, plugin) val = mssgbx.ShowModal() return val #=============================================================================== class MyTimer(): def __init__(self, t, plugin): self.timer = Timer(t, self.Run) self.plugin = plugin self.timer.start() def Run(self): try: self.plugin.menuDlg.Close() self.plugin.menuDlg = None except: pass def Cancel(self): self.timer.cancel() #=============================================================================== class HolidaysFrame(wx.Dialog): fixWin = None varWin = None fixHolidays = [] varHolidays = [] def __init__(self, parent, plugin): self.plugin = plugin wx.Dialog.__init__( self, parent, -1, style = wx.DEFAULT_DIALOG_STYLE, name = self.plugin.text.holidButton ) self.SetIcon(self.plugin.info.icon.GetWxIcon()) self.panel = parent self.fixHolidays, self.varHolidays = cpy(self.panel.holidays) self.Bind(wxCal.EVT_CALENDAR_DAY, self.OnChangeDay) def ShowHolidaysFrame(self): text = 
self.plugin.text self.SetTitle(self.plugin.text.holidButton) self.fixWin = CalendarPopup(self, False, self.plugin.first_day) self.varWin = CalendarPopup(self, True, self.plugin.first_day) calW, calH = self.fixWin.GetWinSize() fixLbl = wx.StaticText(self, -1, text.fixBoxLabel) variableLbl = wx.StaticText(self, -1, text.varBoxLabel) widthList = [self.GetTextExtent("30. %s 2000" % month)[0] + SYS_VSCROLL_X for month in list(month_name)] widthList.append(fixLbl.GetSize()[0]) widthList.append(variableLbl.GetSize()[0]) w = max(widthList) + 5 self.SetMinSize((w + calW + 30, 2 * calH + 128)) self.fixListBox = HolidaysBox( self, -1, size = wx.Size(w, 130), style = wx.LB_SINGLE|wx.LB_NEEDED_SB ) self.fix_add_Btn = wx.Button(self, -1, text.add) self.fix_del_Btn = wx.Button(self, -1, text.delete) self.fix_del_Btn.Enable(False) self.varListBox = HolidaysBox( self, -1, size = wx.Size(w, 130), style = wx.LB_SINGLE|wx.LB_NEEDED_SB ) self.var_add_Btn = wx.Button(self, -1, text.add) self.var_del_Btn = wx.Button(self, -1, text.delete) self.var_del_Btn.Enable(False) line = wx.StaticLine(self, -1, style = wx.LI_HORIZONTAL) sizer = wx.BoxSizer(wx.VERTICAL) fixSizer = wx.GridBagSizer(2, 8) fixSizer.SetMinSize((w + 8 + calW, -1)) varSizer = wx.GridBagSizer(2, 8) varSizer.SetMinSize((w + 8 + calW, -1)) fixSizer.Add(fixLbl, (0, 0)) fixSizer.Add(self.fixListBox, (1, 0), (3, 1)) fixSizer.Add(self.fix_add_Btn, (1, 1)) fixSizer.Add((-1, 15), (2, 1)) fixSizer.Add(self.fix_del_Btn, (3, 1)) varSizer.Add(variableLbl, (0, 0)) varSizer.Add(self.varListBox, (1, 0), (3,1)) varSizer.Add(self.var_add_Btn, (1, 1)) varSizer.Add((-1, 15), (2, 1)) varSizer.Add(self.var_del_Btn, (3, 1)) sizer.Add(fixSizer, 0, wx.EXPAND|wx.ALL, 8) sizer.Add((-1, 12)) sizer.Add(varSizer, 0, wx.EXPAND|wx.ALL, 8) sizer.Add((1, 16)) btn1 = wx.Button(self, wx.ID_OK) btn1.SetLabel(text.ok) btn1.SetDefault() btn2 = wx.Button(self, wx.ID_CANCEL) btn2.SetLabel(text.cancel) btnsizer = wx.StdDialogButtonSizer() btnsizer.AddButton(btn1) btnsizer.AddButton(btn2) btnsizer.Realize() sizer.Add(line, 0, wx.EXPAND) sizer.Add((1,5)) sizer.Add(btnsizer, 0, wx.EXPAND|wx.RIGHT, 10) sz = self.GetMinSize() self.SetSize(sz) self.fixListBox.Reset(self.fixHolidays) self.varListBox.Reset(self.varHolidays) self.Bind(wx.EVT_CLOSE, self.onClose) btn2.Bind(wx.EVT_BUTTON, self.onCancel) btn1.Bind(wx.EVT_BUTTON, self.onOK) self.fix_add_Btn.Bind(wx.EVT_BUTTON, self.onFixAddBtn) self.var_add_Btn.Bind(wx.EVT_BUTTON, self.onVarAddBtn) self.fix_del_Btn.Bind(wx.EVT_BUTTON, self.onFixDelBtn) self.var_del_Btn.Bind(wx.EVT_BUTTON, self.onVarDelBtn) self.Bind(wx.EVT_LISTBOX, self.onHolBoxSel) sizer.Layout() self.SetSizer(sizer) self.MakeModal(True) self.Show(True) def onClose(self, evt): self.MakeModal(False) self.GetParent().GetParent().Raise() self.Destroy() def onCancel(self, evt): self.Close() def onOK(self, evt): self.panel.holidays = (self.fixHolidays, self.varHolidays) self.Close() def onHolBoxSel(self, evt): if evt.GetId() == self.fixListBox.GetId(): self.fix_del_Btn.Enable(True) else: self.var_del_Btn.Enable(True) evt.Skip() def onFixAddBtn(self, evt): pos = self.ClientToScreen(self.fix_add_Btn.GetPosition()) self.fixWin.PopUp(pos, self.fixHolidays) def onVarAddBtn(self, evt): pos = self.ClientToScreen(self.var_add_Btn.GetPosition()) self.varWin.PopUp(pos, self.varHolidays) def onFixDelBtn(self, evt): self.fixHolidays.pop(self.fixListBox.GetSelection()) if self.fixListBox.Reset(self.fixHolidays): self.fix_del_Btn.Enable(False) def onVarDelBtn(self, evt): 
self.varHolidays.pop(self.varListBox.GetSelection()) if self.varListBox.Reset(self.varHolidays): self.var_del_Btn.Enable(False) def OnChangeDay(self, evt): if evt.GetId() == self.fixWin.GetCalId(): self.fixListBox.Reset(self.fixHolidays) else: self.varListBox.Reset(self.varHolidays) evt.Skip() #=============================================================================== class HolidaysBox(wx.ListBox): def __init__ (self, parent, id, size, style): wx.ListBox.__init__( self, parent = parent, id = id, size = size, style = style ) self.sel = -1 self.Bind(wx.EVT_LISTBOX, self.onHolBoxSel) def Reset(self, list): tmpList = [] for item in list: day = item[-1] day = " %i" % day if day < 10 else "%i" % day if len(item) == 2: tmpList.append("%s. %s" % (day, month_name[item[0]])) else: tmpList.append("%s. %s %i" % (day, month_name[item[1]], item[0])) self.Set(tmpList) if self.sel > -1 and self.sel < self.GetCount(): self.SetSelection(self.sel) return False else: return True def onHolBoxSel(self, evt): self.sel = evt.GetSelection() evt.Skip() #=============================================================================== class CalendarPopup(wx.PopupWindow): yearChange = True def __init__(self, parent, yearChange, first_day): self.yearChange = yearChange wx.PopupWindow.__init__(self, parent) startDate = wx.DateTime() startDate.Set(1, 0) self.cal = wxCal.CalendarCtrl( self, -1, startDate, style = (wxCal.CAL_MONDAY_FIRST, wxCal.CAL_SUNDAY_FIRST)[first_day] | wxCal.CAL_SHOW_HOLIDAYS | wxCal.CAL_SEQUENTIAL_MONTH_SELECTION | wxCal.CAL_SHOW_SURROUNDING_WEEKS ) self.cal.EnableYearChange(yearChange) sz = self.cal.GetBestSize() self.SetSize(sz) self.cal.Bind(wxCal.EVT_CALENDAR_DAY, self.OnChangeDay) self.cal.Bind(wxCal.EVT_CALENDAR_MONTH, self.OnChangeMonth) self.cal.Bind(wxCal.EVT_CALENDAR_YEAR, self.OnChangeMonth) self.cal.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow) def OnLeaveWindow(self, evt): self.PopDown() evt.Skip() def GetCalId(self): return self.cal.GetId() def GetWinSize(self): return self.GetSize() def OnChangeDay(self, evt): date = evt.GetDate() day, month, year = date.GetDay(), 1 + date.GetMonth(), date.GetYear() newHoliday = (year, month, day) if self.yearChange else (month, day) if not newHoliday in self.holidays: self.holidays.append(newHoliday) self.holidays.sort() date = self.cal.GetDate() self.cal.SetHoliday(day) date.AddDS(wx.DateSpan.Day()) self.cal.SetDate(date) self.Refresh() evt.Skip() def OnChangeMonth(self, evt = None): date = self.cal.GetDate() cur_month = date.GetMonth() + 1 # convert wx.DateTime 0-11 => 1-12 if self.yearChange: cur_year = date.GetYear() for year, month, day in self.holidays: if year == cur_year and month == cur_month: self.cal.SetHoliday(day) else: for month, day in self.holidays: if month == cur_month: self.cal.SetHoliday(day) def PopUp(self, position, holidays): self.cal.EnableHolidayDisplay(False) self.cal.EnableHolidayDisplay(True) self.SetPosition(position) self.holidays = holidays self.OnChangeMonth() self.Show(True) def PopDown(self): self.Show(False) self.Close() #=============================================================================== class CheckListCtrl(wx.ListCtrl, CheckListCtrlMixin): def __init__(self, parent, text, width): wx.ListCtrl.__init__( self, parent, -1, size = (width, 164), style = wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES|wx.LC_SINGLE_SEL ) curFile = abspath(join(dirname(__file__), "contextCursor.cur")) img = None if exists(curFile): img = wx.EmptyImage(32, 32) img.LoadFile(curFile, wx.BITMAP_TYPE_CUR) if not img or not img.IsOk(): 
from cStringIO import StringIO from base64 import b64decode stream = StringIO(b64decode(CUR_STRING)) img = wx.ImageFromStream(stream) stream.close() img.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_X, 0) img.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_Y, 0) self.SetCursor(wx.CursorFromImage(img)) self.selRow = -1 self.back = self.GetBackgroundColour() self.fore = self.GetForegroundColour() self.selBack = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT) self.selFore = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT) for i in range(len(text.header)): self.InsertColumn(i, text.header[i]) self.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER) self.SetColumnWidth( 1, width - self.GetColumnWidth(0) - 2 * 116 - SYS_VSCROLL_X - self.GetWindowBorderSize()[0] ) self.SetColumnWidth(2, 116) self.SetColumnWidth(3, 116) CheckListCtrlMixin.__init__(self) self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected) def OnItemSelected(self, evt): self.SelRow(evt.m_itemIndex) evt.Skip() # this is called by the base class when an item is checked/unchecked !!!!!!! def OnCheckItem(self, index, flag): evt = UserEvent(newEVT_CHECKLISTCTRL, self.GetId()) evt.SetValue((index, flag)) self.GetEventHandler().ProcessEvent(evt) def SelRow(self, row): if row != self.selRow: if self.selRow in range(self.GetItemCount()): item = self.GetItem(self.selRow) item.SetTextColour(self.fore) item.SetBackgroundColour(self.back) self.SetItem(item) self.selRow = row if self.GetItemBackgroundColour(row) != self.selBack: item = self.GetItem(row) item.SetTextColour(self.selFore) item.SetBackgroundColour(self.selBack) self.SetItem(item) self.SetItemState(row, 0, wx.LIST_STATE_SELECTED) def AppendRow(self): ix = self.GetItemCount() self.InsertStringItem(ix, "") self.CheckItem(ix) self.EnsureVisible(ix) self.SelRow(ix) #=============================================================================== class ManagerDialog(wx.Dialog): def __init__(self, text, plugin): wx.Dialog.__init__( self, None, -1, text.dialogTitle % version, style = wx.DEFAULT_DIALOG_STYLE|wx.MINIMIZE_BOX|wx.CLOSE_BOX|wx.RESIZE_BORDER, ) #self.plugin = eg.Utils.GetPlugin(self) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
self.plugin = plugin statusRS = self.plugin.GetStatusRS() self.idUp = wx.NewId() self.idDown = wx.NewId() self.idTop = wx.NewId() self.idBottom = wx.NewId() self.idSort = wx.NewId() self.idRefr = wx.NewId() self.idPlay = wx.NewId() self.SetIcon(self.plugin.info.icon.GetWxIcon()) self.plugin.manager = self self.text = text self.Bind(wx.EVT_CLOSE, self.onClose) statPath = self.plugin.RadioSurePath+"\\Stations" rsd_files = [x for x in listdir(statPath) if x.endswith('.rsd') and x.startswith('stations-')] stations = statPath+"\\"+rsd_files[0] def unique(seq): res = set(seq) res = list(res) res.sort() return res f = openFile(stations, encoding='utf-8', mode='r') data = self.data = [item.split("\t") for item in f.readlines()] genres = [item[2] for item in data] genres = unique(genres) countrys = [item[3] for item in data] countrys = unique(countrys) languages = [item[4] for item in data] languages = unique(languages) titles = [item[0] for item in data] titles = unique(titles) f.close() curFile = abspath(join(dirname(__file__), "contextCursor.cur")) img = None if exists(curFile): img = wx.EmptyImage(32, 32) img.LoadFile(curFile, wx.BITMAP_TYPE_CUR) if not img or not img.IsOk(): from cStringIO import StringIO from base64 import b64decode stream = StringIO(b64decode(CUR_STRING)) img = wx.ImageFromStream(stream) stream.close() img.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_X, 0) img.SetOptionInt(wx.IMAGE_OPTION_CUR_HOTSPOT_Y, 0) self.grid = wx.ListCtrl(self, style = wx.LC_REPORT|wx.LC_NO_HEADER|wx.LC_HRULES|wx.LC_VRULES) self.grid.SetCursor(wx.CursorFromImage(img)) self.grid.InsertColumn(0,"") #Button UP bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_OTHER, (16, 16)) btnUP = wx.BitmapButton(self, self.idUp, bmp) btnUP.SetToolTipString(self.text.toolTipUp) #Button DOWN bmp = wx.ArtProvider.GetBitmap(wx.ART_GO_DOWN, wx.ART_OTHER, (16, 16)) btnDOWN = wx.BitmapButton(self, self.idDown, bmp) btnDOWN.SetToolTipString(self.text.toolTipDown) #Button DEL bmp = wx.ArtProvider.GetBitmap(wx.ART_DELETE, wx.ART_OTHER, (16, 16)) btnDEL = wx.BitmapButton(self, -1, bmp) btnDEL.SetToolTipString(self.text.toolTipDelete) btnExp = wx.Button(self, wx.ID_SAVEAS, self.text.export) btnExp.SetToolTipString(self.text.toolTipExport) btnImp = wx.Button(self, wx.ID_OPEN, self.text.imprt) btnImp.SetToolTipString(self.text.toolTipImport) btnImpSR = wx.Button(self, wx.ID_FILE, self.text.importSR) btnImpSR.SetToolTipString(self.text.toolTipImportSR) bmp = wx.ArtProvider.GetBitmap(wx.ART_HELP_SETTINGS, wx.ART_OTHER, (16, 16)) btnSort = wx.BitmapButton(self, self.idSort, bmp) btnSort.SetToolTipString(self.text.sort) bmp = wx.ArtProvider.GetBitmap(wx.ART_REDO, wx.ART_OTHER, (16, 16)) btnRefr = wx.BitmapButton(self, self.idRefr, bmp) btnRefr.SetToolTipString(self.text.refresh) def EnableCtrls(): first = self.grid.GetFirstSelected() cnt = self.grid.GetSelectedItemCount() subseq = True for ix in range(first, first + cnt): if not self.grid.IsSelected(ix): subseq = False break one = cnt==1 self.menuFlagM = subseq itemCnt = self.grid.GetItemCount() upDown = cnt > 0 and cnt < itemCnt and subseq sourceLabel.Enable(one) genreLabel.Enable(one) langLabel.Enable(one) countryLabel.Enable(one) sourceCtrl.Enable(one) btnUP.Enable(upDown) btnDOWN.Enable(upDown) btnDEL.Enable(cnt > 0) btnExp.Enable(itemCnt > 0) btnSort.Enable(itemCnt > 1) def ListSelection(event=None): EnableCtrls() first = self.grid.GetFirstSelected() cnt = self.grid.GetSelectedItemCount() if cnt == 1: item = self.tmpFavs[first] src = item[0] sourceCtrl.Clear() srcs = () i = -1 for 
ix in range(5, 11): srcIx = [itm[ix] for itm in data] if src in srcIx: i = srcIx.index(src) break if i > -1: srcs = data[i][5:] sourceCtrl.AppendItems(srcs) if not src in srcs: sourceCtrl.Append(src) sourceCtrl.SetStringSelection(src) if item[2] in genres: genreCtrl.SetStringSelection(item[2]) if item[3] in languages: langCtrl.SetStringSelection(item[3]) if item[4] in countrys: countryCtrl.SetStringSelection(item[4]) else: sourceCtrl.SetSelection(-1) genreCtrl.SetSelection(-1) langCtrl.SetSelection(-1) countryCtrl.SetSelection(-1) if event: event.Skip() self.grid.Bind(wx.EVT_LIST_ITEM_SELECTED, ListSelection) self.grid.Bind(wx.EVT_LIST_ITEM_DESELECTED, ListSelection) def onRefresh(evt = None, seq = None): self.favs = seq if seq else self.plugin.RefreshVariables() self.tmpFavs = cpy(self.favs) self.grid.DeleteAllItems() for row in range(len(self.tmpFavs)): self.grid.InsertStringItem(row, self.tmpFavs[row][1]) self.grid.SetColumnWidth(0, -1) self.grid.SetColumnWidth(0, self.grid.GetColumnWidth(0) + 6) ListSelection() self.Diff() EnableCtrls() #evt.Skip btnRefr.Bind(wx.EVT_BUTTON, onRefresh) def onSort(evt): self.tmpFavs = sorted(self.tmpFavs, key=lambda i: strxfrm(i[1].encode(eg.systemEncoding))) self.grid.DeleteAllItems() for row in range(len(self.tmpFavs)): self.grid.InsertStringItem(row, self.tmpFavs[row][1]) ListSelection() self.Diff() self.Colour() btnSort.Bind(wx.EVT_BUTTON, onSort) sourceLabel = wx.StaticText(self, -1, self.text.lblSource) genreLabel = wx.StaticText(self, -1, self.text.lblGenre) langLabel = wx.StaticText(self, -1, self.text.lblLanguage) countryLabel = wx.StaticText(self, -1, self.text.lblCountry) sourceCtrl = wx.Choice(self, -1, choices=[]) genreCtrl = wx.Choice(self, -1, choices=genres) langCtrl = wx.Choice(self, -1, choices=languages) countryCtrl = wx.Choice(self, -1, choices=countrys) genreCtrl.Enable(False) langCtrl.Enable(False) countryCtrl.Enable(False) line = wx.StaticLine(self, -1, style=wx.LI_HORIZONTAL) btn1 = wx.Button(self, wx.ID_OK, self.text.ok) btn1.SetDefault() btn2 = wx.Button(self, wx.ID_CANCEL, self.text.cancel) btn3 = wx.Button(self, wx.ID_APPLY, self.text.apply) btn1.Bind(wx.EVT_BUTTON, self.onBtn) btn2.Bind(wx.EVT_BUTTON, self.onBtn) btn3.Bind(wx.EVT_BUTTON, self.onBtn) btnExp.Bind(wx.EVT_BUTTON, self.onBtnsInOut) btnImp.Bind(wx.EVT_BUTTON, self.onBtnsInOut) btnImpSR.Bind(wx.EVT_BUTTON, self.onBtnsInOut) btnsizer = wx.BoxSizer(wx.HORIZONTAL) btnsizer.Add(btnExp,0,wx.LEFT) btnsizer.Add((8,-1),0) btnsizer.Add(btnImp,0,wx.CENTER) btnsizer.Add((8,-1),0) btnsizer.Add(btnImpSR,0,wx.CENTER) btnsizer.Add((-1,-1),1) btnsizer.Add(btn1,0,wx.CENTER) btnsizer.Add((8,-1),0) btnsizer.Add(btn2,0,wx.CENTER) btnsizer.Add((8,-1),0) btnsizer.Add(btn3,0,wx.RIGHT) btnsizer.Layout() w = btn1.GetSize()[0]+btn2.GetSize()[0]+btn3.GetSize()[0]+btnExp.GetSize()[0]+btnImp.GetSize()[0]+btnImpSR.GetSize()[0]+5*8 w1 = btnUP.GetSize()[0]+8 onRefresh() self.grid.SetMinSize((w-w1,-1)) szr = wx.BoxSizer(wx.VERTICAL) sizer = wx.GridBagSizer(1,5) sizer.AddGrowableCol(1) sizer.AddGrowableRow(7) sizer.Add(wx.StaticText(self, -1, self.text.lblList),(0,0),(1,2)) sizer.Add(self.grid, (1,0), (7, 2), wx.EXPAND, 5) sizer.Add(btnUP, (1,2), (1, 1),flag=wx.RIGHT) sizer.Add(btnDOWN, (2,2), (1, 1),flag=wx.RIGHT) sizer.Add(btnDEL, (3,2), (1, 1),flag=wx.RIGHT) sizer.Add((5,20), (4,2), (1, 1),flag=wx.RIGHT) sizer.Add(btnRefr, (5,2), (1, 1),flag=wx.RIGHT) sizer.Add(btnSort, (6,2), (1, 1),flag=wx.RIGHT) sizer.Add(sourceLabel, (8,0), (1, 1),wx.TOP, 10) sizer.Add(sourceCtrl, (8,1), (1, 2), 
wx.EXPAND|wx.TOP, 5) sizer.Add(genreLabel, (9,0), (1, 1),wx.TOP, 10) sizer.Add(genreCtrl, (9,1), (1, 2), wx.EXPAND|wx.TOP, 5) sizer.Add(langLabel, (10,0), (1, 1),wx.TOP, 10) sizer.Add(langCtrl, (10,1), (1, 2), wx.EXPAND|wx.TOP, 5) sizer.Add(countryLabel, (11,0), (1, 1),wx.TOP, 10) sizer.Add(countryCtrl, (11,1), (1, 2), wx.EXPAND|wx.TOP, 5) szr.Add(sizer, 1, wx.EXPAND|wx.ALL, 5) szr.Add(line, 0, wx.EXPAND|wx.TOP, 3) szr.Add(btnsizer, 0, wx.EXPAND|wx.ALL, 5) self.SetSizer(szr) self.Fit() #Learn New MINSIZE: #==================== if ConfigData.plcmnt: # if 0: self.SetPosition(ConfigData.plcmnt[0]) sz = ConfigData.plcmnt[1] minsz = ConfigData.plcmnt[2] else: self.Center() sz = (w+w1, self.GetSize()[1] + btn1.GetSize()[1] + 10) minsz = sz self.SetMinSize(minsz) self.SetSize(sz) self.Show(True) def onSource(evt): if self.grid.GetSelectedItemCount() == 1: self.tmpFavs[self.grid.GetFirstSelected()][0] = evt.GetString() self.Diff() sourceCtrl.Bind(wx.EVT_CHOICE, onSource) def Move(evt): id = evt.GetId() first = self.grid.GetFirstSelected() cnt = self.grid.GetSelectedItemCount() if id == self.idUp: if first: bit = self.tmpFavs.pop(first-1) self.tmpFavs.insert(first-1+cnt, bit) else: id = self.idBottom elif id == self.idDown: if first+cnt < len(self.tmpFavs): bit = self.tmpFavs.pop(first+cnt) self.tmpFavs.insert(first, bit) else: id = self.idTop if id in (self.idBottom, self.idTop): p1=self.tmpFavs[:first] p2=self.tmpFavs[first:first+cnt] p3=self.tmpFavs[first+cnt:] if id == self.idTop: p2.extend(p1) p2.extend(p3) self.tmpFavs = p2 elif id == self.idBottom: p1.extend(p3) p1.extend(p2) self.tmpFavs = p1 self.grid.DeleteAllItems() for row in range(len(self.tmpFavs)): self.grid.InsertStringItem(row, self.tmpFavs[row][1]) if id == self.idUp: if first: b, e = (first-1, first-1+cnt) elif id == self.idDown: if first+cnt < len(self.tmpFavs): b, e = (first+1,first+1+cnt) elif id == self.idBottom: ln = len(self.tmpFavs) b, e = (ln-cnt, ln) elif id == self.idTop: b, e = (0, cnt) for ix in range(b, e): self.grid.Select(ix, True) self.grid.EnsureVisible(ix) self.Diff() self.Colour() btnUP.Bind(wx.EVT_BUTTON, Move) btnDOWN.Bind(wx.EVT_BUTTON, Move) def onRemDupl(evt): indexes=dict(map(None,[item[0] for item in self.tmpFavs],range(len(self.tmpFavs)))).values() indexes.sort() tmp = [] for ix in indexes: tmp.append(self.tmpFavs[ix]) onRefresh(None, tmp) self.Diff() self.Colour() def onDelete(evt): cnt = self.grid.GetItemCount() for ix in range(cnt-1, -1, -1): if self.grid.IsSelected(ix): self.grid.DeleteItem(ix) self.tmpFavs.pop(ix) EnableCtrls() self.Diff() self.Colour() btnDEL.Bind(wx.EVT_BUTTON, onDelete) def onPlayNow(evt): ix = self.grid.GetFirstSelected() self.plugin.RefreshVariables() sel = self.tmpFavs[ix][1] src = sourceCtrl.GetStringSelection() rsList = [item[1] for item in self.plugin.Favorites] hwnds = HandleRS() indx = None if sel in [item[1] for item in self.plugin.Favorites]: indx = rsList.index(sel) if src != self.plugin.Favorites[indx][0]: indx = None if indx is not None: # start with favorite index if not hwnds: hwnds = self.plugin.GetNewHwnd() if hwnds: SendMessage(hwnds[0], WM_COMMAND, 4102+indx, 0) else: self.FailedToOpen() else: for hwnd in hwnds: x, rec = self.plugin.GetStatusRS([hwnd]) if rec != 1: SendMessage(hwnd, WM_COMMAND, 4102+indx, 0) break if rec or rec is None: hwnds = self.plugin.GetNewHwnd(hwnds) if hwnds: SendMessage(hwnds[0], WM_COMMAND, 4102+indx, 0) else: self.FailedToOpen() else: #start with source="blablabla" if not hwnds: hwnds = self.plugin.GetNewHwnd(hwnds, src=src) if not 
hwnds: self.FailedToOpen() else: for hwnd in hwnds: x, rec = self.plugin.GetStatusRS([hwnd]) if rec != 1: PostMessage(hwnd, WM_COMMAND, 1, 0) #close i = 0 while hwnd in hwnds and i < 100: hwnds = HandleRS() i += 1 if i == 100: self.PrintError(self.text.message6) rec = 1 else: hwnds = self.plugin.GetNewHwnd(hwnds, src=src) if not hwnds: self.FailedToOpen() rec = 1 else: break if rec or rec is None: hwnds = self.plugin.GetNewHwnd(hwnds, src=src) if not hwnds: self.FailedToOpen() self.grid.Bind(wx.EVT_LIST_ITEM_ACTIVATED, onPlayNow) def AreDuplications(): srcList = [item[0] for item in self.tmpFavs] return len(srcList) > len(set(srcList)) def OnRightClick(evt): if not hasattr(self, "popupID1"): self.popupID1 = wx.NewId() self.popupID2 = wx.NewId() self.Bind(wx.EVT_MENU, onDelete, id = self.popupID1) self.Bind(wx.EVT_MENU, onRemDupl, id = self.popupID2) self.Bind(wx.EVT_MENU, Move, id = self.idUp) self.Bind(wx.EVT_MENU, Move, id = self.idDown) self.Bind(wx.EVT_MENU, Move, id = self.idTop) self.Bind(wx.EVT_MENU, Move, id = self.idBottom) self.Bind(wx.EVT_MENU, onPlayNow, id = self.idPlay) self.Bind(wx.EVT_MENU, onSort, id = self.idSort) self.Bind(wx.EVT_MENU, onRefresh, id = self.idRefr) self.Bind(wx.EVT_MENU, self.onBtnsInOut, id = wx.ID_SAVEAS) self.Bind(wx.EVT_MENU, self.onBtnsInOut, id = wx.ID_SAVE) self.Bind(wx.EVT_MENU, self.onBtnsInOut, id = wx.ID_OPEN) self.Bind(wx.EVT_MENU, self.onBtnsInOut, id = wx.ID_FILE) menu = wx.Menu() if self.grid.GetSelectedItemCount() == 1: menu.Append(self.idPlay, self.text.play) menu.AppendSeparator() menu.Append(self.popupID1, self.text.toolTipDelete) if AreDuplications(): menu.Append(self.popupID2, self.text.removeDupl) if self.grid.GetItemCount() > 1: menu.Append(self.idSort, self.text.sort) if self.menuFlagM: menu.AppendSeparator() menu.Append(self.idUp, self.text.toolTipUp) menu.Append(self.idDown, self.text.toolTipDown) menu.Append(self.idTop, self.text.moveTop) menu.Append(self.idBottom, self.text.moveBottom) menu.AppendSeparator() menu.Append(self.idRefr, self.text.refresh) menu.Append(wx.ID_SAVEAS, self.text.exportSel) menu.Append(wx.ID_SAVE, self.text.exportAll) menu.Append(wx.ID_OPEN, self.text.toolTipImport) menu.Append(wx.ID_FILE, self.text.toolTipImportSR) self.PopupMenu(menu) menu.Destroy() evt.Skip() self.grid.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, OnRightClick) def FailedToOpen(self): return MessageBox( self, self.text.message9, #failed to open self.text.messBoxTit6, wx.ICON_EXCLAMATION, 15, plugin = self.plugin, ) def CreateFavorites(self, dom, node, itmList = None, save = False): max = self.plugin.maxFav mssgs = [] if save: #Duplications check indexes = dict(map(None,[item[0] for item in self.tmpFavs],range(len(self.tmpFavs)))).values() indexes.sort() tmp = [] for ix in indexes: tmp.append(self.tmpFavs[ix]) itmList = range(len(tmp)) if len(self.tmpFavs) > len(tmp): mssgs.append(self.text.message8) else: tmp = self.tmpFavs flag = save and len(itmList) > max if flag: mssgs.append(self.text.message1 % self.plugin.maxFav) if mssgs: MessageBox( self, "\n".join(mssgs), self.plugin.text.messBoxTit1, wx.ICON_EXCLAMATION, plugin = self.plugin, ) elm = 0 for i in itmList: elm += 1 if flag and elm > max: break item = tmp[i] itemNode = dom.createElement(u'Item-%i' % elm) sourceNode = dom.createElement(u'Source') sourceText = dom.createTextNode(unicode(item[0])) sourceNode.appendChild(sourceText) itemNode.appendChild(sourceNode) titleNode = dom.createElement(u'Title') titleText = dom.createTextNode(unicode(item[1])) titleNode.appendChild(titleText) 
            itemNode.appendChild(titleNode)
            genreNode = dom.createElement(u'Genre')
            genreText = dom.createTextNode(unicode(item[2]))
            genreNode.appendChild(genreText)
            itemNode.appendChild(genreNode)
            languageNode = dom.createElement(u'Language')
            languageText = dom.createTextNode(unicode(item[3]))
            languageNode.appendChild(languageText)
            itemNode.appendChild(languageNode)
            countryNode = dom.createElement(u'Country')
            countryText = dom.createTextNode(unicode(item[4]))
            countryNode.appendChild(countryText)
            itemNode.appendChild(countryNode)
            node.appendChild(itemNode)

    def UpdateRadioSureXml(self):
        # create a backup of original file
        new_file_name = u'%s\\RadioSure.xml' % self.plugin.xmlpath
        old_file_name = new_file_name + "~"
        if exists(old_file_name):
            remove(old_file_name)
        rename(new_file_name, old_file_name)
        try:
            # change Favorites node
            doc = miniDom.parse(old_file_name)
            node = doc.getElementsByTagName('XMLConfigSettings')[0]
            oldFavorites = node.getElementsByTagName('Favorites')[0]
            newFavorites = doc.createElement(u'Favorites')
            self.CreateFavorites(doc, newFavorites, save = True)
            node.replaceChild(newFavorites, oldFavorites)
            # persist changes to new file
            f = file(new_file_name, "wb")
            writer = lookup('utf-8')[3](f)
            doc.writexml(writer, encoding = 'utf-8')
            f.close()
            MessageBox(
                self,
                self.text.message5, #updated
                self.text.messBoxTit5,
                wx.ICON_INFORMATION,
                15,
                plugin = self.plugin,
            )
            return True
        except:
            # writing failed: report it and restore the backed-up original
            MessageBox(
                self,
                self.text.message3,
                self.plugin.text.messBoxTit1,
                wx.ICON_EXCLAMATION,
                plugin = self.plugin,
            )
            if exists(new_file_name):
                remove(new_file_name)
            rename(old_file_name, new_file_name)
            return False

    def onBtn(self, evt):

        def UpdateXml():
            closeFlag = self.UpdateRadioSureXml()
            rs = u'%s\\RadioSure.exe' % self.plugin.RadioSurePath
            rs = rs.encode(FSE) if isinstance(rs, unicode) else rs
            args = [rs]
            if isfile(rs):
                Popen(args)
            return closeFlag

        closeFlag = False
        id = evt.GetId()
        if id == wx.ID_APPLY or (id == wx.ID_OK and self.favs != self.tmpFavs):
            hwnds = HandleRS()
            rec = 0
            for hwnd in hwnds:
                rec = self.plugin.GetStatusRS([hwnd])[1]
                if rec:
                    break
            title = self.text.messBoxTit3 if rec else self.text.messBoxTit2
            if hwnds: # RS is running !
                res = MessageBox(
                    self,
                    self.text.message2 % (self.plugin.text.yes, self.plugin.text.no),
                    title,
                    wx.ICON_EXCLAMATION|wx.YES_NO|wx.YES_DEFAULT,
                    plugin = self.plugin,
                )
                if res == wx.ID_YES:
                    for hwnd in hwnds:
                        rec = self.plugin.GetStatusRS([hwnd])[1]
                        if rec:
                            PostMessage(hwnd, WM_COMMAND, 1051, 0) # Stop Rec
                            i = 0
                            while rec and i < 100:
                                i += 1
                                rec = self.plugin.GetStatusRS([hwnd])[1]
                            if not rec:
                                PostMessage(hwnd, WM_COMMAND, 1, 0) # Close
                        else:
                            PostMessage(hwnd, WM_COMMAND, 1, 0) # Close
                    i = 0
                    while hwnds and i < 100:
                        i += 1
                        hwnds = HandleRS()
                    if hwnds:
                        pid = eg.WinApi.Utils.PyGetWindowThreadProcessId(hwnd)[1]
                        handle = _kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
                        succ = _kernel32.TerminateProcess(handle, -1)
                        _kernel32.CloseHandle(handle)
                        if not succ:
                            MessageBox(
                                self,
                                self.text.message6, #failed to close
                                self.text.messBoxTit6,
                                wx.ICON_EXCLAMATION,
                                15,
                                plugin = self.plugin,
                            )
                        else:
                            closeFlag = UpdateXml()
                    else:
                        closeFlag = UpdateXml()
                else:
                    MessageBox(
                        self,
                        self.text.message7, #no update
                        self.text.messBoxTit7,
                        wx.ICON_EXCLAMATION,
                        15,
                        plugin = self.plugin,
                    )
            else:
                closeFlag = self.UpdateRadioSureXml()
        if id == wx.ID_APPLY and closeFlag:
            self.favs = cpy(self.tmpFavs)
            self.Diff()
        if id != wx.ID_APPLY:
            if id != wx.ID_OK or closeFlag or self.favs == self.tmpFavs:
                self.Close()
        #evt.Skip()

    def Import(self, data):
        # ToDo: Add check of duplications ???
        self.tmpFavs.extend(data)
        self.grid.DeleteAllItems()
        for row in range(len(self.tmpFavs)):
            self.grid.InsertStringItem(row, self.tmpFavs[row][1])
        self.grid.SetColumnWidth(0, -1)
        self.grid.SetColumnWidth(0, self.grid.GetColumnWidth(0) + 6)
        self.grid.EnsureVisible(len(self.tmpFavs)-1)
        self.grid.SetFocus()
        self.Colour()
        self.Diff()

    def Colour(self):
        maxF = self.plugin.maxFav
        cnt = self.grid.GetItemCount()
        fore = self.grid.GetTextColour()
        for row in range(min(maxF, cnt)):
            item = self.grid.GetItem(row)
            item.SetTextColour(fore)
            self.grid.SetItem(item)
        if maxF >= cnt:
            return
        # rows beyond the favorites limit are shown in red
        for row in range(maxF, cnt):
            item = self.grid.GetItem(row)
            item.SetTextColour("red")
            self.grid.SetItem(item)

    def onBtnsInOut(self, evt):
        id = evt.GetId()
        if id == wx.ID_SAVEAS or id == wx.ID_SAVE:
            dlg = wx.FileDialog(
                self,
                message = self.text.save,
                defaultDir = self.plugin.xmlpath,
                defaultFile = "Favorites.xml",
                wildcard = self.text.wildcard,
                style = wx.SAVE
            )
            if dlg.ShowModal() == wx.ID_OK:
                self.Export(dlg.GetPath(), id)
            dlg.Destroy()
        elif id == wx.ID_OPEN: # Import
            dlg = wx.FileDialog(
                self,
                message = self.text.choose,
                defaultDir = self.plugin.xmlpath,
                defaultFile = "*.xml",
                wildcard = self.text.wildcard,
                style = wx.OPEN | wx.CHANGE_DIR
            )
            flg = True
            filePath = None
            if dlg.ShowModal() == wx.ID_OK:
                filePath = dlg.GetPath()
                dlg.Destroy()
                xmldoc = miniDom.parse(filePath)
                document = xmldoc.getElementsByTagName('Favorites')
                if len(document) > 0:
                    stations = getStations(document[0])
                    if stations:
                        flg = False
                        self.Import(stations)
            if flg and filePath:
                MessageBox(
                    self,
                    self.text.message4 % split(filePath)[1],
                    self.plugin.text.messBoxTit1,
                    wx.ICON_EXCLAMATION,
                    plugin = self.plugin,
                )
        elif id == wx.ID_FILE: # Import SR
            dlg = wx.FileDialog(
                self,
                message = self.text.choose,
                defaultDir = eg.folderPath.ProgramFiles + '\\Screamer',
                defaultFile = "favorites.xml",
                wildcard = self.text.wildcard,
                style = wx.OPEN | wx.CHANGE_DIR
            )
            if dlg.ShowModal() == wx.ID_OK:
                filePath = dlg.GetPath()
                dlg.Destroy()
                stations = self.ImportSR(filePath)
                if not stations:
                    MessageBox(
                        self,
                        self.text.message4 % split(filePath)[1],
                        self.plugin.text.messBoxTit1,
                        wx.ICON_EXCLAMATION,
                        plugin = self.plugin,
                    )
                else:
                    self.Import(stations)
        evt.Skip()
        return

    def Diff(self):
        wx.FindWindowById(wx.ID_APPLY).Enable(self.favs != self.tmpFavs)

    def onClose(self, evt):
        hwnd = self.GetHandle()
        wp = GetWindowPlacement(hwnd)[4]
        #Note: GetPosition() returns (-32000, -32000) if the window is minimized !!!
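        #Note: the placement triple below (position, size, minimum size) is
        # cached in ConfigData, presumably so the manager reopens with the
        # same geometry next time.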
plcmnt = ( (wp[0], wp[1]), # pos (wp[2] - wp[0], wp[3] - wp[1]), # size (self.GetMinSize().GetWidth(),self.GetMinSize().GetHeight()) # min size ) if plcmnt != ConfigData.plcmnt: ConfigData.plcmnt = plcmnt #if not eg.document.IsDirty(): # wx.CallAfter(eg.Notify, "DocumentChange", True) self.Show(False) self.plugin.manager = None self.Destroy() evt.Skip() def ImportSR(self, filePath): xmldoc = miniDom.parse(filePath) document = xmldoc.getElementsByTagName('Screamer') if len(document) > 0: res = [] stations = tuple(document[0].getElementsByTagName('Station')) for station in stations: if "title" in station.attributes.keys(): title = station.attributes["title"].value else: return None src = station.getElementsByTagName('Source') if len(src)>0: src = src[0].firstChild.data i = -1 for ix in range(5, 11): srcIx = [itm[ix] for itm in self.data] if src in srcIx: i = srcIx.index(src) break if i > -1: station = self.data[i] itm = (src, station[0], station[2], station[4], station[3]) else: itm = (src, title, "-", "-", "-") res.append(itm) else: return None return res return None def Export(self, path, id): impl = miniDom.getDOMImplementation() dom = impl.createDocument(None, u'XMLConfigSettings', None) root = dom.documentElement commentNode = dom.createComment(self.text.xmlComment1) dom.insertBefore(commentNode, root) commentNode = dom.createComment(self.text.xmlComment2 % str(dt.now())[:19]) dom.insertBefore(commentNode, root) favNode = dom.createElement(u'Favorites') root.appendChild(favNode) if id == wx.ID_SAVEAS and self.grid.GetSelectedItemCount(): itmList = [itm for itm in range(len(self.tmpFavs)) if self.grid.IsSelected(itm)] else: itmList = range(len(self.tmpFavs)) self.CreateFavorites(dom, favNode, itmList) f = file(path, 'wb') writer = lookup('utf-8')[3](f) dom.writexml(writer, encoding = 'utf-8') f.close() #=============================================================================== class SchedulerDialog(wx.Dialog): lastRow = -1 applyBttn = None def __init__(self, text, plugin): wx.Dialog.__init__( self, None, -1, text.dialogTitle % version, style = wx.DEFAULT_DIALOG_STYLE|wx.MINIMIZE_BOX|wx.CLOSE_BOX, ) #import locale as l #l.setlocale(l.LC_ALL, "us") # only for testing bttns = [] self.ctrls=[] self.plugin = plugin self.SetIcon(self.plugin.info.icon.GetWxIcon()) self.plugin.dialog = self self.tmpData = self.plugin.tmpData = cpy(self.plugin.data) self.text = text def fillDynamicSizer(type, data = None, old_type = 255): flag = old_type != type if flag: dynamicSizer.Clear(True) self.ctrls=[] self.ctrls.append(wx.NewId()) self.ctrls.append(wx.NewId()) if type == -1: return if type != 1 and flag: topSizer = wx.StaticBoxSizer( wx.StaticBox(self, -1, self.text.chooseDay), wx.HORIZONTAL ) if type == 0: if flag: self.ctrls.append(wx.NewId()) dp = wx.DatePickerCtrl(self, self.ctrls[2], size = (86, -1), style = wx.DP_DROPDOWN | wx.DP_SHOWCENTURY) topSizer.Add(dp,0,wx.EXPAND) self.ctrls.append(wx.NewId()) yearlyCtrl = wx.CheckBox(self, self.ctrls[3], self.text.yearly) topSizer.Add(yearlyCtrl, 0, wx.EXPAND|wx.LEFT, 30) dynamicSizer.Add(topSizer, 0, wx.EXPAND|wx.TOP, 2) else: dp = wx.FindWindowById(self.ctrls[2]) yearlyCtrl = wx.FindWindowById(self.ctrls[3]) if data: if not data[2]: val = wx.DateTime_Now() data[2] = str(dt.now())[:10] wxDttm = wx.DateTime() wxDttm.Set( int(data[2][8:10]), int(data[2][5:7]) - 1, int(data[2][:4]) ) dp.SetValue(wxDttm) yearlyCtrl.SetValue(data[3]) elif type == 2: if flag: if self.plugin.first_day: choices = list(day_name)[:-1] choices.insert(0, list(day_name)[-1]) else: 
choices = list(day_name) self.ctrls.append(wx.NewId()) weekdayCtrl = wx.CheckListBox( self, self.ctrls[2], choices = choices, size=((-1,110)), ) self.ctrls.append(wx.NewId()) holidCheck_2 = wx.CheckBox( self, self.ctrls[3], self.text.holidCheck_2 ) self.ctrls.append(wx.NewId()) holidCheck_1 = wx.CheckBox( self, self.ctrls[4], self.text.holidCheck_1 ) topSizer.Add((40,1), 0, wx.ALIGN_CENTER) topSizer.Add( wx.StaticText( self, -1, self.text.theEvery ), 0, wx.ALIGN_CENTER | wx.RIGHT, 10 ) topSizer.Add(weekdayCtrl, 0, wx.TOP) dynamicSizer.Add(topSizer, 0, wx.EXPAND | wx.TOP,2) dynamicSizer.Add(holidCheck_1, 0, wx.TOP, 2) dynamicSizer.Add(holidCheck_2, 0, wx.TOP, 2) else: weekdayCtrl = wx.FindWindowById(self.ctrls[2]) holidCheck_2 = wx.FindWindowById(self.ctrls[3]) holidCheck_1 = wx.FindWindowById(self.ctrls[4]) val = 127 if not data else data[2] if self.plugin.first_day: exp = [6, 0, 1, 2, 3, 4, 5] else: exp = [0, 1, 2, 3, 4, 5, 6] for i in range(7): weekdayCtrl.Check(i, bool(val & (2 ** exp[i]))) enable = val & 31 and not val & 96 holidCheck_1.Enable(enable) check = 0 if (not data or not enable) else data[4] holidCheck_1.SetValue(check) enable = val & 96 and not val & 31 holidCheck_2.Enable(enable) check = 0 if (not data or not enable) else data[3] holidCheck_2.SetValue(check) elif type == 3: # Monthly/weekday ... if flag: dateSizer = wx.BoxSizer(wx.HORIZONTAL) dateSizer.Add( wx.StaticText( self, -1, self.text.the ), 0, wx.ALIGN_CENTER ) topSizer.Add(dateSizer, 0, wx.EXPAND) dynamicSizer.Add(topSizer, 0, wx.EXPAND | wx.TOP,2) self.ctrls.append(wx.NewId()) serialCtrl = wx.CheckListBox( self, self.ctrls[2], choices = self.text.serial_num, size = ((-1, 95)), ) dateSizer.Add(serialCtrl, 0, wx.ALIGN_CENTER | wx.LEFT, 10) if self.plugin.first_day: choices = list(day_name)[0:-1] choices.insert(0, list(day_name)[-1]) else: choices = list(day_name) self.ctrls.append(wx.NewId()) weekdayCtrl = wx.CheckListBox( self, self.ctrls[3], choices = choices, size = ((-1, 110)), ) dateSizer.Add(weekdayCtrl, 0, wx.ALIGN_CENTER | wx.LEFT, 10) dateSizer.Add( wx.StaticText( self, -1, self.text.in_ ), 0, wx.ALIGN_CENTER | wx.LEFT, 10 ) self.ctrls.append(wx.NewId()) monthsCtrl_1 = wx.CheckListBox( self, self.ctrls[4], choices = list(month_name)[1:7], size = ((-1, 95)), ) dateSizer.Add(monthsCtrl_1, 0, wx.ALIGN_CENTER | wx.LEFT, 10) self.ctrls.append(wx.NewId()) monthsCtrl_2 = wx.CheckListBox( self, self.ctrls[5], choices = list(month_name)[7:], size = ((-1, 95)), ) dateSizer.Add(monthsCtrl_2, 0, wx.ALIGN_CENTER | wx.LEFT, -1) self.ctrls.append(wx.NewId()) holidCheck_1 = wx.CheckBox( self, self.ctrls[6], self.text.holidCheck_1 ) dynamicSizer.Add(holidCheck_1, 0, wx.TOP, 2) else: serialCtrl = wx.FindWindowById(self.ctrls[2]) weekdayCtrl = wx.FindWindowById(self.ctrls[3]) monthsCtrl_1 = wx.FindWindowById(self.ctrls[4]) monthsCtrl_2 = wx.FindWindowById(self.ctrls[5]) holidCheck_1 = wx.FindWindowById(self.ctrls[6]) val = 0 if not data else data[2] for i in range(6): serialCtrl.Check(i, bool(val & (2 ** i))) val = 0 if not data else data[3] if self.plugin.first_day: exp = [6, 0, 1, 2, 3, 4, 5] else: exp = [0, 1, 2, 3, 4, 5, 6] for i in range(7): weekdayCtrl.Check(i, bool(val & (2 ** exp[i]))) enable = val & 31 and not val & 96 holidCheck_1.Enable(enable) val = 63 if not data else data[4] for i in range(6): monthsCtrl_1.Check(i, bool(val & (2 ** i))) val = 63 if not data else data[5] for i in range(6): monthsCtrl_2.Check(i, bool(val & (2 ** i))) check = 0 if (not data or not enable) else data[6] holidCheck_1.SetValue(check) 
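            # The weekday/month selections are stored as bitmasks: bit i set
            # means item i is checked (Monday = bit 0, so a mask of 31 covers
            # Mon-Fri and 96 the weekend). The exp[] tables above apparently
            # rotate the bits when the locale's week starts on Sunday.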
elif type == 4: # Monthly/day ... if flag: dateSizer = wx.BoxSizer(wx.HORIZONTAL) topSizer.Add(dateSizer, 0, wx.EXPAND) dynamicSizer.Add(topSizer, 0, wx.EXPAND | wx.TOP, 2) self.ctrls.append(wx.NewId()) q_1_Ctrl = wx.CheckListBox( self, self.ctrls[2], choices = [str(i) + '.' for i in range(1, 9)], size = ((40, 125)), ) dateSizer.Add(q_1_Ctrl, 0, wx.LEFT, 5) self.ctrls.append(wx.NewId()) q_2_Ctrl = wx.CheckListBox( self, self.ctrls[3], choices = [str(i) + '.' for i in range(9, 17)], size = ((46, 125)), ) dateSizer.Add(q_2_Ctrl, 0, wx.LEFT, -1) self.ctrls.append(wx.NewId()) q_3_Ctrl = wx.CheckListBox( self, self.ctrls[4], choices = [str(i) + '.' for i in range(17, 25)], size = ((46, 125)), ) dateSizer.Add(q_3_Ctrl, 0, wx.LEFT, -1) self.ctrls.append(wx.NewId()) q_4_Ctrl = wx.CheckListBox( self, self.ctrls[5], choices = [str(i) + '.' for i in range(25, 32)], size = ((46, 125)), ) dateSizer.Add(q_4_Ctrl, 0, wx.LEFT, -1) dateSizer.Add((-1, 1), 1, wx.EXPAND) self.ctrls.append(wx.NewId()) monthsCtrl_1 = wx.CheckListBox( self, self.ctrls[6], choices = list(month_name)[1:7], size = ((-1, 95)), ) dateSizer.Add(monthsCtrl_1, 0, wx.ALIGN_CENTER | wx.LEFT, 10) self.ctrls.append(wx.NewId()) monthsCtrl_2 = wx.CheckListBox( self, self.ctrls[7], choices = list(month_name)[7:], size = ((-1, 95)), ) dateSizer.Add(monthsCtrl_2, 0, wx.ALIGN_CENTER | wx.LEFT, -1) dateSizer.Add((5, 1), 0) else: q_1_Ctrl = wx.FindWindowById(self.ctrls[2]) q_2_Ctrl = wx.FindWindowById(self.ctrls[3]) q_3_Ctrl = wx.FindWindowById(self.ctrls[4]) q_4_Ctrl = wx.FindWindowById(self.ctrls[5]) monthsCtrl_1 = wx.FindWindowById(self.ctrls[6]) monthsCtrl_2 = wx.FindWindowById(self.ctrls[7]) val = 0 if not data else data[2] for i in range(8): q_1_Ctrl.Check(i, bool(val & (2 ** i))) val = 0 if not data else data[3] for i in range(8): q_2_Ctrl.Check(i, bool(val & (2 ** i))) val = 0 if not data else data[4] for i in range(8): q_3_Ctrl.Check(i, bool(val & (2 ** i))) val = 0 if not data else data[5] for i in range(7): q_4_Ctrl.Check(i, bool(val & (2 ** i))) val = 63 if not data else data[6] for i in range(6): monthsCtrl_1.Check(i, bool(val & (2 ** i))) val = 63 if not data else data[7] for i in range(6): monthsCtrl_2.Check(i, bool(val & (2 ** i))) elif type == 5: if flag: self.ctrls.append(wx.NewId()) dp = wx.DatePickerCtrl(self, self.ctrls[2], size = (86, -1), style = wx.DP_DROPDOWN | wx.DP_SHOWCENTURY) topSizer.Add(dp, 0, wx.EXPAND) dynamicSizer.Add(topSizer, 0, wx.EXPAND | wx.TOP, 2) else: dp = wx.FindWindowById(self.ctrls[2]) if data: if not data[2]: val = wx.DateTime_Now() data[2] = str(dt.now())[:10] wxDttm = wx.DateTime() wxDttm.Set( int(data[2][8:10]), int(data[2][5:7])-1, int(data[2][:4]) ) dp.SetValue(wxDttm) #elif type == 1: # daily # pass if flag: timeSizer = wx.GridBagSizer(0, 0) bottomSizer = wx.StaticBoxSizer( wx.StaticBox(self, -1, self.text.chooseTime6 if type == 6 else self.text.chooseTime), wx.HORIZONTAL ) dynamicSizer.Add(bottomSizer, 0, wx.EXPAND | wx.TOP, 16 if type != 2 else 5) bottomSizer.Add(timeSizer, 0, wx.EXPAND) stEvLbl = wx.StaticText(self, -1, self.text.start) timeSizer.Add(stEvLbl, (0, 0), (1, 2)) durLabel = wx.StaticText(self, -1, self.text.length) timeSizer.Add(durLabel, (0, 3), (1, 2)) spinBtn = wx.SpinButton( self, -1, wx.DefaultPosition, (-1, 22), wx.SP_VERTICAL ) initTime = wx.DateTime_Now() initTime.SetSecond(0) initTime.AddTS(wx.TimeSpan.Minute()) val = data[0] if data and data[0] else initTime timeCtrl = eg.TimeCtrl( self, self.ctrls[0], val, fmt24hr = True, spinButton = spinBtn ) timeSizer.Add(timeCtrl, (1, 
0), (1, 1)) timeSizer.Add(spinBtn, (1, 1), (1, 1)) timeSizer.Add((40, -1), (1, 2), (1, 1)) spinBtn2 = wx.SpinButton( self, -1, wx.DefaultPosition, (-1, 22), wx.SP_VERTICAL ) val = data[1] if data and data[1] else "00:00" lenCtrl = eg.TimeCtrl_Duration( self, self.ctrls[1], val, fmt24hr = True, spinButton = spinBtn2, displaySeconds = False ) timeSizer.Add(lenCtrl, (1, 3), (1, 1)) timeSizer.Add(spinBtn2, (1, 4), (1, 1)) bottomSizer.Add((-1,-1), 1, wx.EXPAND) testBttn = wx.Button( self, -1 if len(bttns) == 0 else bttns[-1], self.text.testButton ) bottomSizer.Add(testBttn, 0, wx.EXPAND | wx.RIGHT) else: timeCtrl = wx.FindWindowById(self.ctrls[0]) val = data[0] if data and data[0] else wx.DateTime_Now() timeCtrl.SetValue(val) lenCtrl = wx.FindWindowById(self.ctrls[1]) val = data[1] if data and data[1] else "00:00" lenCtrl.SetValue(val) if type == 5: #periodically if flag: bottomSizer = wx.StaticBoxSizer( wx.StaticBox(self, -1, self.text.choosePeriod), wx.HORIZONTAL ) self.ctrls.append(wx.NewId()) numCtrl = MySpinIntCtrl(self, -1, value = 1, min = 1) numCtrl.SetNumCtrlId(self.ctrls[3]) bottomSizer.Add( wx.StaticText( self, -1, self.text.andThenEvery ), 0, wx.ALIGN_CENTER ) bottomSizer.Add(numCtrl, 0, wx.LEFT, 4) self.ctrls.append(wx.NewId()) unitCtrl = wx.Choice( self, self.ctrls[4], choices = self.text.units ) bottomSizer.Add(unitCtrl, 0, wx.LEFT, 8) dynamicSizer.Add(bottomSizer, 0, wx.EXPAND|wx.TOP, 16) dynamicSizer.Layout() else: numCtrl = wx.FindWindowById(self.ctrls[3]) unitCtrl = wx.FindWindowById(self.ctrls[4]) if data: numCtrl.SetValue(str(data[3])) unitCtrl.SetSelection(data[4]) elif flag: dynamicSizer.Layout() if type == 6: stEvLbl.Show(False) timeCtrl.Show(False) spinBtn.Show(False) return dynamicSizer.GetMinSize()[0] def Diff(): applyBttn = wx.FindWindowById(bttns[5]) flg = self.tmpData != self.plugin.data applyBttn.Enable(flg) def onCheckListBox(evt): id = evt.GetId() sel = evt.GetSelection() box = self.FindWindowById(id) ix = self.ctrls.index(id) type = self.tmpData[self.lastRow][2] cond = (type == 2 and ix == 2) or (type == 3 and ix == 3) if cond and self.plugin.first_day: exp = (6, 0, 1, 2, 3, 4, 5)[sel] else: exp = sel if box.IsChecked(sel): self.tmpData[self.lastRow][3][ix] |= 2 ** exp else: self.tmpData[self.lastRow][3][ix] &= 255 - 2 ** exp if cond: holidCheck_1 = wx.FindWindowById(self.ctrls[-1]) val = self.tmpData[self.lastRow][3][ix] flg = val & 31 and not val & 96 holidCheck_1.Enable(flg) if not flg: holidCheck_1.SetValue(0) self.tmpData[self.lastRow][3][-1] = 0 if type == 2: holidCheck_2 = wx.FindWindowById(self.ctrls[3]) val = self.tmpData[self.lastRow][3][2] flg = val & 96 and not val & 31 holidCheck_2.Enable(flg) if not flg: holidCheck_2.SetValue(0) self.tmpData[self.lastRow][3][3] = 0 next = self.plugin.NextRun( self.tmpData[self.lastRow][2], self.tmpData[self.lastRow][3] ) grid.SetStringItem(self.lastRow, 3, next) Diff() def OnTimeChange(evt): ix = self.ctrls.index(evt.GetId()) self.tmpData[self.lastRow][3][ix] = evt.GetValue() next = self.plugin.NextRun( self.tmpData[self.lastRow][2], self.tmpData[self.lastRow][3] ) grid.SetStringItem(self.lastRow, 3, next) Diff() def onPeriodUnit(evt): if len(self.ctrls) == 5 and evt.GetId() == self.ctrls[4]: self.tmpData[self.lastRow][3][4] = evt.GetSelection() next = self.plugin.NextRun( self.tmpData[self.lastRow][2], self.tmpData[self.lastRow][3] ) grid.SetStringItem(self.lastRow, 3, next) else: evt.Skip() Diff() def onDatePicker(evt): val = str(dt.fromtimestamp(evt.GetDate().GetTicks()))[:10] self.tmpData[self.lastRow][3][2] = 
val next = self.plugin.NextRun( self.tmpData[self.lastRow][2], self.tmpData[self.lastRow][3] ) grid.SetStringItem(self.lastRow, 3, next) Diff() def onCheckBox(evt): val = evt.IsChecked() ix = self.ctrls.index(evt.GetId()) if self.tmpData[self.lastRow][2] == 2 and ix == 3: self.tmpData[self.lastRow][3][3] = int(val) else: self.tmpData[self.lastRow][3][-1] = int(val) next = self.plugin.NextRun( self.tmpData[self.lastRow][2], self.tmpData[self.lastRow][3] ) grid.SetStringItem(self.lastRow, 3, next) Diff() def OnUpdateDialog(evt): if self.lastRow == evt.GetId(): OpenSchedule() def OnSelectCell(evt): self.lastRow = evt.m_itemIndex OpenSchedule() Diff() evt.Skip() # necessary !!! def enableBttns(value): for i in (1, 2): bttn = self.FindWindowById(bttns[i]) bttn.Enable(value) Diff() def ShowMessageBox(mess): MessageBox( self, mess, self.text.boxTitle, wx.ICON_EXCLAMATION, plugin = self.plugin ) def FindNewTitle(title): tmpLst = [] for item in self.tmpData: if item[1].startswith(title + " ("): tmpLst.append(item[1][2 + len(title):]) if len(tmpLst) == 0: return "%s (1)" % title tmpLst2 = [] for item in tmpLst: if item[-1] == ")": try: tmpLst2.append(int(item[:-1])) except: pass if len(tmpLst2) == 0: return "%s (1)" % title else: return "%s (%i)" % (title, 1 + max(tmpLst2)) def testValidity(data, test = False): mssgs = [] tempDict = dict([(item[1].strip(), item[2]) for item in data]) if "" in tempDict.iterkeys(): mssgs.append(self.text.boxTexts[0]) if not test and len(tempDict) < len(data): mssgs.append(self.text.boxTexts[1]) if -1 in tempDict.itervalues(): mssgs.append(self.text.boxTexts[4]) for item in data: val = item[7] if (val & 6) == 6: # = Do nothing if not val & 24: if not self.text.boxTexts[3] in mssgs: mssgs.append(self.text.boxTexts[3]) else: # Not "Do nothing" if not item[5]: if not self.text.boxTexts[2] in mssgs: mssgs.append(self.text.boxTexts[2]) if item[2] == 5 and item[3][4] < 2: period = item[3][3] * (3600, 86400)[item[3][4]] span = 60 * int(item[3][1][3:]) + 3600 * int(item[3][1][:2]) if period <= span: if self.text.boxTexts[5] not in mssgs: mssgs.append(self.text.boxTexts[5]) flag = len(mssgs) > 0 if flag: ShowMessageBox("\n".join(mssgs)) return flag def addSchedule(evt = None): empty = [1, "", -1, [], " ", "", "", 5] self.lastRow = len(self.tmpData) self.tmpData.append(empty) Tidy() grid.AppendRow() grid.SelRow(self.lastRow) if not self.lastRow: enableBttns(True) EnableCtrls(True) Diff() def duplSchedule(evt = None): lngth = len(self.tmpData) item = cpy(self.tmpData[self.lastRow]) nxt = grid.GetItem(self.lastRow, 3).GetText() item[4] = "" self.lastRow = lngth self.tmpData.append(item) newTitle = FindNewTitle(self.tmpData[lngth][1]) self.tmpData[lngth][1] = newTitle grid.AppendRow() grid.SelRow(lngth) grid.SetStringItem(lngth, 1, newTitle) grid.SetStringItem(lngth, 3, nxt) OpenSchedule() Diff() def delSchedule(evt = None): self.tmpData.pop(self.lastRow) grid.DeleteItem(self.lastRow) if len(self.tmpData) > 0: if self.lastRow == len(self.tmpData): self.lastRow -= 1 OpenSchedule() grid.SelRow(self.lastRow) else: self.lastRow = -1 Tidy() EnableCtrls(False) enableBttns(False) Diff() def Move(direction): lst = cpy(self.tmpData) index = self.lastRow max = len(lst)-1 #Last to first position, other down if index == max and direction == 1: self.tmpData[1:] = lst[:-1] self.tmpData[0] = lst[max] index2 = 0 #First to last position, other up elif index == 0 and direction == -1: self.tmpData[:-1] = lst[1:] self.tmpData[max] = lst[0] index2 = max else: index2 = index + direction 
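            # The branches above rotate the whole list when the selection
            # would wrap past either end; here the row is simply swapped
            # with its neighbour via the copied list.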
self.tmpData[index] = lst[index2] self.tmpData[index2] = lst[index] del lst return index2 def moveUp(evt = None): newSel = Move(-1) fillGrid(False) self.grid.SelRow(newSel) Diff() def moveDown(evt = None): newSel = Move(1) fillGrid(False) self.grid.SelRow(newSel) Diff() def onButton(evt): id = evt.GetId() if id == bttns[0]: # Add new addSchedule() elif id == bttns[1]: # Duplicate duplSchedule() elif id == bttns[2]: # Delete delSchedule() elif id == bttns[3]: # OK if testValidity(self.tmpData): evt.Skip() return self.plugin.data = cpy(self.tmpData) self.tmpData = [] self.plugin.dataToXml() self.plugin.UpdateEGscheduler() self.Close() elif id == bttns[4]: # Cancel self.tmpData = [] self.Close() elif id == bttns[5]: # Apply applyBttn = wx.FindWindowById(bttns[5]) applyBttn.Enable(False) if testValidity(self.tmpData): evt.Skip() return self.plugin.data = cpy(self.tmpData) self.plugin.dataToXml() self.plugin.UpdateEGscheduler() evt.Skip() def EnableCtrls(value): typeChoice.Enable(value) schedulerName.Enable(value) name_label.Enable(value) type_label.Enable(value) favorite_label.Enable(value) workModeLbl.Enable(value) triggEvtLbl.Enable(value) windOpenLbl.Enable(value) source_label.Enable(value) filename_label.Enable(value) favChoice.Enable(value) sourceCtrl.Enable(value) recordCtrl.Enable(value) workModeCtrl.Enable(value) triggEvtCtrl.Enable(value) windOpenCtrl.Enable(value) if not value: workModeCtrl.SetSelection(-1) triggEvtCtrl.SetSelection(-1) windOpenCtrl.SetSelection(-1) def OpenSchedule(): schedulerName.ChangeValue(self.tmpData[self.lastRow][1]) type = self.tmpData[self.lastRow][2] fillDynamicSizer( type, self.tmpData[self.lastRow][3], typeChoice.GetSelection() ) typeChoice.SetSelection(type) modes = self.tmpData[self.lastRow][7] rsMode = (modes>>1)&3 workModeCtrl.SetSelection(rsMode) recordCtrl.GetTextCtrl().ChangeValue(self.tmpData[self.lastRow][6]) sourceCtrl.SetValue(self.tmpData[self.lastRow][5]) if rsMode == 3: windOpenCtrl.SetSelection(-1) windOpenCtrl.Enable(False) windOpenLbl.Enable(False) else: windOpenCtrl.SetSelection(modes&1) windOpenCtrl.Enable(True) windOpenLbl.Enable(True) triggEvtCtrl.SetSelection((modes>>3)&3) def Tidy(): favChoice.SetSelection(-1) typeChoice.SetSelection(-1) windOpenCtrl.SetSelection(1) workModeCtrl.SetSelection(2) triggEvtCtrl.SetSelection(0) sourceCtrl.ChangeValue("") recordCtrl.GetTextCtrl().ChangeValue("") schedulerName.ChangeValue("") fillDynamicSizer(-1) filename_label.Enable(True) recordCtrl.Enable(True) def onCheckListCtrl(evt): index, flag = evt.GetValue() if self.tmpData[index][0] != int(flag): self.tmpData[index][0] = int(flag) Diff() def onSchedulerTitle(evt): txt = evt.GetString() grid.SetStringItem(self.lastRow, 1, txt) self.tmpData[self.lastRow][1] = txt Diff() def onPeriodNumber(evt): if len(self.ctrls) == 5 and evt.GetId() == self.ctrls[3]: self.tmpData[self.lastRow][3][3] = int(evt.GetString()) next = self.plugin.NextRun( self.tmpData[self.lastRow][2], self.tmpData[self.lastRow][3] ) grid.SetStringItem(self.lastRow, 3, next) Diff() else: evt.Skip() def onTestButton(evt): data = self.tmpData[self.lastRow] if testValidity([data,], True): return ticks = mktime(localtime()) next, cmdline = self.plugin.Execute(data, True) next = next[:19] if next else self.plugin.text.none self.plugin.updateLogFile(self.text.testRun % (data[1], next)) self.plugin.updateLogFile(self.plugin.text.cmdLine % cmdline) def OnRightClick(evt): if not hasattr(self, "popupID1"): self.popupID1 = wx.NewId() self.popupID2 = wx.NewId() self.popupID3 = wx.NewId() 
self.popupID4 = wx.NewId() self.popupID5 = wx.NewId() self.popupID6 = wx.NewId() self.popupID7 = wx.NewId() self.Bind(wx.EVT_MENU, addSchedule, id=self.popupID1) self.Bind(wx.EVT_MENU, duplSchedule, id=self.popupID2) self.Bind(wx.EVT_MENU, delSchedule, id=self.popupID3) self.Bind(wx.EVT_MENU, self.EnableAll, id=self.popupID4) self.Bind(wx.EVT_MENU, self.DisableAll, id=self.popupID5) self.Bind(wx.EVT_MENU, moveUp, id=self.popupID6) self.Bind(wx.EVT_MENU, moveDown, id=self.popupID7) # make a menu menu = wx.Menu() menu.Append(self.popupID1, self.text.popup[0]) menu.Append(self.popupID2, self.text.popup[1]) menu.Append(self.popupID3, self.text.popup[2]) menu.AppendSeparator() menu.Append(self.popupID4, self.text.popup[3]) menu.Append(self.popupID5, self.text.popup[4]) menu.AppendSeparator() menu.Append(self.popupID6, self.text.popup[5]) menu.Append(self.popupID7, self.text.popup[6]) self.PopupMenu(menu) menu.Destroy() evt.Skip() def fillGrid(flag): grid.DeleteAllItems() rows = len(self.tmpData) if rows > 0: for row in range(rows): grid.InsertStringItem(row, "") if self.tmpData[row][0]: grid.CheckItem(row) grid.SetStringItem(row, 1, self.tmpData[row][1]) grid.SetStringItem(row, 2, self.tmpData[row][4]) next = self.plugin.NextRun(self.tmpData[row][2], self.tmpData[row][3]) grid.SetStringItem(row, 3, next) if flag: self.lastRow = 0 grid.SelRow(0) OpenSchedule() enableBttns(True) else: EnableCtrls(False) grid.DeleteItem(0) dynamicSizer = wx.BoxSizer(wx.VERTICAL) wDynamic = fillDynamicSizer(3) fillDynamicSizer(-1) self.SetSize(wx.Size(wDynamic + 37, 684)) grid = self.grid = CheckListCtrl(self, text, wDynamic + 20) # grid = self.grid = CheckListCtrl(self, text, wDynamic + 20) sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(grid, 0, wx.ALL, 5) favorite_label = wx.StaticText(self, -1, self.text.favorite) workModeLbl = wx.StaticText(self, -1, self.text.workModeLabel) workModeCtrl = wx.Choice(self, -1, choices = self.text.workModes) triggEvtLbl = wx.StaticText(self, -1, self.text.triggEvtLabel) triggEvtCtrl = wx.Choice(self, -1, choices = self.text.triggEvtChoices) windOpenLbl = wx.StaticText(self, -1, self.text.windOpenLabel) windOpenCtrl = wx.Choice(self, -1, choices = self.text.windOpenChoices) source_label = wx.StaticText(self, -1, self.text.source) self.favorites = self.plugin.RefreshVariables() favChoice = wx.Choice(self, -1, choices = [item[1] for item in self.favorites]) sourceCtrl = wx.TextCtrl(self,-1,"") filename_label = wx.StaticText(self, -1, self.text.filename) schedulerName = wx.TextCtrl(self, -1, "") typeChoice = wx.Choice(self, -1, choices = self.text.sched_type) xmltoparse = u'%s\\RadioSure.xml' % self.plugin.xmlpath xmltoparse = xmltoparse.encode(FSE) if isinstance(xmltoparse, unicode) else xmltoparse xmldoc = miniDom.parse(xmltoparse) recordings = xmldoc.getElementsByTagName('Recordings') if not recordings: folder = u'%s\\RadioSure Recordings' % self.plugin.xmlpath else: folder = recordings[0].getElementsByTagName('Folder')[0].firstChild.data recordCtrl = MyFileBrowseButton( self, toolTip = self.text.toolTipFile, dialogTitle = self.text.browseTitle, buttonText = eg.text.General.browse, startDirectory = folder ) self.grid.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, OnRightClick) def onSource(evt): src = evt.GetString() srcs = [item[0] for item in self.favorites] if src in srcs: ix = srcs.index(src) else: ix = -1 favChoice.SetSelection(ix) self.tmpData[self.lastRow][5] = src Diff() evt.Skip() sourceCtrl.Bind(wx.EVT_TEXT, onSource) def onFavChoice(evt): sel = evt.GetSelection() txt = 
self.favorites[sel][0] sourceCtrl.ChangeValue(txt) self.tmpData[self.lastRow][5] = txt Diff() evt.Skip() favChoice.Bind(wx.EVT_CHOICE, onFavChoice) def onRecordCtrl(evt): txt = evt.GetString() self.tmpData[self.lastRow][6] = txt Diff() evt.Skip() recordCtrl.GetTextCtrl().Bind(wx.EVT_TEXT, onRecordCtrl) def onTypeChoice(evt): type = evt.GetSelection() if self.tmpData[self.lastRow][2] != type: empty_data = [ ["", "", 0, 0], ["", ""], ["", "", 127, 0, 0], ["", "", 0, 0, 63, 63, 0], ["", "", 0, 0, 0, 0, 63, 63], ["", "", 0, 1, 0], ] self.tmpData[self.lastRow][2] = type data = empty_data[self.tmpData[self.lastRow][2]] self.tmpData[self.lastRow][3] = data fillDynamicSizer(type, data) Diff() def onWorkMode(evt): sel = evt.GetSelection() if sel == 3: windOpenCtrl.SetSelection(-1) windOpenCtrl.Enable(False) windOpenLbl.Enable(False) if triggEvtCtrl.GetSelection() == 0: ShowMessageBox(self.text.boxTexts[3]) else: if windOpenCtrl.GetSelection() == -1: windOpenCtrl.SetSelection(1) windOpenCtrl.Enable(True) windOpenLbl.Enable(True) val = self.tmpData[self.lastRow][7] val &= (255-6) val |= (sel<<1) self.tmpData[self.lastRow][7] = val Diff() workModeCtrl.Bind(wx.EVT_CHOICE, onWorkMode) def onWindOpen(evt): sel = evt.GetSelection() val = self.tmpData[self.lastRow][7] val &= (255-1) val |= sel self.tmpData[self.lastRow][7] = val Diff() windOpenCtrl.Bind(wx.EVT_CHOICE, onWindOpen) def onTriggEvtCtrl(evt): sel = evt.GetSelection() workMode = workModeCtrl.GetSelection() if sel == 0 and workMode == 3: ShowMessageBox(self.text.boxTexts[3]) val = self.tmpData[self.lastRow][7] val &= (255-24) val |= (sel<<3) self.tmpData[self.lastRow][7] = val Diff() triggEvtCtrl.Bind(wx.EVT_CHOICE, onTriggEvtCtrl) bttnSizer = wx.BoxSizer(wx.HORIZONTAL) bttnSizer.Add((5, -1)) i = 0 for bttn in self.text.buttons: id = wx.NewId() bttns.append(id) b = wx.Button(self, id, bttn) bttnSizer.Add(b,1) if i in (1, 2, 5): b.Enable(False) if i == 3: b.SetDefault() if i == 5: self.applyBttn = b b.Bind(wx.EVT_BUTTON, onButton, id = id) bttnSizer.Add((5, -1)) i += 1 sizer.Add(bttnSizer,0,wx.EXPAND) id = wx.NewId() #testBttn bttns.append(id) self.Bind(wx.EVT_BUTTON, onTestButton, id = id) wx.EVT_CHECKLISTBOX(self, -1, onCheckListBox) EVT_TIMEUPDATE(self, -1, OnTimeChange) wx.EVT_TEXT(self, -1, onPeriodNumber) wx.EVT_CHOICE(self, -1, onPeriodUnit) wx.EVT_DATE_CHANGED(self, -1, onDatePicker) wx.EVT_CHECKBOX(self, -1, onCheckBox) self.Bind(EVT_UPDATE_DIALOG, OnUpdateDialog) self.Bind(wx.EVT_LIST_ITEM_SELECTED, OnSelectCell) typeChoice.Bind(wx.EVT_CHOICE, onTypeChoice) schedulerName.Bind(wx.EVT_TEXT, onSchedulerTitle) self.Bind(EVT_CHECKLISTCTRL, onCheckListCtrl) nameSizer = wx.FlexGridSizer(2, 0, 0, 20) nameSizer.AddGrowableCol(0,1) name_label = wx.StaticText(self, -1, self.text.header[1] + ":") nameSizer.Add(name_label) type_label = wx.StaticText(self, -1, self.text.type_label) nameSizer.Add(type_label) nameSizer.Add(schedulerName, 0, wx.EXPAND) nameSizer.Add(typeChoice) typeSizer = wx.StaticBoxSizer( wx.StaticBox(self, -1, ""), wx.VERTICAL ) dynamicSizer.SetMinSize((-1, 226)) typeSizer.Add(nameSizer, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 5) typeSizer.Add(dynamicSizer, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 5) sizer.Add(typeSizer, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 5) sizer.Add(source_label, 0, wx.TOP|wx.LEFT, 5) sizer.Add(sourceCtrl,0,wx.EXPAND|wx.LEFT|wx.RIGHT, 5) sizer.Add((1,4)) sizer.Add(favorite_label, 0, wx.TOP|wx.LEFT, 5) sizer.Add(favChoice,0,wx.EXPAND|wx.LEFT|wx.RIGHT, 5) sizer.Add((1,4)) choicesSizer = 
wx.FlexGridSizer(2,3,0,10) choicesSizer.Add(windOpenLbl,0) choicesSizer.Add(workModeLbl,0) choicesSizer.Add(triggEvtLbl,0) choicesSizer.Add(windOpenCtrl,0,wx.EXPAND) choicesSizer.Add(workModeCtrl,0,wx.EXPAND) choicesSizer.Add(triggEvtCtrl,0,wx.EXPAND) sizer.Add(choicesSizer,0,wx.ALL, 5) sizer.Add(filename_label, 0, wx.LEFT, 5) sizer.Add(recordCtrl,0,wx.EXPAND|wx.LEFT|wx.RIGHT, 5) fillGrid(True) self.Bind(wx.EVT_CLOSE, self.onClose) self.SetSizer(sizer) sizer.Layout() if ConfigData.pos: self.SetPosition(ConfigData.pos) else: self.Center() self.Show(True) def EnableAll(self, flag): if isinstance(flag, wx.CommandEvent): schedule = self.tmpData[self.lastRow][1] flag = 1 for ix in range(len(self.tmpData)): self.tmpData[ix][0] = flag if self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]: if flag: self.grid.CheckItem(ix) elif self.grid.IsChecked(ix): self.grid.ToggleItem(ix) self.applyBttn.Enable(self.tmpData != self.plugin.data) def DisableAll(self, evt): self.EnableAll(0) def EnableSchedule(self, schedule, flag): tmpList = [item[1] for item in self.tmpData] if schedule in tmpList: ix = tmpList.index(schedule) self.tmpData[ix][0] = flag if self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]: if flag: self.grid.CheckItem(ix) elif self.grid.IsChecked(ix): self.grid.ToggleItem(ix) def DeleteSchedule(self, schedule): tmpList = [item[1] for item in self.tmpData] if schedule in tmpList: ix = tmpList.index(schedule) if self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]: self.grid.DeleteItem(ix) self.grid.Refresh() self.tmpData.pop(ix) def AddSchedule(self, schedule): tmpList = [item[1] for item in self.tmpData] if schedule[1] in tmpList: ix = tmpList.index(schedule[1]) self.tmpData[ix] = schedule if not self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]: return else: ix = len(self.tmpData) self.tmpData.append(schedule) self.grid.InsertStringItem(ix, "") if schedule[0]: self.grid.CheckItem(ix) elif self.grid.IsChecked(ix): self.grid.ToggleItem(ix) self.grid.SetStringItem(ix, 1, schedule[1]) next = self.plugin.NextRun(schedule[2], schedule[3]) self.grid.SetStringItem(ix, 3, next) if self.lastRow == ix: evt = wx.PyCommandEvent(newEVT_UPDATE_DIALOG, ix) self.GetEventHandler().ProcessEvent(evt) def RefreshGrid(self, ix, last, next): if self.grid.GetItem(ix, 1).GetText() == self.tmpData[ix][1]: self.grid.SetStringItem(ix, 2, last) self.grid.SetStringItem(ix, 3, next) def onClose(self, evt): hwnd = self.GetHandle() wp = GetWindowPlacement(hwnd)[4] #Note: GetPosition() return (-32000, -32000), if window is minimized !!! pos = (wp[0], wp[1]) if pos != ConfigData.pos: ConfigData.pos = pos #if not eg.document.IsDirty(): # wx.CallAfter(eg.Notify, "DocumentChange", True) self.Show(False) self.plugin.dialog = None self.Destroy() evt.Skip() #=============================================================================== def HandleRS(): FindRS = eg.WindowMatcher( u'RadioSure.exe', None, u'#32770', None, None, None, True, 0.0, 0 ) hwnds = FindRS() res = [] for hwnd in hwnds: try: #maybe already closed !!! 
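            # Walk the child windows of each candidate dialog; a real
            # RadioSure main window hosts the station list, a SysListView32
            # with control id 1016.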
            curhw = GetWindow(hwnd, GW_CHILD)
            while curhw > 0:
                if GetDlgCtrlID(curhw) == 1016 and GetClassName(curhw) == 'SysListView32':
                    res.append(hwnd)
                    break
                curhw = GetWindow(curhw, GW_HWNDNEXT)
        except:
            pass
    return res
#===============================================================================

class ObservationThread(Thread):

    def __init__(
        self,
        period,
        evtName,
    ):
        self.abort = False
        self.aborted = False
        self.oldData = ""
        self.threadFlag = Event()
        self.period = period
        self.evtName = evtName
        Thread.__init__(self, name = self.evtName.encode('unicode_escape')+'_Thread')

    def run(self):
        while 1:
            hwnd = HandleRS()
            if hwnd:
                data = GetWindowText(hwnd[0]).decode(eg.systemEncoding)
                if data != self.oldData and data != "Radio? Sure!":
                    self.oldData = data
                    eg.TriggerEvent(self.evtName, payload = data, prefix = "RadioSure")
            if self.abort:
                break
            self.threadFlag.wait(self.period)
            self.threadFlag.clear()
        self.aborted = True

    def AbortObservation(self):
        self.abort = True
        self.threadFlag.set()
#===============================================================================

def GetCtrlByID(id):
    res = None
    hwnds = HandleRS()
    if hwnds:
        try:
            res = GetDlgItem(hwnds[0], id)
        except:
            pass
    return res
#===============================================================================

def getPathFromReg():
    try:
        rs_reg = OpenKey(
            HKEY_CURRENT_USER,
            "Software\\RadioSure"
        )
        res = unicode(EnumValue(rs_reg, 0)[1])
        CloseKey(rs_reg)
    except:
        res = None
    return res
#===============================================================================

def FindMonthDay(year, month, weekday, index):
    """weekday = what day of the week looking for (numbered 0-6, 0 = monday)
    index = how many occurrence of looking for (numbered 0-4 and 5 for the last day)
    Returns the day of the month (date) or 0 (if no such date exists)"""
    first_wd, length = monthrange(year, month)
    day = 1 + weekday - first_wd
    if day < 1:
        day += 7
    if index == 5:
        index = 4 if day <= length % 7 else 3
    day += 7 * index
    if day > length:
        day = 0
    return day
#===============================================================================

def getStations(nodelist):
    tmp = []
    for item in nodelist.childNodes:
        if item.nodeName[:5] == "Item-":
            title = item.getElementsByTagName('Title')[0].firstChild
            if title:
                title = title.data
            source = item.getElementsByTagName('Source')[0].firstChild
            if source:
                source = source.data
            genre = item.getElementsByTagName('Genre')[0].firstChild
            if genre:
                genre = genre.data
            language = item.getElementsByTagName('Language')[0].firstChild
            if language:
                language = language.data
            country = item.getElementsByTagName('Country')[0].firstChild
            if country:
                country = country.data
            tmp.append([source, title, genre, language, country])
    return tmp
#===============================================================================

class MenuGrid(gridlib.Grid):

    def __init__(self, parent, lngth):
        gridlib.Grid.__init__(self, parent)
        self.SetRowLabelSize(0)
        self.SetColLabelSize(0)
        self.SetDefaultRowSize(16)
        self.SetScrollLineX(1)
        self.SetScrollLineY(1)
        self.EnableEditing(False)
        self.EnableDragColSize(False)
        self.EnableDragRowSize(False)
        self.EnableDragGridSize(False)
        self.EnableGridLines(False)
        self.SetColMinimalAcceptableWidth(8)
        self.CreateGrid(lngth, 3)
        attr = gridlib.GridCellAttr()
        attr.SetAlignment(wx.ALIGN_LEFT, wx.ALIGN_CENTRE)
        self.SetColAttr(1, attr)
        self.SetSelectionMode(gridlib.Grid.wxGridSelectRows)
        self.Bind(gridlib.EVT_GRID_CMD_SELECT_CELL, self.onGridSelectCell, self)

    def SetBackgroundColour(self, colour):
        self.SetDefaultCellBackgroundColour(colour)

    def SetForegroundColour(self, colour):
        self.SetDefaultCellTextColour(colour)

    def SetFont(self, font):
        self.SetDefaultCellFont(font)

    def GetSelection(self):
        return self.GetSelectedRows()[0]

    def Set(self, choices):
        oldLen = self.GetNumberRows()
        newLen = len(choices)
        h = self.GetDefaultRowSize()
        if oldLen > newLen:
            self.DeleteRows(0, oldLen-newLen, False)
        elif oldLen < newLen:
            self.AppendRows(newLen-oldLen, False)
        for i in range(len(choices)):
            chr = u"\u25a0" if choices[i][2] else ""
            self.SetCellValue(i, 0, chr)
            self.SetCellValue(i, 1, " "+choices[i][0])
            chr = u"\u25ba" if choices[i][3] == -1 else ""
            self.SetCellValue(i, 2, chr)
            self.SetRowSize(i, h)

    def onGridSelectCell(self, event):
        row = event.GetRow()
        self.SelectRow(row)
        if not self.IsVisible(row, 1):
            self.MakeCellVisible(row, 1)
        event.Skip()

    def MoveCursor(self, step):
        max = self.GetNumberRows()
        sel = self.GetSelectedRows()[0]
        new = sel + step
        if new < 0:
            new += max
        elif new > max-1:
            new -= max
        self.SetGridCursor(new, 1)
        self.SelectRow(new)
#===============================================================================

class MyTextDropTarget(EventDropTarget):

    def __init__(self, object):
        EventDropTarget.__init__(self, object)
        self.object = object

    def OnDragOver(self, x, y, dragResult):
        return wx.DragMove

    def OnData(self, dummyX, dummyY, dragResult):
        if self.GetData() and self.customData.GetDataSize() > 0:
            txt = self.customData.GetData()
            ix, evtList = self.object.GetEvtList()
            flag = True
            for lst in evtList:
                if txt in lst:
                    flag = False
                    break
            if flag:
                self.object.InsertImageStringItem(len(evtList[ix]), txt, 0)
                self.object.UpdateEvtList(ix, txt)
            else:
                PlaySound('SystemExclamation', SND_ASYNC)

    def OnLeave(self):
        pass
#===============================================================================

class EventListCtrl(wx.ListCtrl):

    def __init__(self, parent, id, evtList, ix, plugin):
        width = 205
        wx.ListCtrl.__init__(self, parent, id,
            style = wx.LC_REPORT | wx.LC_NO_HEADER | wx.LC_SINGLE_SEL,
            size = (width, -1))
        self.parent = parent
        self.id = id
        self.evtList = evtList
        self.ix = ix
        self.plugin = plugin
        self.sel = -1
        self.il = wx.ImageList(16, 16)
        self.il.Add(wx.BitmapFromImage(wx.Image(join(IMAGES_DIR, "event.png"), wx.BITMAP_TYPE_PNG)))
        self.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
        self.InsertColumn(0, '')
        self.SetColumnWidth(0, width - 5 - SYS_VSCROLL_X)
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnSelect)
        self.Bind(wx.EVT_SET_FOCUS, self.OnChange)
        self.Bind(wx.EVT_LIST_INSERT_ITEM, self.OnChange)
        self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnChange)
        self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightClick)
        self.SetToolTipString(self.plugin.text.toolTip)

    def OnSelect(self, event):
        self.sel = event.GetIndex()
        evt = UserEvent(newEVT_BUTTON_AFTER, self.id)
        evt.SetValue(self)
        self.GetEventHandler().ProcessEvent(evt)
        event.Skip()

    def OnChange(self, event):
        evt = UserEvent(newEVT_BUTTON_AFTER, self.id)
        evt.SetValue(self)
        self.GetEventHandler().ProcessEvent(evt)
        event.Skip()

    def OnRightClick(self, event):
        if not hasattr(self, "popupID1"):
            self.popupID1 = wx.NewId()
            self.popupID2 = wx.NewId()
            self.Bind(wx.EVT_MENU, self.OnDeleteButton, id = self.popupID1)
            self.Bind(wx.EVT_MENU, self.OnDeleteAllButton, id = self.popupID2)
        # make a menu
        menu = wx.Menu()
        # add some items
        menu.Append(self.popupID1, self.plugin.text.popup[0])
        menu.Append(self.popupID2, self.plugin.text.popup[1])
        # Popup the menu.  If an item is selected then its handler
        # will be called before PopupMenu returns.
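        # PopupMenu() blocks, so the chosen handler has already run by the
        # time the menu is destroyed below.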
self.PopupMenu(menu) menu.Destroy() def OnDeleteButton(self, event=None): self.DeleteItem(self.sel) self.evtList[self.ix].pop(self.sel) evt = UserEvent(newEVT_BUTTON_AFTER, self.id) evt.SetValue(self) self.GetEventHandler().ProcessEvent(evt) if event: event.Skip() def OnDeleteAllButton(self, event=None): self.DeleteAllItems() evt = UserEvent(newEVT_BUTTON_AFTER, self.id) evt.SetValue(self) self.GetEventHandler().ProcessEvent(evt) self.evtList[self.ix] = [] if event: event.Skip() def GetEvtList(self): return self.ix, self.evtList def UpdateEvtList(self, ix, txt): self.evtList[ix].append(txt) def SetItems(self, evtList): for i in range(len(evtList)): self.InsertImageStringItem(i, evtList[i], 0) #=============================================================================== class MenuEventsDialog(wx.MiniFrame): def __init__(self, parent, plugin): wx.MiniFrame.__init__( self, parent, -1, style=wx.CAPTION, name="Menu events dialog" ) self.panel = parent self.plugin = plugin self.evtList = cpy(self.panel.evtList) self.SetBackgroundColour(wx.NullColour) self.ctrl = None self.sel = -1 def ShowMenuEventsDialog(self, title, labels): self.panel.Enable(False) self.panel.dialog.buttonRow.cancelButton.Enable(False) self.panel.EnableButtons(False) self.SetTitle(title) sizer = wx.BoxSizer(wx.VERTICAL) sizer.SetMinSize((450, 308)) topSizer=wx.GridBagSizer(2, 20) textLbl_0=wx.StaticText(self, -1, labels[0]) id = wx.NewId() eventsCtrl_0 = EventListCtrl(self, id, self.evtList, 0, self.plugin) eventsCtrl_0.SetItems(self.evtList[0]) dt0 = MyTextDropTarget(eventsCtrl_0) eventsCtrl_0.SetDropTarget(dt0) textLbl_1=wx.StaticText(self, -1, labels[1]) id = wx.NewId() eventsCtrl_1 = EventListCtrl(self, id, self.evtList, 1, self.plugin) eventsCtrl_1.SetItems(self.evtList[1]) dt1 = MyTextDropTarget(eventsCtrl_1) eventsCtrl_1.SetDropTarget(dt1) textLbl_2=wx.StaticText(self, -1, labels[2]) id = wx.NewId() eventsCtrl_2 = EventListCtrl(self, id, self.evtList, 2, self.plugin) eventsCtrl_2.SetItems(self.evtList[2]) dt2 = MyTextDropTarget(eventsCtrl_2) eventsCtrl_2.SetDropTarget(dt2) textLbl_3=wx.StaticText(self, -1, labels[3]) id = wx.NewId() eventsCtrl_3 = EventListCtrl(self, id, self.evtList, 3, self.plugin) eventsCtrl_3.SetItems(self.evtList[3]) dt3 = MyTextDropTarget(eventsCtrl_3) eventsCtrl_3.SetDropTarget(dt3) textLbl_4=wx.StaticText(self, -1, labels[4]) id = wx.NewId() eventsCtrl_4 = EventListCtrl(self, id, self.evtList, 4, self.plugin) eventsCtrl_4.SetItems(self.evtList[4]) dt4 = MyTextDropTarget(eventsCtrl_4) eventsCtrl_4.SetDropTarget(dt4) deleteSizer = wx.BoxSizer(wx.VERTICAL) delOneBtn = wx.Button(self, -1, self.plugin.text.popup[0]) delBoxBtn = wx.Button(self, -1, self.plugin.text.popup[1]) clearBtn = wx.Button(self, -1, self.plugin.text.clear) deleteSizer.Add(delOneBtn, 1, wx.EXPAND) deleteSizer.Add(delBoxBtn, 1, wx.EXPAND|wx.TOP,5) deleteSizer.Add(clearBtn, 1, wx.EXPAND|wx.TOP,5) topSizer.Add(textLbl_0, (0,0)) topSizer.Add(eventsCtrl_0, (1,0), flag = wx.EXPAND) topSizer.Add(textLbl_1, (0,1)) topSizer.Add(eventsCtrl_1, (1,1), flag = wx.EXPAND) topSizer.Add(textLbl_2, (2,0),flag = wx.TOP, border = 8) topSizer.Add(eventsCtrl_2, (3,0), flag = wx.EXPAND) topSizer.Add(textLbl_3, (2,1), flag = wx.TOP, border = 8) topSizer.Add(eventsCtrl_3, (3,1), flag = wx.EXPAND) topSizer.Add(textLbl_4, (4,0), flag = wx.TOP, border = 8) topSizer.Add(eventsCtrl_4, (5,0), flag = wx.EXPAND) topSizer.Add(deleteSizer, (5,1), flag = wx.EXPAND) line = wx.StaticLine(self, -1, size=(20,-1),pos = (200,0), style=wx.LI_HORIZONTAL) btn1 = 
wx.Button(self, wx.ID_OK) btn1.SetLabel(self.plugin.text.ok) btn1.SetDefault() btn2 = wx.Button(self, wx.ID_CANCEL) btn2.SetLabel(self.plugin.text.cancel) btnsizer = wx.StdDialogButtonSizer() btnsizer.AddButton(btn1) btnsizer.AddButton(btn2) btnsizer.Realize() sizer.Add(topSizer,0,wx.ALL,10) sizer.Add(line, 0, wx.EXPAND|wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM,5) sizer.Add(btnsizer, 0, wx.EXPAND|wx.RIGHT, 10) sizer.Add((1,6)) self.SetSizer(sizer) sizer.Fit(self) def onFocus(evt): ctrl = evt.GetValue() if ctrl != self.ctrl: if self.ctrl: self.ctrl.SetItemState(-1, wx.LIST_MASK_STATE, wx.LIST_STATE_SELECTED) self.ctrl = ctrl sel = self.ctrl.sel if sel != -1: self.sel = sel flag = self.ctrl.GetSelectedItemCount() > 0 delOneBtn.Enable(flag) delBoxBtn.Enable(flag) evt.Skip() eventsCtrl_0.Bind(EVT_BUTTON_AFTER, onFocus) eventsCtrl_1.Bind(EVT_BUTTON_AFTER, onFocus) eventsCtrl_2.Bind(EVT_BUTTON_AFTER, onFocus) eventsCtrl_3.Bind(EVT_BUTTON_AFTER, onFocus) eventsCtrl_4.Bind(EVT_BUTTON_AFTER, onFocus) def onDelOneBtn(evt): self.ctrl.OnDeleteButton() delOneBtn.Enable(False) delBoxBtn.Enable(False) evt.Skip() delOneBtn.Bind(wx.EVT_BUTTON, onDelOneBtn) def onDelBoxBtn(evt): self.ctrl.OnDeleteAllButton() delOneBtn.Enable(False) delBoxBtn.Enable(False) evt.Skip() delBoxBtn.Bind(wx.EVT_BUTTON, onDelBoxBtn) def onClearBtn(evt): eventsCtrl_0.DeleteAllItems() eventsCtrl_1.DeleteAllItems() eventsCtrl_2.DeleteAllItems() eventsCtrl_3.DeleteAllItems() eventsCtrl_4.DeleteAllItems() delOneBtn.Enable(False) delBoxBtn.Enable(False) self.evtList = [[],[],[],[],[]] evt.Skip() clearBtn.Bind(wx.EVT_BUTTON, onClearBtn) def onClose(evt): self.panel.Enable(True) self.panel.dialog.buttonRow.cancelButton.Enable(True) self.panel.EnableButtons(True) self.GetParent().GetParent().Raise() self.Destroy() self.panel.setFocus() self.Bind(wx.EVT_CLOSE, onClose) def onCancel(evt): self.panel.Enable(True) self.panel.dialog.buttonRow.cancelButton.Enable(True) self.panel.EnableButtons(True) self.Close() btn2.Bind(wx.EVT_BUTTON,onCancel) def onOK(evt): self.panel.evtList = self.evtList self.Close() btn1.Bind(wx.EVT_BUTTON,onOK) sizer.Layout() self.Raise() self.Show() #=============================================================================== class Menu(wx.Frame): def __init__(self): wx.Frame.__init__( self, None, -1, 'MPC_menu', style = wx.STAY_ON_TOP|wx.SIMPLE_BORDER ) self.flag = False self.monitor = 0 self.oldMenu = [] def DrawMenu(self, ix): self.Show(False) self.menuGridCtrl.SetGridCursor(ix, 1) self.menuGridCtrl.SelectRow(ix) monDim = GetMonitorDimensions() try: x,y,ws,hs = monDim[self.monitor] except IndexError: x,y,ws,hs = monDim[0] # menu height calculation: h=self.GetCharHeight()+4 for i in range(len(self.choices)): self.menuGridCtrl.SetRowSize(i,h) self.menuGridCtrl.SetCellValue(i,1," "+self.choices[i]) if self.items[i][3] == -1: self.menuGridCtrl.SetCellValue(i,2, u"\u25ba") height0 = len(self.choices)*h height1 = h*((hs-20)/h) height = min(height0, height1)+6 # menu width calculation: width_lst=[] for item in self.choices: width_lst.append(self.GetTextExtent(item+' ')[0]) width = max(width_lst)+8 self.menuGridCtrl.SetColSize(0,self.w0) self.menuGridCtrl.SetColSize(1,width) self.menuGridCtrl.SetColSize(2,self.w2) self.menuGridCtrl.ForceRefresh() width = width + self.w0 + self.w2 if height1 < height0: width += SYS_VSCROLL_X if width > ws-50: if height + SYS_HSCROLL_Y < hs: height += SYS_HSCROLL_Y width = ws-50 width += 6 x_pos = x + (ws - width)/2 y_pos = y + (hs - height)/2 self.SetDimensions(x_pos,y_pos,width,height) 
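        # The grid is inset a couple of pixels so the frame shows as a thin
        # border around the on-screen menu (hence the width-6/height-6).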
self.menuGridCtrl.SetDimensions(2, 2, width-6, height-6, wx.SIZE_AUTO) self.Show(True) self.Raise() def ShowMenu( self, fore, back, foreSel, backSel, fontInfo, flag, plugin, event, monitor, hWnd, evtList, ix, ): self.fore = fore self.back = back self.foreSel = foreSel self.backSel = backSel self.fontInfo = fontInfo self.flag = flag self.plugin = plugin self.monitor = monitor self.hWnd = hWnd self.evtList = evtList eg.TriggerEvent("OnScreenMenu.%s" % self.plugin.text.opened, prefix = "RadioSure") for evt in self.evtList[0]: eg.Bind(evt, self.onUp) for evt in self.evtList[1]: eg.Bind(evt, self.onDown) for evt in self.evtList[2]: eg.Bind(evt, self.onLeft) for evt in self.evtList[3]: eg.Bind(evt, self.onRight) for evt in self.evtList[4]: eg.Bind(evt, self.onEscape) self.menuHwnd, self.menu = self.plugin.GetRS_Menu(self.hWnd) self.items = self.plugin.GetItemList(self.menuHwnd, self.menu) self.choices = [item[0] for item in self.items] self.menuGridCtrl = MenuGrid(self, len(self.choices)) mainSizer = wx.BoxSizer(wx.VERTICAL) self.SetSizer(mainSizer) mainSizer.Add(self.menuGridCtrl, 0, wx.EXPAND) self.Bind(wx.EVT_CLOSE, self.onClose) self.Bind(gridlib.EVT_GRID_CMD_CELL_LEFT_DCLICK, self.onDoubleClick, self.menuGridCtrl) self.Bind(wx.EVT_CHAR_HOOK, self.onFrameCharHook) font = wx.FontFromNativeInfoString(fontInfo) self.menuGridCtrl.SetFont(font) arial = wx.FontFromNativeInfoString(ARIAL_INFO) self.SetFont(font) hght = self.GetTextExtent('X')[1] for n in range(1,1000): arial.SetPointSize(n) self.SetFont(arial) h = self.GetTextExtent(u"\u25a0")[1] if h > hght: break arial.SetPointSize(2*n/3) self.SetFont(arial) self.w0 = 2 * self.GetTextExtent(u"\u25a0")[0] attr = gridlib.GridCellAttr() attr.SetFont(arial) attr.SetAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE) self.menuGridCtrl.SetColAttr(0,attr) for n in range(1,1000): arial.SetPointSize(n) self.SetFont(arial) h = self.GetTextExtent(u"\u25ba")[1] if h > hght: break arial.SetPointSize(n/2) self.SetFont(arial) self.w2 = 2 * self.GetTextExtent(u"\u25ba")[0] attr = gridlib.GridCellAttr() attr.SetFont(arial) attr.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE) self.menuGridCtrl.SetColAttr(2,attr) self.SetFont(font) self.SetBackgroundColour((0, 0, 0)) self.menuGridCtrl.SetBackgroundColour(self.back) self.menuGridCtrl.SetForegroundColour(self.fore) self.menuGridCtrl.SetSelectionBackground(self.backSel) self.menuGridCtrl.SetSelectionForeground(self.foreSel) if self.flag: self.timer=MyTimer(t = 5.0, plugin = self.plugin) self.menuGridCtrl.Set(self.items) self.UpdateMenu(ix == 0, ix) wx.Yield() SetEvent(event) def UpdateMenu(self, root = False, ix = 0): if root: self.menuHwnd, self.menu = self.plugin.GetRS_Menu(self.hWnd) else: self.menuHwnd, self.menu = self.GetSubMenuExt(self.hWnd, ix) ix = 0 self.items = self.plugin.GetItemList(self.menuHwnd, self.menu) if len(self.items)==0: PlaySound('SystemExclamation', SND_ASYNC) eg.PrintError("Please report: %i, %i, %i, %i" % (ix, int(root), self.menuHwnd, self.menu)) #self.menu,ix = self.oldMenu.pop() #self.items = self.plugin.GetItemList(self.hWnd, self.menu) self.choices = [item[0] for item in self.items] self.menuGridCtrl.Set(self.items) self.DrawMenu(ix) def MoveCursor(self, step): max=len(self.choices) if max > 0: self.menuGridCtrl.MoveCursor(step) def onUp(self, event): wx.CallAfter(self.menuGridCtrl.MoveCursor, -1) def onDown(self, event): wx.CallAfter(self.menuGridCtrl.MoveCursor, 1) def onLeft(self, event): if len(self.oldMenu) > 0: ix = self.oldMenu.pop() wx.CallAfter(self.UpdateMenu, True, ix) else: 
wx.CallAfter(self.destroyMenu) def onRight(self, event): wx.CallAfter(self.DefaultAction) def onEscape(self, event): wx.CallAfter(self.destroyMenu) def GetSubMenuExt(self, hWnd, ix): menu, hMenu = self.plugin.GetRS_Menu(hWnd) if menu: hMenu = GetSubMenu(hMenu, ix) return (menu, hMenu) def DefaultAction(self): sel = self.menuGridCtrl.GetSelection() item = self.items[sel] id = item[3] if id != -1: self.destroyMenu() SendMessage(self.hWnd, WM_COMMAND, id, 0) else: self.oldMenu.append(sel) wx.CallAfter(self.UpdateMenu, False, item[1]) def onFrameCharHook(self, event): keyCode = event.GetKeyCode() if keyCode == wx.WXK_F4: if event.AltDown(): self.destroyMenu() elif keyCode == wx.WXK_RETURN or keyCode == wx.WXK_NUMPAD_ENTER: self.DefaultAction() elif keyCode == wx.WXK_RIGHT or keyCode == wx.WXK_NUMPAD_RIGHT: self.DefaultAction() elif keyCode == wx.WXK_ESCAPE: self.destroyMenu() elif keyCode == wx.WXK_UP or keyCode == wx.WXK_NUMPAD_UP: self.menuGridCtrl.MoveCursor(-1) elif keyCode == wx.WXK_DOWN or keyCode == wx.WXK_NUMPAD_DOWN: self.menuGridCtrl.MoveCursor(1) elif keyCode == wx.WXK_LEFT or keyCode == wx.WXK_NUMPAD_LEFT: if len(self.oldMenu) > 0: ix = self.oldMenu.pop() wx.CallAfter(self.UpdateMenu, True, ix) else: self.destroyMenu() else: event.Skip() def onDoubleClick(self, event): self.DefaultAction() event.Skip() def onClose(self, event): self.Show(False) self.Destroy() self.plugin.menuDlg = None def destroyMenu(self, event = None): for evt in self.evtList[0]: eg.Unbind(evt, self.onUp) for evt in self.evtList[1]: eg.Unbind(evt, self.onDown) for evt in self.evtList[2]: eg.Unbind(evt, self.onLeft) for evt in self.evtList[3]: eg.Unbind(evt, self.onRight) for evt in self.evtList[4]: eg.Unbind(evt, self.onEscape) if self.flag: self.timer.Cancel() eg.TriggerEvent("OnScreenMenu.%s" % self.plugin.text.closed, prefix = "RadioSure") self.Close() #=============================================================================== class RadioSure(eg.PluginBase): text=Text menuDlg = None RadioSurePath = u'' xmlPath = u'' data = [] tmpData = [] dialog = None manager = None Favorites = [] History = [] Current = ['',''] FavIx = -1 HistIx = -1 List = None maxFav = None submenus = None def GetRS_Menu(self, hwnd): WM_CONTEXTMENU = 0x007B OBJID_CLIENT = 0xFFFFFFFC class RECT(Structure): _fields_ = [ ('left', c_long), ('top', c_long), ('right', c_long), ('bottom', c_long), ] class MENUBARINFO(Structure): _fields_ = [ ('cbSize', c_ulong), ('rcBar', RECT), # rect of bar, popup, item ('hMenu', c_long), # real menu handle of bar, popup ('hwndMenu', c_long), # hwnd of item submenu if one ('fBarFocused', c_int, 1), # bar, popup has the focus ('fFocused', c_int, 1), # item has the focus ] findMenu = eg.WindowMatcher( u'RadioSure.exe', None, u'#32768', None, None, None, True, 0.0, 0 ) PostMessage(hwnd, WM_CONTEXTMENU, hwnd, 0x00010001) menu = [] i = 0 while len(menu) == 0: menu = findMenu() i+=1 if i > 1000: break if menu: menu = menu[0] mbi = MENUBARINFO() mbi.cbSize = sizeof(mbi) if _user32.GetMenuBarInfo( menu, OBJID_CLIENT, 0, byref(mbi) ): return (menu, mbi.hMenu) return (None, None) def GetItemList(self, hWnd, hMenu): WM_INITMENUPOPUP = 0x0117 MF_BYPOSITION = 1024 MF_GRAYED = 1 MF_DISABLED = 2 MF_CHECKED = 8 MF_SEPARATOR = 2048 SendMessage(hWnd, WM_INITMENUPOPUP, hMenu, 0) #REFRESH MENU STATE !!! 
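        # WM_INITMENUPOPUP forces RadioSure to recompute the checked/grayed
        # state of every item before the states are read back below.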
itemList = [] itemName = c_buffer("\000" * 128) count = GetMenuItemCount(hMenu) for i in range(count): _user32.GetMenuStringA(c_int(hMenu), c_int(i), itemName, c_int(len(itemName)), MF_BYPOSITION) hMenuState = _user32.GetMenuState(c_int(hMenu), c_int(i), MF_BYPOSITION) id = _user32.GetMenuItemID(c_int(hMenu), c_int(i)) # if hMenuState & (MF_GRAYED|MF_DISABLED|MF_SEPARATOR): if hMenuState & (MF_GRAYED|MF_DISABLED): continue item = itemName.value.replace("&","").split("\t")[0] if item == "" and id == 0: continue checked = bool(hMenuState & MF_CHECKED) itemList.append((item, i, checked, id)) PostMessage(hWnd, WM_CLOSE, 0, 0) return itemList def GetLanguageXml(self): xmltoparse = u'%s\\RadioSure.xml' % self.xmlpath xmltoparse = xmltoparse.encode(FSE) if isinstance(xmltoparse, unicode) else xmltoparse xmldoc = miniDom.parse(xmltoparse) general = xmldoc.getElementsByTagName('General') if general: #NOTE: don't use general[0].getElementsByTagName('Language') !!!!!!!!!!!!!! langNodes = [node for node in general[0].childNodes if node.localName =="Language"] if langNodes: langFile = abspath(join(self.RadioSurePath+"\\Lang", langNodes[0].firstChild.data)) langFile = langFile.encode(FSE) if isinstance(langFile, unicode) else langFile languageXml = miniDom.parse(langFile) return languageXml def GetOneInstance(self): xmltoparse = u'%s\\RadioSure.xml' % self.xmlpath xmltoparse = xmltoparse.encode(FSE) if isinstance(xmltoparse, unicode) else xmltoparse xmldoc = miniDom.parse(xmltoparse) advanced = xmldoc.getElementsByTagName('Advanced') if advanced: oneInstance = advanced[0].getElementsByTagName('One_instance')[0].firstChild.data return oneInstance def GetStrings(self): language = self.GetLanguageXml() if language: res = {} mainWindow = language.getElementsByTagName('MainWindow') res['stop'] = mainWindow[0].getElementsByTagName('Stop')[0].firstChild.data res['unmute'] = mainWindow[0].getElementsByTagName('Unmute')[0].firstChild.data res['stopRec'] = mainWindow[0].getElementsByTagName('StopRecording')[0].firstChild.data #res['play'] = mainWindow[0].getElementsByTagName('Play')[0].firstChild.data #res['mute'] = mainWindow[0].getElementsByTagName('Mute')[0].firstChild.data #res['rec'] = mainWindow[0].getElementsByTagName('Recording')[0].firstChild.data return res def GetSubmenuStrings(self): choices = [self.text.root] language = self.GetLanguageXml() if language: mainWindow = language.getElementsByTagName('MainWindow') favorites = language.getElementsByTagName('Favorites') equaliser = language.getElementsByTagName('EQUALIZER') sleeptimer = language.getElementsByTagName('SleepTimer') choices.append(favorites[0].getElementsByTagName('Title')[0].firstChild.data) choices.append(mainWindow[0].getElementsByTagName('Back')[0].firstChild.data) choices.append(equaliser[0].getElementsByTagName('Title')[0].firstChild.data) choices.append(mainWindow[0].getElementsByTagName('WindowMenu')[0].firstChild.data) choices.append(mainWindow[0].getElementsByTagName('ClipboardMenu')[0].firstChild.data) choices.append(sleeptimer[0].getElementsByTagName('Title')[0].firstChild.data) choices.append(mainWindow[0].getElementsByTagName('Language')[0].firstChild.data) return choices def GetRS_Status(self, hwnd): menu, hMenu = self.GetRS_Menu(hwnd) if menu: menuItems = self.GetItemList(menu, hMenu) #PostMessage(menu, WM_CLOSE, 0, 0) strings = self.GetStrings() if menuItems and strings: res = [ strings['stop'] == menuItems[0][0], # Playing strings['unmute'] == menuItems[1][0], # Muted strings['stopRec'] == menuItems[2][0], # Recording 
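                # The three booleans above are inferred from the localized menu
                # captions: while playing, the first item reads "Stop"; while
                # muted, the second reads "Unmute"; while recording, the third
                # reads "Stop recording".  The fourth entry's check mark gives
                # the "record only current track" flag below.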
menuItems[3][2] # Record only current track ] return res def GetMenuItem(self, hwnd, indx): # indx = 7 for Fav, 8 for Hist, 9 for Equalizer menu, hMenu = self.GetRS_Menu(hwnd) if menu: hMenu = GetSubMenu(hMenu, indx) menuItems = self.GetItemList(menu, hMenu) flags = [item[2] for item in menuItems] if True in flags: ix = flags.index(True) return (ix, menuItems[ix][0]) return (-1, "") def RefreshVariables(self): xmltoparse = u'%s\\RadioSure.xml' % self.xmlpath xmltoparse = xmltoparse.encode(FSE) if isinstance(xmltoparse, unicode) else xmltoparse if not exists(xmltoparse): return xmldoc = miniDom.parse(xmltoparse) lastPlayed = xmldoc.getElementsByTagName('LastPlayed') if lastPlayed: lastPlayed=lastPlayed[0] src = lastPlayed.getElementsByTagName('Source') if src: src = src[0].firstChild.data else: src = "" ttl = lastPlayed.getElementsByTagName('Title') if ttl: ttl = ttl[0].firstChild.data else: ttl = "" self.Current = [src, ttl] else: self.Current = ["", ""] histNode = xmldoc.getElementsByTagName('History') if histNode: self.History = getStations(histNode[0]) else: self.History = [] favNode = xmldoc.getElementsByTagName('Favorites') if favNode: self.Favorites = getStations(favNode[0]) else: self.Favorites = [] tmp = [item[:2] for item in self.Favorites] if self.Current in tmp: self.FavIx = tmp.index(self.Current) else: self.FavIx = -1 tmp = [item[:2] for item in self.History] if self.Current in tmp: self.HistIx = tmp.index(self.Current) else: self.HistIx = -1 return self.Favorites def NextRun(self, type, data): def FindRunDateTime(runList, cond): runList.sort() runDateTime = "" if len(runList) > 0: if not cond: return runList[0] found = False for item in runList: if item.weekday() > 4: found = True break else: if (item.month, item.day) in self.holidays[0]: pass elif (item.year, item.month, item.day) in self.holidays[1]: pass else: found = True break if found: runDateTime = item return runDateTime now = dt.now() now = now.replace(microsecond = 0) + td(seconds = 2) runTime = dt.strptime(data[0], "%H:%M:%S").time() if type == 0: # once or yearly runDate = dt.strptime(data[2], '%Y-%m-%d') runDateTime = dt.combine(runDate, runTime) if now < runDateTime: return str(runDateTime) elif not data[3]: return "" else: if runDateTime.replace(year = now.year) < now: return str(runDateTime.replace(year = now.year + 1)) else: return str(runDateTime.replace(year = now.year)) elif type == 1: # daily runDateTime = dt.combine(now.date(), runTime) if now.time() > runTime: runDateTime += td(days = 1) return str(runDateTime) elif type == 2: # weekly if not data[2]: return "" runDateTime = dt.combine(now.date(), runTime) weekdaysLower = [] weekdaysLarger = [] nowDay = now.weekday() for weekday in range(7): if 2**weekday & data[2]: if weekday < nowDay or (weekday == nowDay and now.time() > runTime): weekdaysLower.append(weekday) else: weekdaysLarger.append(weekday) if not data[4] and not data[3]: # without holiday check if len(weekdaysLarger) > 0: delta = weekdaysLarger[0] - nowDay return str(runDateTime + td(days = delta)) delta = 7 + weekdaysLower[0] - nowDay return str(runDateTime + td(days = delta)) elif data[4]: # holiday check found = False shift = 0 while True: for day in weekdaysLarger: delta = day + shift - nowDay tmpRunDT = runDateTime + td(days = delta) if tmpRunDT.weekday() > 4: # weekend found = True break else: # workday if (tmpRunDT.month, tmpRunDT.day) in self.holidays[0]: pass elif (tmpRunDT.year, tmpRunDT.month, tmpRunDT.day) in self.holidays[1]: pass else: found = True break if found: break shift += 
7 for day in weekdaysLower: delta = day + shift - nowDay tmpRunDT = runDateTime + td(days = delta) if tmpRunDT.weekday() > 4: # weekend found = True break else: # workday if (tmpRunDT.month, tmpRunDT.day) in self.holidays[0]: pass elif (tmpRunDT.year, tmpRunDT.month, tmpRunDT.day) in self.holidays[1]: pass else: found = True break if found: break return str(tmpRunDT) else: # holiday_2 check if len(weekdaysLarger) > 0: Delta = weekdaysLarger[0] - nowDay else: Delta = 7 + weekdaysLower[0] - nowDay start = 0 if now.time() < runTime else 1 found = False for delta in range(start, Delta): tmpRunDT = runDateTime + td(days = delta) if tmpRunDT.weekday() < 5: if (tmpRunDT.month, tmpRunDT.day) in self.holidays[0]: found = True break elif (tmpRunDT.year, tmpRunDT.month, tmpRunDT.day) in self.holidays[1]: found = True break return str(tmpRunDT if found else runDateTime + td(days = Delta)) elif type == 3: # monthly/weekday if data[2] == 0 or data[3] == 0 or (data[4] + data[5]) == 0: return "" currMonth = now.month currYear = now.year monthsInt = data[4] + (data[5] << 6) months = [] for month in range(1,13): if 2 ** (month - 1) & monthsInt: months.append(month) if currMonth in months: runList = [] for ix in range(6): if 2 ** ix & data[2]: for weekday in range(7): if 2 ** weekday & data[3]: day = FindMonthDay(currYear, currMonth, weekday, ix) if day: runDateTime = dt.combine(dt(currYear, currMonth, day).date(), runTime) if now < runDateTime: runList.append(runDateTime) tmpRunDT = FindRunDateTime(runList, data[6]) if tmpRunDT: return str(tmpRunDT) lower = [] larger = [] for month in months: if month > currMonth: larger.append(month) else: #month <= currMonth: lower.append(month) year = currYear tmpRunDT = None while True: for month in larger: runList = [] for ix in range(6): if 2 ** ix & data[2]: for weekday in range(7): if 2 ** weekday & data[3]: day = FindMonthDay(year, month, weekday, ix) if day: runDateTime = dt.combine(dt(year, month, day).date(), runTime) runList.append(runDateTime) tmpRunDT = FindRunDateTime(runList, data[6]) if tmpRunDT: break if tmpRunDT: break year += 1 for month in lower: runList = [] for ix in range(6): if 2 ** ix & data[2]: for weekday in range(7): if 2 ** weekday & data[3]: day=FindMonthDay(year, month, weekday, ix) if day: runDateTime = dt.combine(dt(year, month, day).date(), runTime) runList.append(runDateTime) tmpRunDT = FindRunDateTime(runList, data[6]) if tmpRunDT: break if tmpRunDT: break return str(tmpRunDT) elif type == 4: #monthly/day if (data[2] + data[3] + data[4] + data[5]) == 0 or (data[6] + data[7]) == 0: return "" runList = [] currMonth = now.month currYear = now.year monthsInt = data[6] + (data[7] << 6) daysInt = data[2] + (data[3] << 8) + (data[4] << 16) + (data[5] << 24) days = [] for day in range(1, 32): if 2 ** (day - 1) & daysInt: days.append(day) months = [] for month in range(1, 13): if 2 ** (month - 1) & monthsInt: months.append(month) if currMonth in months: for day in days: if day > monthrange(currYear, currMonth)[1]: break runDateTime = dt.combine(dt(currYear, currMonth, day).date(), runTime) if now < runDateTime: runList.append(runDateTime) if len(runList) == 0: lower = [] larger = [] nextMonth = None for month in months: if month > currMonth: larger.append(month) else: #month<=currMonth: lower.append(month) if len(larger) > 0: nextYear = currYear for month in larger: for day in days: if day > monthrange(nextYear, month)[1]: break runDateTime = dt.combine(dt(nextYear, month, day).date(), runTime) runList.append(runDateTime) if len(runList) == 0 
and len(lower) > 0: nextYear = currYear + 1 for month in lower: for day in days: if day > monthrange(nextYear, month)[1]: break runDateTime = dt.combine(dt(nextYear, month, day).date(), runTime) runList.append(runDateTime) if len(runList) > 0: return str(min(runList)) else: return "" else: #type == 5: #periodically runDate = dt.strptime(data[2], '%Y-%m-%d') runDateTime = dt.combine(runDate, runTime) if now < runDateTime: return str(runDateTime) elif data[4] == 0: #unit = hour period = data[3] * 3600 if period < 86400 and not 86400 % period: if now.time() > runTime: date = now.date() else: date = now.date() - td(days = 1) runDateTime = dt.combine(date, runTime) delta = now - runDateTime delta = delta.seconds + 86400 * delta.days share = delta / period share += 1 delta = td(seconds = share * period) return str(runDateTime + delta) elif data[4] == 1 or data[4] == 2: #unit = day or week period = data[3] if data[4] == 1 else 7 * data[3] delta = (now - runDateTime).days share = delta / period if not delta % period: if now.time() < runTime: return str(dt.combine(now.date(), runTime)) share += 1 delta = td(days = share * period) return str(runDateTime + delta) elif data[4] == 3: #unit = month period = data[3] month = runDateTime.month year = runDateTime.year while now > runDateTime: year += period / 12 m = month+period % 12 if m > 12: year += 1 month = m % 12 else: month = m runDateTime = runDateTime.replace(year = year).replace(month = month) return str(runDateTime) else: # data[4] == 6: #unit = year period = data[3] year = runDateTime.year while now > runDateTime: year += period runDateTime = runDateTime.replace(year = year) return str(runDateTime) def updateLogFile(self, line, blank = False): if not self.logfile: return f = openFile(self.logfile, encoding='utf-8', mode='a') if blank: f.write("\r\n") f.write("%s %s\r\n" % (str(dt.now())[:19], line)) f.close() def Execute(self, params, immed = False): next = self.NextRun(params[2], params[3]) modes = params[7] playRec = modes & 6 if playRec != 6: args = [u'%s\\RadioSure.exe' % self.RadioSurePath,] if playRec: args.append("/record") else: args.append("/play") if playRec == 4: args.append("/mute") if modes & 1: args.append("/hidden") args.append(u'/source="%s"' % params[5]) duration = 60*int(params[3][1][:2])+int(params[3][1][-2:]) if duration: args.append('/duration=%i' % duration) if params[6]: recfile = eg.ParseString(params[6]) try: recfile = eval(recfile) except: pass args.append(u'/filename="%s"' % recfile) elif playRec: args.append(u'/filename="%s"' % params[1]) Popen(args) if not immed and next: # new schedule, if valid next run time and not TEST/IMMEDIATELY run startTicks = mktime(strptime(next, "%Y-%m-%d %H:%M:%S")) eg.scheduler.AddTaskAbsolute(startTicks, self.RadioSureScheduleRun, params[1]) triggEvt = modes & 24 if triggEvt == 8: eg.TriggerEvent(self.text.launched, prefix = "RadioSure", payload = params[1]) elif triggEvt == 16: eg.TriggerEvent(self.text.launched, prefix = "RadioSure", payload = params) return (next, my_list2cmdline(args)) def RadioSureScheduleRun(self, schedule): data = self.data ix = [item[1] for item in data].index(schedule) next, cmdline = self.Execute(data[ix]) last = str(dt.now())[:19] self.data[ix][4] = last if self.dialog: tmpList = [item[1] for item in self.tmpData] if schedule in tmpList: ixTmp = tmpList.index(schedule) self.tmpData[ixTmp][4] = last self.dialog.RefreshGrid(ixTmp, last, next) nxt = next[:19] if next else self.text.none self.updateLogFile(self.text.execut % (data[ix][1], nxt)) 
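        # The command line built by Execute() looks roughly like this
        # (station name and output file are hypothetical):
        #   RadioSure.exe /record /hidden /source="BBC Radio 4"
        #                 /duration=60 /filename="D:\rec\news.mp3"
        # i.e. play or record, optional /mute and /hidden, the station source,
        # the /duration in minutes (HH:MM folded) and an optional output file.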
self.updateLogFile(self.text.cmdLine % cmdline) def UpdateEGscheduler(self): data = self.data tmpList = [] sched_list = eg.scheduler.__dict__['heap'] for sched in sched_list: if sched[1] == self.RadioSureScheduleRun: if sched[2][0] in [item[1] for item in data]: tmpList.append(sched) else: self.updateLogFile(self.text.cancAndDel % sched[2][0]) eg.scheduler.CancelTask(sched) sched_list = tmpList for schedule in data: startMoment = self.NextRun(schedule[2], schedule[3]) if not startMoment: continue startTicks = mktime(strptime(startMoment,"%Y-%m-%d %H:%M:%S")) nameList = [item[2][0] for item in sched_list] if schedule[1] in nameList: sched = sched_list[nameList.index(schedule[1])] if not schedule[0]: # schedule is disabled ! eg.scheduler.CancelTask(sched) self.updateLogFile(self.text.cancAndDis % schedule[1]) elif sched[0] != startTicks: #Re-schedule self.updateLogFile(self.text.re_Sched % (schedule[1], startMoment)) eg.scheduler.CancelTask(sched) eg.scheduler.AddTaskAbsolute(startTicks, self.RadioSureScheduleRun, schedule[1]) elif schedule[0]: #New schedule eg.scheduler.AddTaskAbsolute(startTicks, self.RadioSureScheduleRun, schedule[1]) self.updateLogFile(self.text.newSched % (schedule[1], startMoment)) def dataToXml(self): impl = miniDom.getDOMImplementation() dom = impl.createDocument(None, u'Document', None) root = dom.documentElement commentNode = dom.createComment(self.text.xmlComment % str(dt.now())[:19]) dom.insertBefore(commentNode, root) for item in self.data: schedNode = dom.createElement(u'Schedule') schedNode.setAttribute(u'Name', unicode(item[1])) schedNode.setAttribute(u'Type', unicode(item[2])) enableNode = dom.createElement(u'Enable') enableText = dom.createTextNode(unicode(item[0])) enableNode.appendChild(enableText) schedNode.appendChild(enableNode) last_runNode = dom.createElement(u'Last_run') last_runText = dom.createTextNode(unicode(item[4])) last_runNode.appendChild(last_runText) schedNode.appendChild(last_runNode) sourceNode = dom.createElement(u'Source') sourceText = dom.createTextNode(unicode(item[5])) sourceNode.appendChild(sourceText) schedNode.appendChild(sourceNode) filenameNode = dom.createElement(u'Filename') filenameText = dom.createTextNode(unicode(item[6])) filenameNode.appendChild(filenameText) schedNode.appendChild(filenameNode) modesNode = dom.createElement(u'Modes') modesText = dom.createTextNode(unicode(item[7])) modesNode.appendChild(modesText) schedNode.appendChild(modesNode) dateTimeNode = dom.createElement(u'Datetime') start_timeNode = dom.createElement(u'Start_time') start_timeText = dom.createTextNode(unicode(item[3][0])) start_timeNode.appendChild(start_timeText) dateTimeNode.appendChild(start_timeNode) durationNode = dom.createElement(u'Duration') durationText = dom.createTextNode(unicode(item[3][1])) durationNode.appendChild(durationText) dateTimeNode.appendChild(durationNode) if item[2] == 0: dateNode = dom.createElement(u'Date') dateText = dom.createTextNode(unicode(item[3][2])) dateNode.appendChild(dateText) dateTimeNode.appendChild(dateNode) yearlyNode = dom.createElement(u'Yearly') yearlyText = dom.createTextNode(unicode(item[3][3])) yearlyNode.appendChild(yearlyText) dateTimeNode.appendChild(yearlyNode) if item[2] == 2: weekdayNode = dom.createElement(u'Weekday') weekdayText = dom.createTextNode(unicode(item[3][2])) weekdayNode.appendChild(weekdayText) dateTimeNode.appendChild(weekdayNode) holidayNode = dom.createElement(u'HolidayCheck') holidayText = dom.createTextNode(unicode(item[3][4])) holidayNode.appendChild(holidayText) 
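                # NB: HolidayCheck persists item[3][4] while HolidayCheck_2
                # persists item[3][3]; xmlToData() reads them back into the
                # same slots, so the round trip stays consistent even though
                # the element order differs from the list order.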
dateTimeNode.appendChild(holidayNode) holiday2Node = dom.createElement(u'HolidayCheck_2') holiday2Text = dom.createTextNode(unicode(item[3][3])) holiday2Node.appendChild(holiday2Text) dateTimeNode.appendChild(holiday2Node) if item[2] == 3: orderNode = dom.createElement(u'Order') orderText = dom.createTextNode(unicode(item[3][2])) orderNode.appendChild(orderText) dateTimeNode.appendChild(orderNode) weekdayNode = dom.createElement(u'Weekday') weekdayText = dom.createTextNode(unicode(item[3][3])) weekdayNode.appendChild(weekdayText) dateTimeNode.appendChild(weekdayNode) first_halfNode = dom.createElement(u'First_half') first_halfText = dom.createTextNode(unicode(item[3][4])) first_halfNode.appendChild(first_halfText) dateTimeNode.appendChild(first_halfNode) second_halfNode = dom.createElement(u'Second_half') second_halfText = dom.createTextNode(unicode(item[3][5])) second_halfNode.appendChild(second_halfText) dateTimeNode.appendChild(second_halfNode) holidayNode = dom.createElement(u'HolidayCheck') holidayText = dom.createTextNode(unicode(item[3][6])) holidayNode.appendChild(holidayText) dateTimeNode.appendChild(holidayNode) if item[2] == 4: q_1_Node = dom.createElement(u'Q_1') q_1_Text = dom.createTextNode(unicode(item[3][2])) q_1_Node.appendChild(q_1_Text) dateTimeNode.appendChild(q_1_Node) q_2_Node = dom.createElement(u'Q_2') q_2_Text = dom.createTextNode(unicode(item[3][3])) q_2_Node.appendChild(q_2_Text) dateTimeNode.appendChild(q_2_Node) q_3_Node = dom.createElement(u'Q_3') q_3_Text = dom.createTextNode(unicode(item[3][4])) q_3_Node.appendChild(q_3_Text) dateTimeNode.appendChild(q_3_Node) q_4_Node = dom.createElement(u'Q_4') q_4_Text = dom.createTextNode(unicode(item[3][5])) q_4_Node.appendChild(q_4_Text) dateTimeNode.appendChild(q_4_Node) first_halfNode = dom.createElement(u'First_half') first_halfText = dom.createTextNode(unicode(item[3][6])) first_halfNode.appendChild(first_halfText) dateTimeNode.appendChild(first_halfNode) second_halfNode = dom.createElement(u'Second_half') second_halfText = dom.createTextNode(unicode(item[3][7])) second_halfNode.appendChild(second_halfText) dateTimeNode.appendChild(second_halfNode) if item[2] == 5: dateNode = dom.createElement(u'Date') dateText = dom.createTextNode(unicode(item[3][2])) dateNode.appendChild(dateText) dateTimeNode.appendChild(dateNode) numberNode = dom.createElement(u'Number') numberText = dom.createTextNode(unicode(item[3][3])) numberNode.appendChild(numberText) dateTimeNode.appendChild(numberNode) unitNode = dom.createElement(u'Unit') unitText = dom.createTextNode(unicode(item[3][4])) unitNode.appendChild(unitText) dateTimeNode.appendChild(unitNode) schedNode.appendChild(dateTimeNode) root.appendChild(schedNode) f = file(u'%s\\Scheduler.xml' % self.xmlpath, 'wb') writer = lookup('utf-8')[3](f) dom.writexml(writer, encoding = 'utf-8') f.close() return f.closed def xmlToData(self): data = [] xmlfile = u'%s\\Scheduler.xml' % self.xmlpath if not exists(xmlfile): return data xmldoc = miniDom.parse(xmlfile) document = xmldoc.getElementsByTagName('Document')[0] schedules = tuple(document.getElementsByTagName('Schedule')) for schedule in schedules: dataItem = [] enable = int(schedule.getElementsByTagName('Enable')[0].firstChild.data) dataItem.append(enable) name = schedule.attributes["Name"].value dataItem.append(name) schedType = int(schedule.attributes["Type"].value) dataItem.append(schedType) dateTime = schedule.getElementsByTagName('Datetime')[0] params = [] start_time = dateTime.getElementsByTagName('Start_time')[0].firstChild.data 
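            # A serialized entry (as written by dataToXml above) looks roughly
            # like this -- values are hypothetical, Type selects which extra
            # Datetime children exist:
            #   <Schedule Name="Morning news" Type="1">
            #     <Enable>1</Enable><Last_run>...</Last_run>
            #     <Source>...</Source><Filename>...</Filename><Modes>5</Modes>
            #     <Datetime>
            #       <Start_time>07:00:00</Start_time><Duration>01:00</Duration>
            #     </Datetime>
            #   </Schedule>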
params.append(start_time) duration = dateTime.getElementsByTagName('Duration')[0].firstChild.data params.append(duration) if schedType == 0: date = dateTime.getElementsByTagName('Date')[0].firstChild.data params.append(date) date = int(dateTime.getElementsByTagName('Yearly')[0].firstChild.data) params.append(date) if schedType == 2: weekday = int(dateTime.getElementsByTagName('Weekday')[0].firstChild.data) params.append(weekday) holiday2 = int(dateTime.getElementsByTagName('HolidayCheck_2')[0].firstChild.data) params.append(holiday2) holiday = int(dateTime.getElementsByTagName('HolidayCheck')[0].firstChild.data) params.append(holiday) if schedType == 3: order = int(dateTime.getElementsByTagName('Order')[0].firstChild.data) params.append(order) weekday = int(dateTime.getElementsByTagName('Weekday')[0].firstChild.data) params.append(weekday) first_half = int(dateTime.getElementsByTagName('First_half')[0].firstChild.data) params.append(first_half) second_half = int(dateTime.getElementsByTagName('Second_half')[0].firstChild.data) params.append(second_half) holiday = int(dateTime.getElementsByTagName('HolidayCheck')[0].firstChild.data) params.append(holiday) if schedType == 4: q_1 = int(dateTime.getElementsByTagName('Q_1')[0].firstChild.data) params.append(q_1) q_2 = int(dateTime.getElementsByTagName('Q_2')[0].firstChild.data) params.append(q_2) q_3 = int(dateTime.getElementsByTagName('Q_3')[0].firstChild.data) params.append(q_3) q_4 = int(dateTime.getElementsByTagName('Q_4')[0].firstChild.data) params.append(q_4) first_half = int(dateTime.getElementsByTagName('First_half')[0].firstChild.data) params.append(first_half) second_half = int(dateTime.getElementsByTagName('Second_half')[0].firstChild.data) params.append(second_half) if schedType == 5: date = dateTime.getElementsByTagName('Date')[0].firstChild.data params.append(date) number = int(dateTime.getElementsByTagName('Number')[0].firstChild.data) params.append(number) unit = int(dateTime.getElementsByTagName('Unit')[0].firstChild.data) params.append(unit) dataItem.append(params) last_run = schedule.getElementsByTagName('Last_run')[0].firstChild last_run = last_run.data if last_run else " " dataItem.append(last_run) source = schedule.getElementsByTagName('Source')[0].firstChild source = source.data if source else "" dataItem.append(source) filename = schedule.getElementsByTagName('Filename')[0].firstChild filename = filename.data if filename else "" dataItem.append(filename) modes = schedule.getElementsByTagName('Modes')[0].firstChild.data dataItem.append(int(modes)) data.append(dataItem) return data def GetStatusRS(self, hwnds = None): hwnds = hwnds or HandleRS() maxFav = None recording = None if hwnds: for hwnd in hwnds: try: maxFav = SendMessageTimeout(hwnd, self.favMesg, 0, 0) recording = SendMessageTimeout(hwnd, self.recMesg, 0, 0) except: #raise pass if maxFav is not None and recording is not None: #pass break if maxFav is not None and recording is not None: return (maxFav, recording) else: return (None, None) def GetNewHwnd(self, oldHwnds = [], src = None, hid = False, mut = False): hwnds = HandleRS() if len(hwnds) > 0 and self.GetOneInstance(): wx.CallAfter( MessageBox, None, self.text.message3, self.text.messBoxTit1, wx.ICON_EXCLAMATION, 15, plugin = self, ) return [] maxInst = 2 if self.maxFav == 30 else 10 if len(oldHwnds) >= maxInst: wx.CallAfter( MessageBox, None, self.text.message2 % maxInst, self.text.messBoxTit1, wx.ICON_EXCLAMATION, 15, plugin = self, ) return [] i = 0 hwnds = oldHwnds if oldHwnds else [] rs = 
u'%s\\RadioSure.exe' % self.RadioSurePath rs = rs.encode(FSE) if isinstance(rs, unicode) else rs args = [rs, "/play"] if mut: args.append("/mute") if hid: args.append("/hidden") if src: args.append(u'/source="%s"' % src) if isfile(rs): Popen(args) while i < 100 and hwnds == oldHwnds: i += 1 hwnds = HandleRS() sleep(1.5) return list(set(hwnds)-set(oldHwnds)) def SetMaxFavs(self): maxFav = 30 hwnds = HandleRS() if hwnds: maxFav, rec = self.GetStatusRS(hwnds) if not maxFav: # ToDo: kill process ??? hwnds = self.GetNewHwnd(hwnds, hid = True, mut = True) if hwnds: maxFav, rec = self.GetStatusRS(hwnds) PostMessage(hwnds[0], WM_COMMAND, 1, 0) # Close else: hwnds = self.GetNewHwnd(hid = True, mut = True) if hwnds: maxFav, rec = self.GetStatusRS(hwnds) PostMessage(hwnds[0], WM_COMMAND, 1, 0) # Close self.maxFav = maxFav def __init__(self): self.observThread = None text=Text self.AddActionsFromList(ACTIONS) def GetLabel( self, path = None, xmlpath = None, logfile = None, holidays = [[], []], first_day = 0, ): if not self.submenus: self.RadioSurePath = path self.xmlpath = xmlpath self.submenus = self.GetSubmenuStrings() return self.name def __start__( self, path = None, xmlpath = None, logfile = None, holidays = [[], []], first_day = 0, ): self.recMesg = RegisterWindowMessage("WM_RADIOSURE_GET_RECORDING_STATUS") self.favMesg = RegisterWindowMessage("WM_RADIOSURE_GET_MAX_FAVORITES") if not self.submenus: self.submenus = self.GetSubmenuStrings() self.RadioSurePath = path self.xmlpath = xmlpath wx.CallAfter(self.SetMaxFavs) self.logfile = logfile self.holidays = holidays self.first_day = first_day self.data = [] self.tmpData = [] if self.xmlpath: if exists(self.xmlpath): self.data = self.xmlToData() if logfile: self.updateLogFile(self.text.start, True) self.UpdateEGscheduler() def __stop__(self): if self.dataToXml(): self.updateLogFile("File Scheduler.xml saved") if self.observThread: ot = self.observThread if ot.isAlive(): ot.AbortObservation() del self.observThread self.observThread = None sched_list = eg.scheduler.__dict__['heap'] tmpLst = [] for sched in sched_list: if sched[1] == self.RadioSureScheduleRun: tmpLst.append(sched) if len(tmpLst) > 0: self.updateLogFile(self.text.stop) for sched in tmpLst: eg.scheduler.CancelTask(sched) self.updateLogFile(self.text.canc % sched[2][0]) if self.dialog: self.dialog.Close() if self.manager: self.manager.Close() def __close__(self): if self.observThread: ot = self.observThread if ot.isAlive(): ot.AbortObservation() def Configure( self, path = "", xmlpath = "", logfile = None, holidays = [[], []], first_day = 0, ): panel = eg.ConfigPanel(self) panel.holidays = cpy(holidays) del holidays managerButton = wx.Button(panel, -1, self.text.managerButton) if not path: #First run after plugin insert managerButton.Enable(False) self.RadioSurePath = path self.xmlpath = xmlpath self.logfile = logfile self.first_day = first_day label1Text = wx.StaticText(panel, -1, self.text.label1) rsPathCtrl = MyDirBrowseButton( panel, toolTip = self.text.toolTipFolder, dialogTitle = self.text.browseTitle, buttonText = eg.text.General.browse ) rsPathCtrl.GetTextCtrl().SetEditable(False) label2Text = wx.StaticText(panel, -1, self.text.label2) xmlPathCtrl = MyDirBrowseButton( panel, toolTip = self.text.toolTipFolder, dialogTitle = self.text.browseTitle, buttonText = eg.text.General.browse ) xmlPathCtrl.GetTextCtrl().SetEditable(False) logFileCtrl = MyFileBrowseButton( panel, toolTip = self.text.toolTipFile, dialogTitle = self.text.browseFile, buttonText = eg.text.General.browse ) 
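        # All three pickers are kept read-only; their values change only via
        # the browse dialogs, so the EVT_TEXT validation handlers bound below
        # always see complete paths.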
logFileCtrl.GetTextCtrl().SetEditable(False) logCheckBox = wx.CheckBox(panel, -1, self.text.logLabel) if not self.RadioSurePath or not exists(self.RadioSurePath): RSpath = getPathFromReg() #Try get path from registry if RSpath: #Regular installation if exists(RSpath): self.RadioSurePath = RSpath else: #Portable installation self.RadioSurePath = u"%s\\RadioSure" % unicode(eg.folderPath.LocalAppData) xmlPath = u"%s\\RadioSure" % unicode(eg.folderPath.LocalAppData) if exists(xmlPath): self.xmlpath = xmlPath if exists(join(self.RadioSurePath, "RadioSure.exe")): rsPathCtrl.GetTextCtrl().ChangeValue(self.RadioSurePath) rsPathCtrl.Enable(False) label1Text.Enable(False) if exists(join(self.xmlpath, "RadioSure.xml")): xmlPathCtrl.GetTextCtrl().ChangeValue(self.xmlpath) xmlPathCtrl.Enable(False) label2Text.Enable(False) def NotHidden(): try: nssh = OpenKey( HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Advanced" ) res = QueryValueEx(nssh, "Hidden")[0] != 2 CloseKey(nssh) except: res = False return res def NotHiddenAttr(path): attr = GetFileAttributes(path) if attr & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM): return False else: p = split(path)[0] if len(p) > 3: return NotHiddenAttr(p) return True if self.logfile is None: logCheckBox.SetValue(True) if NotHiddenAttr(self.xmlpath) or NotHidden(): self.logfile = u'%s\\RadioSureSchedulerLog.txt' % self.xmlpath else: self.logfile = u'%s\\RadioSureSchedulerLog.txt' % unicode(eg.folderPath.Documents) else: val = self.logfile != "" logCheckBox.SetValue(val) logFileCtrl.Enable(val) logFileCtrl.GetTextCtrl().ChangeValue(self.logfile) rsPathCtrl.startDirectory = self.RadioSurePath xmlPathCtrl.startDirectory = self.xmlpath logFileCtrl.startDirectory = self.logfile or u"%s\\RadioSure" % unicode(eg.folderPath.LocalAppData) sizerAdd = panel.sizer.Add sizerAdd(label1Text, 0) sizerAdd(rsPathCtrl,0,wx.TOP|wx.EXPAND,2) sizerAdd(label2Text, 0, wx.TOP,15) sizerAdd(xmlPathCtrl,0,wx.TOP|wx.EXPAND,2) sizerAdd(logCheckBox, 0, wx.TOP,15) sizerAdd(logFileCtrl, 0, wx.TOP|wx.EXPAND,2) firstDayLabel = wx.StaticText(panel, -1, self.text.first_day) firstDayCtrl = wx.Choice( panel, -1, choices = ( day_name[0], day_name[6] ), size = (firstDayLabel.GetSize()[0], -1) ) firstDayCtrl.SetSelection(first_day) panel.holidButton = wx.Button(panel, -1, self.text.holidButton) def OnApplyBtn(evt): managerButton.Enable(True) evt.Skip() panel.dialog.buttonRow.applyButton.Bind(wx.EVT_BUTTON, OnApplyBtn) def onManagerButton(evt): if not self.dialog: wx.CallAfter(SchedulerDialog, self.text.OpenScheduler, self) else: if self.dialog.GetPosition() == (-32000, -32000): ShowWindow(self.dialog.GetHandle(), SW_RESTORE) wx.CallAfter(self.dialog.Raise) evt.Skip() managerButton.Bind(wx.EVT_BUTTON, onManagerButton) def onHolidButton(evt): dlg = HolidaysFrame( parent = panel, plugin = self, ) dlg.Centre() wx.CallAfter(dlg.ShowHolidaysFrame) evt.Skip() panel.holidButton.Bind(wx.EVT_BUTTON, onHolidButton) bottomSizer = wx.GridBagSizer(1, 1) bottomSizer.AddGrowableCol(1,1) bottomSizer.AddGrowableCol(3,1) bottomSizer.Add(firstDayLabel, (0, 0), flag = wx.LEFT) bottomSizer.Add(firstDayCtrl, (1, 0), flag = wx.LEFT) bottomSizer.Add((1, -1), (1, 1), flag = wx.EXPAND) bottomSizer.Add((1, -1), (1, 3), flag = wx.EXPAND) bottomSizer.Add(panel.holidButton, (1, 2)) bottomSizer.Add(managerButton, (1, 4), flag = wx.RIGHT) sizerAdd(bottomSizer, 1, wx.TOP | wx.EXPAND, 15) def Validation(): flag1 = "%s\\RadioSure.exe" % exists(rsPathCtrl.GetValue()) flag2 = "%s\\RadioSure.xml" % 
exists(xmlPathCtrl.GetValue()) flag3 = logCheckBox.IsChecked() and logFileCtrl.GetValue() != "" or not logCheckBox.IsChecked() flag = flag1 and flag2 and flag3 panel.dialog.buttonRow.okButton.Enable(flag) panel.isDirty = True panel.dialog.buttonRow.applyButton.Enable(flag) def OnPathChange(event): path = rsPathCtrl.GetValue() if not exists("%s\\RadioSure.exe" % path): MessageBox( panel, self.text.boxMessage1 % 'RadioSure.exe', self.text.boxTitle % path, wx.ICON_EXCLAMATION, plugin = self ) if path != "": rsPathCtrl.startDirectory = path self.RadioSurePath = path Validation() event.Skip() rsPathCtrl.Bind(wx.EVT_TEXT, OnPathChange) def OnPath2Change(event): path2 = xmlPathCtrl.GetValue() if not exists("%s\\RadioSure.xml" % path2): MessageBox( panel, self.text.boxMessage1 % 'RadioSure.xml', self.text.boxTitle % path2, wx.ICON_EXCLAMATION, plugin = self ) if path2 != "": self.xmlpath = path2 xmlPathCtrl.startDirectory = path2 Validation() event.Skip() xmlPathCtrl.Bind(wx.EVT_TEXT, OnPath2Change) def logFileChange(event): self.logfile = logFileCtrl.GetValue() tmpVal = self.logfile if not tmpVal: tmpPath = u"%s\\RadioSure" % unicode(eg.folderPath.LocalAppData) tmpVal = tmpPath if exists(tmpPath) else self.RadioSurePath logFileCtrl.startDirectory = tmpVal Validation() event.Skip() logFileCtrl.Bind(wx.EVT_TEXT, logFileChange) def onLogCheckBox(evt): val = evt.IsChecked() logFileCtrl.Enable(val) if not val: logFileCtrl.SetValue("") else: Validation() evt.Skip() logCheckBox.Bind(wx.EVT_CHECKBOX, onLogCheckBox) while panel.Affirmed(): panel.SetResult( rsPathCtrl.GetValue(), xmlPathCtrl.GetValue(), logFileCtrl.GetValue(), panel.holidays, firstDayCtrl.GetSelection(), ) #=============================================================================== #cls types for Actions list: #=============================================================================== class Run(eg.ActionBase): class text: play = "Automatically play selected favorite after start" default = "Use start settings RadioSure" label = "Select favorite:" over = "Too large number (%s > %s) !" alr_run = "RadioSure is already running !" 
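    # Favorite N (1-based) is selected by posting WM_COMMAND id 4101+N to the
    # main window, i.e. ids 4102-4131 for the 30 favorites -- the same id
    # scheme SelectFav, NextPrevFav and RandomFav use further below.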
def __call__(self, play = False, fav = 1): def Play(hwnds): self.plugin.RefreshVariables() if fav <= len(self.plugin.Favorites): if play: SendMessage(hwnds[0], WM_COMMAND, 4101+fav, 0) return str(fav)+": "+self.plugin.Favorites[self.plugin.FavIx][1] else: return self.text.over % (str(fav),\ str(len(self.plugin.Favorites))) hwnds = HandleRS() if not hwnds: hwnds = self.plugin.GetNewHwnd() if hwnds: return Play(hwnds) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 elif play: for hwnd in hwnds: x, rec = self.plugin.GetStatusRS([hwnd]) if rec != 1: SendMessage(hwnd, WM_COMMAND, 4101+fav, 0) break if rec or rec is None: hwnds = self.plugin.GetNewHwnd(hwnds) if hwnds: return Play(hwnds) else: self.PrintError(self.text.alr_run) return self.text.alr_run def GetLabel(self, play, fav): num = str(fav) if play else '' return "%s: %s" % (self.name, num) def Configure(self, play = False, fav = 1): panel=eg.ConfigPanel(self) sizerAdd=panel.sizer.Add rb1 = panel.RadioButton(play, self.text.play, style=wx.RB_GROUP) rb2 = panel.RadioButton(not play, self.text.default) sizerAdd(rb1,0,wx.TOP,15) sizerAdd(rb2,0,wx.TOP,6) favLbl=wx.StaticText(panel, -1, self.text.label) sizerAdd(favLbl,0,wx.TOP,25) favCtrl = eg.SpinNumCtrl( panel, -1, fav, fractionWidth=0, min=1, max=30, ) favCtrl.SetValue(fav) sizerAdd(favCtrl,0,wx.TOP,5) def onChangeMode(evt=None): enbl=rb1.GetValue() favLbl.Enable(enbl) favCtrl.Enable(enbl) if evt is not None: evt.Skip() rb1.Bind(wx.EVT_RADIOBUTTON, onChangeMode) rb2.Bind(wx.EVT_RADIOBUTTON, onChangeMode) onChangeMode() while panel.Affirmed(): panel.SetResult( rb1.GetValue(), favCtrl.GetValue() ) #=============================================================================== class WindowControl(eg.ActionBase): def __call__(self): hwnd = HandleRS() if hwnd: SendMessage(hwnd[0], WM_SYSCOMMAND, self.value, 0) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 #=============================================================================== class SendMessageActions(eg.ActionBase): def __call__(self): hwnd = HandleRS() if hwnd: SendMessage(hwnd[0], WM_COMMAND, self.value, 0) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 #=============================================================================== class CheckAndChange(eg.ActionBase): def __call__(self): hwnd = HandleRS() if hwnd: status = self.plugin.GetRS_Status(hwnd[0]) if status[self.value[0]] == self.value[1]: SendMessage(hwnd[0], WM_COMMAND, self.value[2], 0) #=============================================================================== class GetStatus(eg.ActionBase): def __call__(self): hwnd = HandleRS() if hwnd: status = self.plugin.GetRS_Status(hwnd[0]) return status[self.value] #=============================================================================== class GetMenuItem(eg.ActionBase): def __call__(self): hwnd = HandleRS() if hwnd: return self.plugin.GetMenuItem(hwnd[0], self.value) #=============================================================================== class SetVolume(eg.ActionBase): class text: label=["Set volume (0 - 100%):", "Set step (1 - 25%):", "Set step (1 - 25%):"] def __call__(self, step = None): if step is None: if self.value == 0: step = 50 else: step = 5 hwnd = GetCtrlByID(1006) #1006 = ID of ctrl "msctls_trackbar32" if hwnd: vol = SendMessage(hwnd, TBM_GETPOS, 0, 0) key = None value = None if self.value == 0: volume = step elif self.value == 1: volume = vol+step if (vol+step)<100 else 100 else: volume = vol-step if 
(vol-step)>0 else 0 if vol>volume: key='{Left}' if vol>volume+1: value = volume+1 elif vol<volume: key='{Right}' if vol<volume-1: value = volume-1 if value: SendMessage(hwnd, TBM_SETPOS,1,value) if key: eg.SendKeys(hwnd, key, False) return SendMessage(hwnd, TBM_GETPOS, 0, 0) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 def Configure(self, step = None): if step is None: if self.value == 0: step = 50 else: step = 5 panel=eg.ConfigPanel(self) panel.sizer.Add(wx.StaticText(panel, -1, self.text.label[self.value])) if self.value == 0: Min = 0 Max = 100 else: Min = 1 Max = 25 volumeCtrl = eg.SpinNumCtrl( panel, -1, step, fractionWidth=0, increment=1, min=Min, max=Max, ) volumeCtrl.SetValue(step) panel.sizer.Add(volumeCtrl,0,wx.TOP,10) while panel.Affirmed(): panel.SetResult(volumeCtrl.GetValue()) #=============================================================================== class GetVolume(eg.ActionBase): def __call__(self): hwnd = GetCtrlByID(1006) #1006 = ID for ctrl "msctls_trackbar32" if hwnd: return SendMessage(hwnd, TBM_GETPOS, 0, 0) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 #=============================================================================== class SelectFav(eg.ActionBase): class text: label = "Select preset number (1-30):" txtLabel = 'Preset number:' over = "Too large number (%s > %s) !" modeLabel = 'Preset number to get as:' modeChoices = [ 'Event payload', 'Python expression', 'Number' ] def __call__(self,fav = 1, mode = 0, number = '{eg.event.payload}'): hwnd = HandleRS() if hwnd: if mode == 2: indx = fav else: indx = int(eg.ParseString(number)) self.plugin.RefreshVariables() if indx <= len(self.plugin.Favorites): SendMessage(hwnd[0], WM_COMMAND, 4101+indx, 0) return str(indx)+": "+self.plugin.Favorites[indx-1][1] else: self.PrintError( self.text.over % (str(indx),str(len(self.plugin.Favorites)))) return self.text.over % (str(indx),str(len(self.plugin.Favorites))) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 def GetLabel(self, fav, mode, number): return "%s %s" % (self.text.txtLabel, str(fav) if mode == 2 else number) def Configure(self, fav = 1, mode = 0, number = '{eg.event.payload}'): self.number = number panel = eg.ConfigPanel(self) radioBoxMode = wx.RadioBox( panel, -1, self.text.modeLabel, choices = self.text.modeChoices, style=wx.RA_SPECIFY_ROWS ) radioBoxMode.SetSelection(mode) txtBoxLabel = wx.StaticText(panel, -1, self.text.txtLabel) numberCtrl = wx.TextCtrl(panel,-1,self.number) spinLabel = wx.StaticText(panel, -1, self.text.label) favCtrl = eg.SpinNumCtrl( panel, -1, fav, fractionWidth=0, min=1, max=30, ) favCtrl.SetValue(fav) panel.sizer.Add(radioBoxMode, 0, wx.TOP,0) panel.sizer.Add(txtBoxLabel,0,wx.TOP,10) panel.sizer.Add(numberCtrl,0,wx.TOP,5) panel.sizer.Add(spinLabel,0,wx.TOP,10) panel.sizer.Add(favCtrl,0,wx.TOP,5) def onRadioBox(event = None): sel = radioBoxMode.GetSelection() txtBoxLabel.Enable(False) numberCtrl.Enable(False) spinLabel.Enable(False) favCtrl.Enable(False) if sel == 0: self.number = '{eg.event.payload}' elif sel == 1: txtBoxLabel.Enable(True) numberCtrl.Enable(True) else: self.number = favCtrl.GetValue() spinLabel.Enable(True) favCtrl.Enable(True) numberCtrl.ChangeValue(str(self.number)) if event: event.Skip() radioBoxMode.Bind(wx.EVT_RADIOBOX, onRadioBox) onRadioBox() def onSpin(event): numberCtrl.ChangeValue(str(favCtrl.GetValue())) event.Skip() favCtrl.Bind(wx.EVT_TEXT, onSpin) while panel.Affirmed(): panel.SetResult( favCtrl.GetValue(), 
radioBoxMode.GetSelection(), numberCtrl.GetValue()) #=============================================================================== class NextPrevFav(eg.ActionBase): def __call__(self): hwnd = HandleRS() if hwnd: self.plugin.RefreshVariables() #ix = self.plugin.FavIx ix = self.plugin.GetMenuItem(hwnd[0], 7)[0] if self.value == 1 and ix == len(self.plugin.Favorites) - 1 : ix = -1 elif self.value == -1 and ix == 0: ix = len(self.plugin.Favorites) SendMessage(hwnd[0], WM_COMMAND, 4102+ix+self.value, 0) return (str(ix+self.value+1)+": "+self.plugin.Favorites[ix+self.value][1]) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 #=============================================================================== class RandomFav(eg.ActionBase): def __call__(self): hwnd = HandleRS() if hwnd: self.plugin.RefreshVariables() ix = self.plugin.GetMenuItem(hwnd[0], 7)[0] lng = len(self.plugin.Favorites) if lng > 1: newIx = randrange(lng) while newIx == ix: newIx = randrange(lng) SendMessage(hwnd[0], WM_COMMAND, 4102+newIx, 0) return (str(newIx+1)+": "+self.plugin.Favorites[newIx][1]) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 #=============================================================================== class GetPlayingTitle(eg.ActionBase): def __call__(self): hwnd = HandleRS() if hwnd: return GetWindowText(hwnd[0]) else: self.PrintError(self.plugin.text.text1) return self.plugin.text.text1 #=============================================================================== class StartTitlebarObservation(eg.ActionBase): class text: intervalLabel = "Refresh interval (s):" label = "Event suffix:" timeStamp = "Insert timestamp" def __call__( self, period = 1.0, evtName ="titlebar", ): if self.plugin.observThread: ot = self.plugin.observThread if ot.isAlive(): ot.AbortObservation() del self.plugin.observThread ot = ObservationThread( period, evtName, ) ot.start() self.plugin.observThread = ot def Configure( self, period = 1.0, evtName = "titlebar", ): panel = eg.ConfigPanel() periodNumCtrl = eg.SpinNumCtrl( panel, -1, period, integerWidth = 5, fractionWidth = 1, allowNegative = False, min = 0.1, increment = 0.1, ) intervalLbl = wx.StaticText(panel, -1, self.text.intervalLabel) textLabel = wx.StaticText(panel, -1, self.text.label) textControl = wx.TextCtrl(panel, -1, evtName, size = (200,-1)) AddCtrl = panel.sizer.Add AddCtrl(intervalLbl, 0, wx.TOP, 20) AddCtrl(periodNumCtrl, 0, wx.TOP, 3) AddCtrl(textLabel, 0, wx.TOP, 20) AddCtrl(textControl, 0, wx.TOP, 3) textLabel.SetFocus() while panel.Affirmed(): panel.SetResult( periodNumCtrl.GetValue(), textControl.GetValue(), ) #=============================================================================== class StopTitlebarObservation(eg.ActionBase): def __call__(self): if self.plugin.observThread: ot = self.plugin.observThread if ot.isAlive(): ot.AbortObservation() del self.plugin.observThread self.plugin.observThread = None #=============================================================================== class OpenManager(eg.ActionBase): def __call__(self): if not self.plugin.manager: wx.CallAfter(ManagerDialog, self.text, self.plugin) else: if self.plugin.manager.GetPosition() == (-32000, -32000): ShowWindow(self.plugin.manager.GetHandle(), SW_RESTORE) wx.CallAfter(self.plugin.manager.Raise) #=============================================================================== class HideManager(eg.ActionBase): def __call__(self): if self.plugin.manager: wx.CallAfter(self.plugin.manager.Close) 
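#-------------------------------------------------------------------------------
# Standalone sketch of the registered-window-message status query used by
# GetStatusRS()/__start__() above.  Purely illustrative: the plugin never calls
# it and the name _demoQueryStatus is hypothetical; only RegisterWindowMessage,
# SendMessageTimeout and HandleRS from this module are assumed.
def _demoQueryStatus():
    favMsg = RegisterWindowMessage("WM_RADIOSURE_GET_MAX_FAVORITES")
    recMsg = RegisterWindowMessage("WM_RADIOSURE_GET_RECORDING_STATUS")
    for hwnd in HandleRS() or []:
        try:
            return (
                SendMessageTimeout(hwnd, favMsg, 0, 0),  # max favorites (e.g. 30)
                SendMessageTimeout(hwnd, recMsg, 0, 0),  # nonzero while recording
            )
        except:                      # unresponsive window -> try the next one
            continue
    return (None, None)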
#===============================================================================

class OpenScheduler(eg.ActionBase):

    def __call__(self):
        if not self.plugin.dialog:
            wx.CallAfter(SchedulerDialog, self.text, self.plugin)
        else:
            if self.plugin.dialog.GetPosition() == (-32000, -32000):
                ShowWindow(self.plugin.dialog.GetHandle(), SW_RESTORE)
            wx.CallAfter(self.plugin.dialog.Raise)
#===============================================================================

class HideScheduler(eg.ActionBase):

    def __call__(self):
        if self.plugin.dialog:
            wx.CallAfter(self.plugin.dialog.Close)
#===============================================================================

class EnableSchedule(eg.ActionBase):

    class text:
        scheduleTitle = "Schedule title:"
        notFound = 'Cannot find schedule "%s"!'

    def __call__(self, schedule=""):
        schedule = eg.ParseString(schedule)
        xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
        if not exists(xmlfile):
            return
        data = self.plugin.data
        tmpLst = [item[1] for item in data]
        if schedule in tmpLst:
            ix = tmpLst.index(schedule)
            if self.value > -1:
                data[ix][0] = self.value
                self.plugin.dataToXml()
                self.plugin.UpdateEGscheduler()
                if self.plugin.dialog:
                    wx.CallAfter(self.plugin.dialog.EnableSchedule, schedule, self.value)
            return data[tmpLst.index(schedule)]
        else:
            return self.text.notFound % schedule

    def Configure(self, schedule=""):
        panel = eg.ConfigPanel()
        xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
        if not exists(xmlfile):
            return
        data = self.plugin.xmlToData()
        choices = [item[1] for item in data]
        textControl = wx.ComboBox(panel, -1, schedule, size = (300,-1), choices = choices)
        panel.sizer.Add(wx.StaticText(panel,-1,self.text.scheduleTitle), 0,wx.LEFT|wx.TOP, 10)
        panel.sizer.Add(textControl, 0, wx.LEFT, 10)
        while panel.Affirmed():
            panel.SetResult(textControl.GetValue())
#===============================================================================

class EnableAll(eg.ActionBase):

    def __call__(self):
        xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
        if not exists(xmlfile):
            return
        data = self.plugin.data
        for schedule in data:
            schedule[0] = self.value
        self.plugin.dataToXml()
        self.plugin.UpdateEGscheduler()
        if self.plugin.dialog:
            wx.CallAfter(self.plugin.dialog.EnableAll, self.value)
#===============================================================================

class DeleteSchedule(eg.ActionBase):

    class text:
        scheduleTitle = "Schedule title:"

    def __call__(self, schedule=""):
        schedule = eg.ParseString(schedule)
        xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
        if not exists(xmlfile):
            return
        data = self.plugin.data
        tmpLst = [item[1] for item in data]
        if schedule in tmpLst:
            ix = tmpLst.index(schedule)
            data.pop(ix)
            self.plugin.dataToXml()
            self.plugin.UpdateEGscheduler()
            if self.plugin.dialog:
                wx.CallAfter(self.plugin.dialog.DeleteSchedule, schedule)

    def Configure(self, schedule=""):
        panel = eg.ConfigPanel()
        xmlfile = u'%s\\Scheduler.xml' % self.plugin.xmlpath
        if not exists(xmlfile):
            return
        data = self.plugin.xmlToData()
        choices = [item[1] for item in data]
        textControl = wx.ComboBox(panel, -1, schedule, size = (300,-1), choices = choices)
        panel.sizer.Add(wx.StaticText(panel,-1,self.text.scheduleTitle), 0,wx.LEFT|wx.TOP, 10)
        panel.sizer.Add(textControl, 0, wx.LEFT, 10)
        while panel.Affirmed():
            panel.SetResult(textControl.GetValue())
#===============================================================================

class RunScheduleImmediately(eg.ActionBase):

    class text:
        scheduleTitle = "Schedule title:"
        notFound = 'Cannot find schedule "%s"!'
        immedRun = 'Schedule "%s" - immediate execution. Next possible run: %s'

    def __call__(self, schedule=""):
        schedule = eg.ParseString(schedule)
        data = self.plugin.data
        tmpLst = [item[1] for item in data]
        if schedule in tmpLst:
            ix = tmpLst.index(schedule)
            sched = self.plugin.data[ix]
            if sched[0]:
                for sch in eg.scheduler.__dict__['heap']:
                    if sch[1] == self.plugin.RadioSureScheduleRun:
                        if sch[2][0] == sched[1]:
                            eg.scheduler.CancelTask(sch)
                            self.plugin.updateLogFile(self.plugin.text.canc % sch[2][0])
                            break
            next, cmdline = self.plugin.Execute(sched, True)
            next = next[:19] if next else self.plugin.text.none
            self.plugin.updateLogFile(self.text.immedRun % (sched[1], next))
            self.plugin.updateLogFile(self.plugin.text.cmdLine % cmdline)
        else:
            self.PrintError(self.text.notFound % schedule)
            return self.text.notFound % schedule

    def Configure(self, schedule = ""):
        panel = eg.ConfigPanel()
        data = self.plugin.data
        choices = [item[1] for item in data]
        textControl = wx.ComboBox(panel, -1, schedule, size = (300, -1), choices = choices)
        panel.sizer.Add(wx.StaticText(panel, -1, self.text.scheduleTitle), 0, wx.LEFT | wx.TOP, 10)
        panel.sizer.Add(textControl, 0, wx.LEFT, 10)
        while panel.Affirmed():
            panel.SetResult(textControl.GetValue())
#===============================================================================

class AddSchedule(eg.ActionBase):

    class text:
        python_expr = "Python expression:"
        descr = u'''<rst>**Add schedule**.

In the edit box, enter a Python expression with the parameters of the schedule.
This may be for example *eg.result*, *eg.event.payload*, or the entire list
(in the same format as the result of the **"GetSchedule"** action).
This action works in two ways (depending on the title of the schedule):

1. If a schedule with the same title already exists, its parameters are
   overwritten by the new ones.
2. If the title does not exist yet, the schedule is added to the list as a
   new entry.'''

    def __call__(self, expr = ""):
        schedule = eg.ParseString(expr)
        schedule = eval(schedule)
        if len(schedule) == 8 and isinstance(schedule[1], unicode):
            data = self.plugin.data
            tmpLst = [item[1] for item in data]
            if schedule[1] in tmpLst:
                data[tmpLst.index(schedule[1])] = schedule
            else:
                data.append(schedule)
            self.plugin.UpdateEGscheduler()
            if self.plugin.dialog:
                wx.CallAfter(self.plugin.dialog.AddSchedule, schedule)

    def Configure(self, expr=""):
        panel = eg.ConfigPanel(resizable=True)
        textControl = wx.TextCtrl(
            panel,
            -1,
            expr,
            size = (300,-1),
            style = wx.TE_MULTILINE
        )
        panel.sizer.Add(wx.StaticText(panel,-1,self.text.python_expr), 0,wx.LEFT|wx.TOP, 10)
        panel.sizer.Add(textControl, 1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 10)
        while panel.Affirmed():
            panel.SetResult(textControl.GetValue())
#===============================================================================

class ShowMenu(eg.ActionClass):
    name = "Show Radio Sure menu"
    description = "Show Radio Sure menu."
    panel = None

    class text:
        OSELabel = 'Menu show on:'
        menuPreview = 'RS On Screen Menu preview:'
        menuFont = 'Font:'
        txtColour = 'Text colour'
        background = 'Background colour'
        txtColourSel = 'Selected text colour'
        backgroundSel = 'Selected background colour'
        dialog = "Events ..."
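        # (The submenu offered in the config panel comes from
        # GetSubmenuStrings(); __call__ maps the selection onto RadioSure's
        # context-menu positions via (0, 7, 8, 9, 10, 11, 12, 14) --
        # 0 = root, 7 = Favorites, 8 = History, 9 = Equalizer, ...)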
btnToolTip = """Press this button to assign events to control the menu !!!""" evtAssignTitle = "Menu control - events assignement" events = ( "Cursor up:", "Cursor down:", "Back from the (sub)menu:", "Submenu, or select an item:", "Cancel (Escape):", ) inverted = "Use inverted colours" submenuLbl = "Show main menu or submenu:" def __call__( self, fore, back, fontInfo = TAHOMA_INFO, monitor = 0, foreSel = (180, 180, 180), backSel = (75, 75, 75), evtList = [], inverted = True, submenu = 0 ): hwnd = HandleRS() if hwnd: if not self.plugin.menuDlg: self.plugin.menuDlg = Menu() self.event = CreateEvent(None, 0, 0, None) wx.CallAfter(self.plugin.menuDlg.ShowMenu, fore, back, foreSel, backSel, fontInfo, False, self.plugin, self.event, monitor, hwnd[0], evtList, (0, 7, 8, 9, 10, 11, 12, 14)[submenu], ) eg.actionThread.WaitOnEvent(self.event) def GetLabel( self, fore, back, fontInfo, monitor, foreSel, backSel, evtList, inverted, submenu = 0 ): return "%s: %s" % (self.name, self.plugin.submenus[submenu]) def Configure( self, fore = (75, 75, 75), back = (180, 180, 180), fontInfo = TAHOMA_INFO, monitor = 0, foreSel = (180, 180, 180), backSel = (75, 75, 75), evtList = [[],[],[],[],[]], inverted = True, submenu = 0 ): self.fontInfo = fontInfo self.fore = fore self.back = back self.foreSel = foreSel self.backSel = backSel self.oldSel=0 self.inverted = inverted global panel panel = eg.ConfigPanel(self) panel.evtList = cpy(evtList) previewLbl=wx.StaticText(panel, -1, self.text.menuPreview) listBoxCtrl = MenuGrid(panel, 3) items = (("Blabla_1",0,True,804), ("Blabla_2",1,False,804), ("Blabla_3",2,False,-1),) listBoxCtrl.Set(items) listBoxCtrl.SetBackgroundColour(self.back) listBoxCtrl.SetForegroundColour(self.fore) listBoxCtrl.SetSelectionBackground(self.backSel) listBoxCtrl.SetSelectionForeground(self.foreSel) #Font button fontLbl=wx.StaticText(panel, -1, self.text.menuFont) fontButton = ExtFontSelectButton(panel, value = fontInfo) font = wx.FontFromNativeInfoString(fontInfo) for n in range(10,20): font.SetPointSize(n) fontButton.SetFont(font) hght = fontButton.GetTextExtent('X')[1] if hght > 20: break listBoxCtrl.SetDefaultCellFont(font) arial = wx.FontFromNativeInfoString(ARIAL_INFO) fontButton.SetFont(font) for n in range(1,1000): arial.SetPointSize(n) fontButton.SetFont(arial) h = fontButton.GetTextExtent(u"\u25a0")[1] if h > hght: break arial.SetPointSize(2*n/3) fontButton.SetFont(arial) w0 = 2 * fontButton.GetTextExtent(u"\u25a0")[0] attr = gridlib.GridCellAttr() attr.SetFont(arial) attr.SetAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE) listBoxCtrl.SetColAttr(0,attr) for n in range(1,1000): arial.SetPointSize(n) fontButton.SetFont(arial) h = fontButton.GetTextExtent(u"\u25ba")[1] if h > hght: break arial.SetPointSize(n/2) fontButton.SetFont(arial) w2 = 2 * fontButton.GetTextExtent(u"\u25ba")[0] attr = gridlib.GridCellAttr() attr.SetFont(arial) attr.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE) listBoxCtrl.SetColAttr(2,attr) listBoxCtrl.SetDefaultRowSize(hght+4, True) displayChoice = eg.DisplayChoice(panel, monitor) w = displayChoice.GetSize()[0] OSElbl = wx.StaticText(panel, -1, self.text.OSELabel) useInvertedCtrl = wx.CheckBox(panel, -1, self.text.inverted) useInvertedCtrl.SetValue(inverted) subMenuLbl = wx.StaticText(panel, -1, self.text.submenuLbl) if self.plugin.submenus: choices = self.plugin.submenus else: choices = self.plugin.GetSubmenuStrings() subMenuCtrl = wx.Choice(panel, -1, choices = choices) subMenuCtrl.SetSelection(submenu) #Button Text Colour foreLbl=wx.StaticText(panel, -1, 
self.text.txtColour+':') foreColourButton = ExtColourSelectButton(panel,fore,title = self.text.txtColour) #Button Background Colour backLbl=wx.StaticText(panel, -1, self.text.background+':') backColourButton = ExtColourSelectButton(panel,back,title = self.text.background) #Button Selected Text Colour foreSelLbl=wx.StaticText(panel, -1, self.text.txtColourSel+':') foreSelColourButton = ExtColourSelectButton(panel,foreSel,title = self.text.txtColourSel) #Button Selected Background Colour backSelLbl=wx.StaticText(panel, -1, self.text.backgroundSel+':') backSelColourButton = ExtColourSelectButton(panel,backSel,title = self.text.backgroundSel) #Button Dialog "Menu control - assignement of events" dialogButton = wx.Button(panel,-1,self.text.dialog) dialogButton.SetToolTipString(self.text.btnToolTip) foreSelLbl.Enable(not inverted) foreSelColourButton.Enable(not inverted) backSelLbl.Enable(not inverted) backSelColourButton.Enable(not inverted) #Sizers mainSizer = panel.sizer topSizer=wx.GridBagSizer(2, 30) mainSizer.Add(topSizer) topSizer.Add(previewLbl,(0, 0),flag = wx.TOP,border = 0) topSizer.Add(listBoxCtrl,(1, 0),(4, 1)) topSizer.Add(useInvertedCtrl,(6, 0),flag = wx.TOP, border = 8) topSizer.Add(subMenuLbl,(8, 0), flag = wx.TOP,border = 8) topSizer.Add(subMenuCtrl,(9, 0), flag = wx.TOP) topSizer.Add(fontLbl,(0, 1),flag = wx.TOP) topSizer.Add(fontButton,(1, 1),flag = wx.TOP) topSizer.Add(foreLbl,(2, 1),flag = wx.TOP,border = 8) topSizer.Add(foreColourButton,(3, 1),flag = wx.TOP) topSizer.Add(backLbl,(4, 1),flag = wx.TOP,border = 8) topSizer.Add(backColourButton,(5, 1),flag = wx.TOP) topSizer.Add(OSElbl,(0, 2), flag = wx.TOP) topSizer.Add(displayChoice,(1, 2),flag = wx.TOP) topSizer.Add(foreSelLbl,(6, 1), (1, 2), flag = wx.TOP,border = 8) topSizer.Add(foreSelColourButton, (7, 1), flag = wx.TOP) topSizer.Add(backSelLbl,(8, 1), (1, 2), flag = wx.TOP,border = 8) topSizer.Add(backSelColourButton, (9, 1), flag = wx.TOP) topSizer.Add(dialogButton, (3, 2), flag = wx.TOP|wx.EXPAND) panel.sizer.Layout() wdth = 160 if (hght+4)*listBoxCtrl.GetNumberRows() > listBoxCtrl.GetSize()[1]: #after Layout() !!! 
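            # The grid is taller than its visible area, so a vertical
            # scrollbar will appear -- reserve its width before sizing the
            # columns.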
            wdth -= SYS_VSCROLL_X
        listBoxCtrl.SetColSize(0, w0)
        listBoxCtrl.SetColSize(1, wdth - w0 - w2)
        listBoxCtrl.SetColSize(2, w2)
        listBoxCtrl.SetGridCursor(-1, 1)
        listBoxCtrl.SelectRow(0)

        def OnMonitor(evt):
            listBoxCtrl.SetFocus()
            evt.Skip()
        displayChoice.Bind(wx.EVT_CHOICE, OnMonitor)

        def OnInverted(evt):
            flag = evt.IsChecked()
            foreSelLbl.Enable(not flag)
            foreSelColourButton.Enable(not flag)
            backSelLbl.Enable(not flag)
            backSelColourButton.Enable(not flag)
            self.inverted = flag
            if flag:
                self.foreSel = self.back
                self.backSel = self.fore
                backSelColourButton.SetValue(self.backSel)
                foreSelColourButton.SetValue(self.foreSel)
                listBoxCtrl.SetSelectionForeground(self.foreSel)
                listBoxCtrl.SetSelectionBackground(self.backSel)
            listBoxCtrl.SetFocus()
            evt.Skip()
        useInvertedCtrl.Bind(wx.EVT_CHECKBOX, OnInverted)

        def OnDialogBtn(evt):
            dlg = MenuEventsDialog(
                parent = panel,
                plugin = self.plugin,
            )
            dlg.Centre()
            wx.CallAfter(dlg.ShowMenuEventsDialog, self.text.evtAssignTitle, self.text.events)
            evt.Skip()
        dialogButton.Bind(wx.EVT_BUTTON, OnDialogBtn)

        def OnFontBtn(evt):
            value = evt.GetValue()
            self.fontInfo = value
            font = wx.FontFromNativeInfoString(value)
            for n in range(10,20):
                font.SetPointSize(n)
                fontButton.SetFont(font)
                hght = fontButton.GetTextExtent('X')[1]
                if hght > 20:
                    break
            listBoxCtrl.SetDefaultCellFont(font)
            listBoxCtrl.SetDefaultRowSize(hght+4, True)
            for i in range(listBoxCtrl.GetNumberRows()):
                listBoxCtrl.SetCellFont(i,1,font)
            listBoxCtrl.SetFocus()
            if evt:
                evt.Skip()
        fontButton.Bind(EVT_BUTTON_AFTER, OnFontBtn)

        def OnColourBtn(evt):
            id = evt.GetId()
            value = evt.GetValue()
            if id == foreColourButton.GetId():
                listBoxCtrl.SetForegroundColour(value)
                if self.inverted:
                    self.backSel = self.fore
                    listBoxCtrl.SetSelectionBackground(value)
                    backSelColourButton.SetValue(value)
            elif id == backColourButton.GetId():
                listBoxCtrl.SetBackgroundColour(value)
                if self.inverted:
                    self.foreSel = self.back
                    listBoxCtrl.SetSelectionForeground(value)
                    foreSelColourButton.SetValue(value)
            elif id == foreSelColourButton.GetId():
                listBoxCtrl.SetSelectionForeground(value)
            elif id == backSelColourButton.GetId():
                listBoxCtrl.SetSelectionBackground(value)
            listBoxCtrl.Refresh()
            listBoxCtrl.SetFocus()
            evt.Skip()
        foreColourButton.Bind(EVT_BUTTON_AFTER, OnColourBtn)
        backColourButton.Bind(EVT_BUTTON_AFTER, OnColourBtn)
        foreSelColourButton.Bind(EVT_BUTTON_AFTER, OnColourBtn)
        backSelColourButton.Bind(EVT_BUTTON_AFTER, OnColourBtn)

        def setFocus():
            listBoxCtrl.SetFocus()
        panel.setFocus = setFocus

        # re-assign the test button
        def OnButton(event):
            hwnds = HandleRS()
            if hwnds:
                if not self.plugin.menuDlg:
                    self.plugin.menuDlg = Menu()
                    self.event = CreateEvent(None, 0, 0, None)
                    wx.CallAfter(self.plugin.menuDlg.ShowMenu,
                        foreColourButton.GetValue(),
                        backColourButton.GetValue(),
                        foreSelColourButton.GetValue(),
                        backSelColourButton.GetValue(),
                        self.fontInfo,
                        True,
                        self.plugin,
                        self.event,
                        displayChoice.GetSelection(),
                        hwnds[0],
                        panel.evtList,
                        (0, 7, 8, 9, 10, 11, 12, 14)[subMenuCtrl.GetSelection()]
                    )
                    eg.actionThread.WaitOnEvent(self.event)
        panel.dialog.buttonRow.testButton.Bind(wx.EVT_BUTTON, OnButton)

        while panel.Affirmed():
            fontInfo = fontButton.GetValue()
            if not fontInfo:
                font = listBoxCtrl.GetFont()
                font.SetPointSize(36)
                fontInfo = font.GetNativeFontInfoDesc()
            panel.SetResult(
                foreColourButton.GetValue(),
                backColourButton.GetValue(),
                fontInfo,
                displayChoice.GetSelection(),
                foreSelColourButton.GetValue(),
                backSelColourButton.GetValue(),
                panel.evtList,
                useInvertedCtrl.GetValue(),
                subMenuCtrl.GetSelection()
            )
#=============================================================================== ACTIONS = ( (Run,"Run","Run RadioSure","Run RadioSure with its default settings.",None), (SendMessageActions,"Close","Close window (exit RadioSure)","Close window (exit RadioSure).",1), (GetPlayingTitle,"GetPlayingTitle","Get currently playing station/title","Gets the name of currently playing station/title.", None), (StartTitlebarObservation,"StartTitlebarObservation","Start observation of titlebar","Starts observation of titlebar.", None), (StopTitlebarObservation,"StopTitlebarObservation","Stop observation of titlebar","Stops observation of titlebar.", None), (ShowMenu,"ShowMenu","ShowMenu","ShowMenu.",None), (eg.ActionGroup, 'Window', 'Window', 'Window',( (SendMessageActions,"Minimize","Minimize window","Minimize window.",2), (WindowControl,"Restore","Restore window","Restore window.",SC_RESTORE), (SendMessageActions,"MinimRest","Minimize/Restore","Minimize/Restore window.",1075), (SendMessageActions,"Expand","Collapse/Expand window","Collapse/Expand window.",1076), (SendMessageActions,"OnTop","Stay on top On/Off","Stay on top On/Off.",1077), )), (eg.ActionGroup, 'MainControl', 'Main control', 'Main control',( (SendMessageActions,"PlayStop","Play/Stop","Play/Stop.",1000), (CheckAndChange,"Play","Play","Play.",(0, False, 1000)), (SendMessageActions,"Stop","Stop","Stop.",1008), (GetStatus,"GetPlaying","Get status of playing","Get status of playing.",0), (SendMessageActions,"MuteOnOff","Mute On/Off","Mute On/Off.",1027), (CheckAndChange,"MuteOn","Mute on","Mute on.",(1, False, 1027)), (CheckAndChange,"MuteOff","Mute off","Mute off.",(1, True, 1027)), (GetStatus,"GetMuted","Get muted","Get muted.",1), (SendMessageActions,"RecOnOff","Recording On/Off","Recording On/Off.",1051), (CheckAndChange,"RecOn","Recording on","Recording on.",(2, False, 1051)), (CheckAndChange,"RecOff","Recording off","Recording off.",(2, True, 1051)), (GetStatus,"GetRecording","Get recording","Get recording.",2), (SendMessageActions,"RecOnlyCurr",'Toggle "Record only current track"','Toggle "Record only current track".', 4036), (CheckAndChange,"RecOnlyOn",'Set "Record only current track"','Set "Record only current track".',(3, False, 4036)), (CheckAndChange,"RecOnlyOff",'Clear "Record only current track"','Clear "Record only current track".',(3, True, 4036)), (GetStatus,"GetRecOnlyCurr",'Get "Record only current track"','Get "Record only current track".',3), )), (eg.ActionGroup, 'Volume', 'Volume', 'Volume',( (GetVolume,"GetVolume","Get volume","Get volume.", None), (SetVolume,"SetVolume","Set volume","Set volume.", 0), (SetVolume,"VolumeUp","Volume up","Volume up.", 1), (SetVolume,"VolumeDown","Volume down","Volume down.", 2), )), (eg.ActionGroup, 'Clipboard', 'Clipboard', 'Clipboard',( (SendMessageActions,"CopyURLtoClipboard","Copy URL to Clipboard","Copy URL to Clipboard.", 4037), (SendMessageActions,"CopyTitleToClipboard","Copy Title to Clipboard","Copy Title to Clipboard.", 4038), (SendMessageActions,"PlayURLfromClipboard","Play URL from Clipboard","Play URL from Clipboard.", 4039), )), (eg.ActionGroup, 'Equalizer', 'Equalizer', 'Equalizer',( (SendMessageActions,"EqualizerOff","Equalizer Off","Equalizer Off.", 4040), (SendMessageActions,"EqualizerJazz","Equalizer Jazz","Equalizer Jazz.", 4041), (SendMessageActions,"EqualizerPop","Equalizer Pop","Equalizer Pop.", 4042), (SendMessageActions,"EqualizerRock","Equalizer Rock","Equalizer Rock.", 4043), (SendMessageActions,"EqualizerClassic","Equalizer Classic","Equalizer Classic.", 4044), 
(GetMenuItem, "GetEqualizerIndex", "Get Equalizer", "Get Equalizer.", 9), )), (eg.ActionGroup, 'SleepTimer', 'Sleep timer', 'Sleep timer',( (SendMessageActions,"SleepTimerOff","Sleep timer Off","Sleep timer Off.", 4034), (SendMessageActions,"SleepIn5Min","Sleep in 5 min","Sleep in 5 min.", 4026), (SendMessageActions,"SleepIn10Min","Sleep in 10 min","Sleep in 10 min.", 4027), (SendMessageActions,"SleepIn15Min","Sleep in 15 min","Sleep in 15 min.", 4028), (SendMessageActions,"SleepIn20Min","Sleep in 20 min","Sleep in 20 min.", 4029), (SendMessageActions,"SleepIn30Min","Sleep in 30 min","Sleep in 30 min.", 4030), (SendMessageActions,"SleepIn60Min","Sleep in 60 min","Sleep in 60 min.", 4031), (SendMessageActions,"SleepIn90Min","Sleep in 90 min","Sleep in 90 min.", 4032), (SendMessageActions,"SleepIn120Min","Sleep in 120 min","Sleep in 120 min.", 4033), )), (eg.ActionGroup, 'Fav_and_Hist', 'Favorites and History', 'Favorites and History',( (SendMessageActions,"AddFav","Add to favorites","Add current station to favorites.",1324), (SendMessageActions,"RemFav","Remove from favorites","Remove current station from favorites.",1325), (SelectFav,"SelectFav","Select favorite (preset number)","Select favorite by preset number (order).", None), (NextPrevFav,"NextFav","Next favorite","Next favorite.", 1), (NextPrevFav,"PreviousFav","Previous favorite","Previous favorite.", -1), (RandomFav,"RandomFav","Random favorite","Random favorite.", None), (SendMessageActions,"PreviousHist","Back in history","Back in history.",1038), (SendMessageActions,"ForwardHist","Forward in history","Forward in history.",1039), (OpenManager,"OpenManager","Open manager","Open manager.", None), (HideManager,"HideManager","Hide manager","Hide manager.", None), )), (eg.ActionGroup, 'Scheduler', 'Scheduler', 'Scheduler',( (OpenScheduler,"OpenScheduler","Open scheduler","Open scheduler.", None), (HideScheduler,"HideScheduler","Hide scheduler","Hide scheduler.", None), (EnableSchedule,"EnableSchedule","Enable schedule","Enable schedule.", 1), (EnableSchedule,"DisableSchedule","Disable schedule","Disable schedule.", 0), (EnableAll,"EnableAll","Enable all schedules","Enable all schedules.", 1), (EnableAll,"DisableAll","Disable all schedules","Disable all schedules.", 0), (EnableSchedule,"GetSchedule","Get schedule","Get schedule.", -1), (AddSchedule,"AddSchedule","Add schedule",AddSchedule.text.descr, None), (DeleteSchedule,"DeleteSchedule","Delete schedule","Delete schedule.", None), (RunScheduleImmediately, "RunScheduleImmediately", "Run schedule immediately", "Runs schedule immediately.", None), )), ) #===============================================================================
gpl-2.0
scikit-learn-contrib/py-earth
examples/plot_feature_importance.py
3
2142
""" =========================== Plotting feature importance =========================== A simple example showing how to compute and display feature importances, it is also compared with the feature importances obtained using random forests. Feature importance is a measure of the effect of the features on the outputs. For each feature, the values go from 0 to 1 where a higher the value means that the feature will have a higher effect on the outputs. Currently three criteria are supported : 'gcv', 'rss' and 'nb_subsets'. See [1], section 12.3 for more information about the criteria. .. [1] http://www.milbo.org/doc/earth-notes.pdf """ import numpy import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestRegressor from pyearth import Earth # Create some fake data numpy.random.seed(2) m = 10000 n = 10 X = numpy.random.uniform(size=(m, n)) y = (10 * numpy.sin(numpy.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4] + numpy.random.uniform(size=m)) # Fit an Earth model criteria = ('rss', 'gcv', 'nb_subsets') model = Earth(max_degree=3, max_terms=10, minspan_alpha=.5, feature_importance_type=criteria, verbose=True) model.fit(X, y) rf = RandomForestRegressor() rf.fit(X, y) # Print the model print(model.trace()) print(model.summary()) print(model.summary_feature_importances(sort_by='gcv')) # Plot the feature importances importances = model.feature_importances_ importances['random_forest'] = rf.feature_importances_ criteria = criteria + ('random_forest',) idx = 1 fig = plt.figure(figsize=(20, 10)) labels = ['$x_{}$'.format(i) for i in range(n)] for crit in criteria: plt.subplot(2, 2, idx) plt.bar(numpy.arange(len(labels)), importances[crit], align='center', color='red') plt.xticks(numpy.arange(len(labels)), labels) plt.title(crit) plt.ylabel('importances') idx += 1 title = '$x_0,...x_9 \sim \mathcal{N}(0, 1)$\n$y= 10sin(\pi x_{0}x_{1}) + 20(x_2 - 0.5)^2 + 10x_3 + 5x_4 + Unif(0, 1)$' fig.suptitle(title, fontsize="x-large") plt.show()
bsd-3-clause
makerbot/ReplicatorG
skein_engines/skeinforge-47/fabmetheus_utilities/vector3index.py
12
8587
""" Vector3 is a three dimensional vector class. Below are examples of Vector3 use. >>> from vector3 import Vector3 >>> origin = Vector3() >>> origin 0.0, 0.0, 0.0 >>> pythagoras = Vector3( 3, 4, 0 ) >>> pythagoras 3.0, 4.0, 0.0 >>> pythagoras.magnitude() 5.0 >>> pythagoras.magnitudeSquared() 25 >>> triplePythagoras = pythagoras * 3.0 >>> triplePythagoras 9.0, 12.0, 0.0 >>> plane = pythagoras.dropAxis() >>> plane (3+4j) """ from __future__ import absolute_import try: import psyco psyco.full() except: pass #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities import xml_simple_writer import math import operator __author__ = 'Enrique Perez ([email protected])' __credits__ = 'Nophead <http://forums.reprap.org/profile.php?12,28>\nArt of Illusion <http://www.artofillusion.org/>' __date__ = '$Date: 2008/21/04 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' class Vector3Index: 'A three dimensional vector index class.' __slots__ = ['index', 'x', 'y', 'z'] def __init__( self, index, x = 0.0, y = 0.0, z = 0.0 ): self.index = index self.x = x self.y = y self.z = z def __abs__(self): 'Get the magnitude of the Vector3.' return math.sqrt( self.x * self.x + self.y * self.y + self.z * self.z ) magnitude = __abs__ def __add__(self, other): 'Get the sum of this Vector3 and other one.' return Vector3Index( self.index, self.x + other.x, self.y + other.y, self.z + other.z ) def __copy__(self): 'Get the copy of this Vector3.' return Vector3Index( self.index, self.x, self.y, self.z ) __pos__ = __copy__ copy = __copy__ def __div__(self, other): 'Get a new Vector3 by dividing each component of this one.' return Vector3Index( self.index, self.x / other, self.y / other, self.z / other ) def __eq__(self, other): 'Determine whether this vector is identical to other one.' if other == None: return False if other.__class__ != self.__class__: return False return self.x == other.x and self.y == other.y and self.z == other.z def __floordiv__(self, other): 'Get a new Vector3 by floor dividing each component of this one.' return Vector3Index( self.index, self.x // other, self.y // other, self.z // other ) def __hash__(self): 'Determine whether this vector is identical to other one.' return self.__repr__().__hash__() def __iadd__(self, other): 'Add other Vector3 to this one.' self.x += other.x self.y += other.y self.z += other.z return self def __idiv__(self, other): 'Divide each component of this Vector3.' self.x /= other self.y /= other self.z /= other return self def __ifloordiv__(self, other): 'Floor divide each component of this Vector3.' self.x //= other self.y //= other self.z //= other return self def __imul__(self, other): 'Multiply each component of this Vector3.' self.x *= other self.y *= other self.z *= other return self def __isub__(self, other): 'Subtract other Vector3 from this one.' self.x -= other.x self.y -= other.y self.z -= other.z return self def __itruediv__(self, other): 'True divide each component of this Vector3.' self.x = operator.truediv( self.x, other ) self.y = operator.truediv( self.y, other ) self.z = operator.truediv( self.z, other ) return self def __mul__(self, other): 'Get a new Vector3 by multiplying each component of this one.' return Vector3Index( self.index, self.x * other, self.y * other, self.z * other ) def __ne__(self, other): 'Determine whether this vector is not identical to other one.' 
return not self.__eq__(other) def __neg__(self): return Vector3Index( self.index, - self.x, - self.y, - self.z ) def __nonzero__(self): return self.x != 0 or self.y != 0 or self.z != 0 def __rdiv__(self, other): 'Get a new Vector3 by dividing each component of this one.' return Vector3Index( self.index, other / self.x, other / self.y, other / self.z ) def __repr__(self): 'Get the string representation of this Vector3 index.' return '(%s, %s, %s, %s)' % (self.index, self.x, self.y, self.z) def __rfloordiv__(self, other): 'Get a new Vector3 by floor dividing each component of this one.' return Vector3Index( self.index, other // self.x, other // self.y, other // self.z ) def __rmul__(self, other): 'Get a new Vector3 by multiplying each component of this one.' return Vector3Index( self.index, self.x * other, self.y * other, self.z * other ) def __rtruediv__(self, other): 'Get a new Vector3 by true dividing each component of this one.' return Vector3Index( self.index, operator.truediv( other , self.x ), operator.truediv( other, self.y ), operator.truediv( other, self.z ) ) def __sub__(self, other): 'Get the difference between the Vector3 and other one.' return Vector3Index( self.index, self.x - other.x, self.y - other.y, self.z - other.z ) def __truediv__(self, other): 'Get a new Vector3 by true dividing each component of this one.' return Vector3Index( self.index, operator.truediv( self.x, other ), operator.truediv( self.y, other ), operator.truediv( self.z, other ) ) def _getAccessibleAttribute(self, attributeName): 'Get the accessible attribute.' global globalGetAccessibleAttributeSet if attributeName in globalGetAccessibleAttributeSet: return getattr(self, attributeName, None) return None def _setAccessibleAttribute(self, attributeName, value): 'Set the accessible attribute.' if attributeName in globalSetAccessibleAttributeSet: setattr(self, attributeName, value) def cross(self, other): 'Calculate the cross product of this vector with other one.' return Vector3Index( self.index, self.y * other.z - self.z * other.y, - self.x * other.z + self.z * other.x, self.x * other.y - self.y * other.x ) def distance(self, other): 'Get the Euclidean distance between this vector and other one.' return math.sqrt( self.distanceSquared(other) ) def distanceSquared(self, other): 'Get the square of the Euclidean distance between this vector and other one.' separationX = self.x - other.x separationY = self.y - other.y separationZ = self.z - other.z return separationX * separationX + separationY * separationY + separationZ * separationZ def dot(self, other): 'Calculate the dot product of this vector with other one.' return self.x * other.x + self.y * other.y + self.z * other.z def dropAxis( self, which = 2 ): 'Get a complex by removing one axis of the vector3.' if which == 0: return complex( self.y, self.z ) if which == 1: return complex( self.x, self.z ) if which == 2: return complex( self.x, self.y ) def getFloatList(self): 'Get the vector as a list of floats.' return [ float( self.x ), float( self.y ), float( self.z ) ] def getIsDefault(self): 'Determine if this is the zero vector.' if self.x != 0.0: return False if self.y != 0.0: return False return self.z == 0.0 def getNormalized(self): 'Get the normalized Vector3.' magnitude = abs(self) if magnitude == 0.0: return self.copy() return self / magnitude def magnitudeSquared(self): 'Get the square of the magnitude of the Vector3.' return self.x * self.x + self.y * self.y + self.z * self.z def maximize(self, other): 'Maximize the Vector3.' 
self.x = max(other.x, self.x) self.y = max(other.y, self.y) self.z = max(other.z, self.z) def minimize(self, other): 'Minimize the Vector3.' self.x = min(other.x, self.x) self.y = min(other.y, self.y) self.z = min(other.z, self.z) def normalize(self): 'Scale each component of this Vector3 so that it has a magnitude of 1. If this Vector3 has a magnitude of 0, this method has no effect.' magnitude = abs(self) if magnitude != 0.0: self /= magnitude def reflect( self, normal ): 'Reflect the Vector3 across the normal, which is assumed to be normalized.' distance = 2 * ( self.x * normal.x + self.y * normal.y + self.z * normal.z ) return Vector3Index( self.index, self.x - distance * normal.x, self.y - distance * normal.y, self.z - distance * normal.z ) def setToVector3(self, other): 'Set this Vector3 to be identical to other one.' self.x = other.x self.y = other.y self.z = other.z def setToXYZ( self, x, y, z ): 'Set the x, y, and z components of this Vector3.' self.x = x self.y = y self.z = z globalGetAccessibleAttributeSet = 'x y z'.split() globalSetAccessibleAttributeSet = globalGetAccessibleAttributeSet
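
# A minimal usage sketch (added for illustration, mirroring the Vector3
# doctest in the module docstring): the index tag simply rides along with
# the arithmetic and is taken from the left-hand operand.
if __name__ == '__main__':
    pythagoras = Vector3Index(7, 3.0, 4.0, 0.0)
    print(abs(pythagoras))        # 5.0 -- magnitude
    print(pythagoras * 3.0)       # (7, 9.0, 12.0, 0.0) -- index is preserved
    print(pythagoras.dropAxis())  # (3+4j) -- z axis dropped, complex returned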
gpl-2.0
Pasithea/Flask-RESTFul-Sample
sample/exception.py
1
1163
from flask import jsonify class NotUniqueException(Exception): pass class ExistedException(Exception): pass class DoesNotExistsException(Exception): pass class HttpException(Exception): pass except_dict = { 'LoginFailed': { 'code': 403, 'message': "Login Failed" }, 'NeedAuth': { 'code': 403, 'message': "Need Auth" }, 'NotPermission': { 'code': 403, 'message': "Not Permission" }, 'GrandTypeError': { 'code': 400, 'message': "Grand Type Error" }, 'ParamsError': { 'code': 400, 'message': "Parameter Error" } } def __init__(self, **kwargs): self.message = self.message.format(**kwargs) def __str__(self): return self.message def __repr__(self): return self.message exceptions_list = [] bases = (HttpException,) attrs = { '__init__': __init__, '__str__': __str__, '__repr__': __repr__ } for (eklass_name, attr) in except_dict.items(): attrs.update(attr) eklass = type(str(eklass_name), bases, attrs) exceptions_list.append(eklass) globals().update({eklass_name: eklass})
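
# A minimal usage sketch (an assumption, not part of the original module):
# each key of `except_dict` now exists as a module-level exception class
# derived from HttpException, carrying a `code` and a formatted `message`.
if __name__ == '__main__':
    try:
        raise ParamsError()
    except HttpException as e:
        print('%s: %s' % (e.code, e))  # 400: Parameter Error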
mit
akosel/servo
python/servo/devenv_commands.py
31
4939
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution. # # Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or # http://www.apache.org/licenses/LICENSE-2.0> or the MIT license # <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your # option. This file may not be copied, modified, or distributed # except according to those terms. from __future__ import print_function, unicode_literals from os import path, getcwd, listdir import subprocess import sys from mach.decorators import ( CommandArgument, CommandProvider, Command, ) from servo.command_base import CommandBase, cd @CommandProvider class MachCommands(CommandBase): @Command('cargo', description='Run Cargo', category='devenv') @CommandArgument( 'params', default=None, nargs='...', help="Command-line arguments to be passed through to Cargo") def cargo(self, params): if not params: params = [] if self.context.topdir == getcwd(): with cd(path.join('components', 'servo')): return subprocess.call( ["cargo"] + params, env=self.build_env()) return subprocess.call(['cargo'] + params, env=self.build_env()) @Command('cargo-update', description='Same as update-cargo', category='devenv') @CommandArgument( 'params', default=None, nargs='...', help='Command-line arguments to be passed through to cargo update') @CommandArgument( '--package', '-p', default=None, help='Updates selected package') @CommandArgument( '--all-packages', '-a', action='store_true', help='Updates all packages') def cargo_update(self, params=None, package=None, all_packages=None): self.update_cargo(params, package, all_packages) @Command('update-cargo', description='Update Cargo dependencies', category='devenv') @CommandArgument( 'params', default=None, nargs='...', help='Command-line arguments to be passed through to cargo update') @CommandArgument( '--package', '-p', default=None, help='Updates selected package') @CommandArgument( '--all-packages', '-a', action='store_true', help='Updates all packages') def update_cargo(self, params=None, package=None, all_packages=None): if not params: params = [] if package: params += ["-p", package] elif all_packages: params = [] else: print("Please choose package to update with the --package (-p) ") print("flag or update all packages with --all-packages (-a) flag") sys.exit(1) cargo_paths = [path.join('components', 'servo'), path.join('ports', 'cef'), path.join('ports', 'gonk')] for cargo_path in cargo_paths: with cd(cargo_path): print(cargo_path) subprocess.call(["cargo", "update"] + params, env=self.build_env()) @Command('rustc', description='Run the Rust compiler', category='devenv') @CommandArgument( 'params', default=None, nargs='...', help="Command-line arguments to be passed through to rustc") def rustc(self, params): if params is None: params = [] return subprocess.call(["rustc"] + params, env=self.build_env()) @Command('rust-root', description='Print the path to the root of the Rust compiler', category='devenv') def rust_root(self): print(self.config["tools"]["rust-root"]) @Command('grep', description='`git grep` for selected directories.', category='devenv') @CommandArgument( 'params', default=None, nargs='...', help="Command-line arguments to be passed through to `git grep`") def grep(self, params): if not params: params = [] # get all directories under tests/ tests_dirs = listdir('tests') # Remove 'wpt' from obtained dir list tests_dirs = filter(lambda dir: dir != 'wpt', tests_dirs) # Set of directories in project root root_dirs = ['components', 
'ports', 'python', 'etc', 'resources'] # Generate absolute paths for directories in tests/ and project-root/ tests_dirs_abs = [path.join(self.context.topdir, 'tests', s) for s in tests_dirs] root_dirs_abs = [path.join(self.context.topdir, s) for s in root_dirs] # Absolute paths for all directories to be considered grep_paths = root_dirs_abs + tests_dirs_abs return subprocess.call(["git"] + ["grep"] + params + ['--'] + grep_paths, env=self.build_env())
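
# Usage sketch (an illustration, not part of the original file): these
# commands are exposed through Servo's `mach` driver, so from a checkout
# they would be invoked roughly as:
#
#   ./mach cargo build
#   ./mach update-cargo --package openssl
#   ./mach grep unimplemented
#
# `grep` deliberately skips tests/wpt, as the directory filtering above shows.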
mpl-2.0
willthames/ansible
test/units/parsing/vault/test_vault.py
13
18751
# -*- coding: utf-8 -*- # (c) 2012-2014, Michael DeHaan <[email protected]> # (c) 2016, Toshio Kuratomi <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import binascii import io import os from binascii import hexlify import pytest from ansible.compat.tests import unittest from ansible import errors from ansible.module_utils import six from ansible.module_utils._text import to_bytes, to_text from ansible.parsing.vault import VaultLib from ansible.parsing import vault class TestVaultIsEncrypted(unittest.TestCase): def test_bytes_not_encrypted(self): b_data = b"foobar" self.assertFalse(vault.is_encrypted(b_data)) def test_bytes_encrypted(self): b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible") self.assertTrue(vault.is_encrypted(b_data)) def test_text_not_encrypted(self): b_data = to_text(b"foobar") self.assertFalse(vault.is_encrypted(b_data)) def test_text_encrypted(self): b_data = to_text(b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")) self.assertTrue(vault.is_encrypted(b_data)) def test_invalid_text_not_ascii(self): data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ " self.assertFalse(vault.is_encrypted(data)) def test_invalid_bytes_not_ascii(self): data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ " b_data = to_bytes(data, encoding='utf-8') self.assertFalse(vault.is_encrypted(b_data)) class TestVaultIsEncryptedFile(unittest.TestCase): def test_binary_file_handle_not_encrypted(self): b_data = b"foobar" b_data_fo = io.BytesIO(b_data) self.assertFalse(vault.is_encrypted_file(b_data_fo)) def test_text_file_handle_not_encrypted(self): data = u"foobar" data_fo = io.StringIO(data) self.assertFalse(vault.is_encrypted_file(data_fo)) def test_binary_file_handle_encrypted(self): b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible") b_data_fo = io.BytesIO(b_data) self.assertTrue(vault.is_encrypted_file(b_data_fo)) def test_text_file_handle_encrypted(self): data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % to_text(hexlify(b"ansible")) data_fo = io.StringIO(data) self.assertTrue(vault.is_encrypted_file(data_fo)) def test_binary_file_handle_invalid(self): data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ " b_data = to_bytes(data) b_data_fo = io.BytesIO(b_data) self.assertFalse(vault.is_encrypted_file(b_data_fo)) def test_text_file_handle_invalid(self): data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ " data_fo = io.StringIO(data) self.assertFalse(vault.is_encrypted_file(data_fo)) def test_file_already_read_from_finds_header(self): b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") b_data_fo = io.BytesIO(b_data) b_data_fo.read(42) # Arbitrary number self.assertTrue(vault.is_encrypted_file(b_data_fo)) def 
test_file_already_read_from_saves_file_pos(self): b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") b_data_fo = io.BytesIO(b_data) b_data_fo.read(69) # Arbitrary number vault.is_encrypted_file(b_data_fo) self.assertEqual(b_data_fo.tell(), 69) def test_file_with_offset(self): b_data = b"JUNK$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") b_data_fo = io.BytesIO(b_data) self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4)) def test_file_with_count(self): b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") vault_length = len(b_data) b_data = b_data + u'ァ ア'.encode('utf-8') b_data_fo = io.BytesIO(b_data) self.assertTrue(vault.is_encrypted_file(b_data_fo, count=vault_length)) def test_file_with_offset_and_count(self): b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos") vault_length = len(b_data) b_data = b'JUNK' + b_data + u'ァ ア'.encode('utf-8') b_data_fo = io.BytesIO(b_data) self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4, count=vault_length)) @pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY, reason="Skipping cryptography tests because cryptography is not installed") class TestVaultCipherAes256(unittest.TestCase): def setUp(self): self.vault_cipher = vault.VaultAES256() def test(self): self.assertIsInstance(self.vault_cipher, vault.VaultAES256) # TODO: tag these as slow tests def test_create_key_cryptography(self): b_password = b'hunter42' b_salt = os.urandom(32) b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_cryptography, six.binary_type) @pytest.mark.skipif(not vault.HAS_PYCRYPTO, reason='Not testing pycrypto key as pycrypto is not installed') def test_create_key_pycrypto(self): b_password = b'hunter42' b_salt = os.urandom(32) b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_pycrypto, six.binary_type) @pytest.mark.skipif(not vault.HAS_PYCRYPTO, reason='Not comparing cryptography key to pycrypto key as pycrypto is not installed') def test_compare_new_keys(self): b_password = b'hunter42' b_salt = os.urandom(32) b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) self.assertEqual(b_key_cryptography, b_key_pycrypto) def test_create_key_known_cryptography(self): b_password = b'hunter42' # A fixed salt b_salt = b'q' * 32 # q is the most random letter. 
b_key_1 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_1, six.binary_type) # verify we get the same answer # we could potentially run a few iterations of this and time it to see if it's roughly constant time # and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI b_key_2 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_2, six.binary_type) self.assertEqual(b_key_1, b_key_2) # And again with pycrypto b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_3, six.binary_type) # verify we get the same answer # we could potentially run a few iterations of this and time it to see if it's roughly constant time # and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_4, six.binary_type) self.assertEqual(b_key_3, b_key_4) self.assertEqual(b_key_1, b_key_4) def test_create_key_known_pycrypto(self): b_password = b'hunter42' # A fixed salt b_salt = b'q' * 32 # q is the most random letter. b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_3, six.binary_type) # verify we get the same answer # we could potentially run a few iterations of this and time it to see if it's roughly constant time # and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16) self.assertIsInstance(b_key_4, six.binary_type) self.assertEqual(b_key_3, b_key_4) def test_is_equal_is_equal(self): self.assertTrue(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwxyz')) def test_is_equal_unequal_length(self): self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwx and sometimes y')) def test_is_equal_not_equal(self): self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'AbcdefghijKlmnopQrstuvwxZ')) def test_is_equal_empty(self): self.assertTrue(self.vault_cipher._is_equal(b'', b'')) def test_is_equal_non_ascii_equal(self): utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。') self.assertTrue(self.vault_cipher._is_equal(utf8_data, utf8_data)) def test_is_equal_non_ascii_unequal(self): utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。') utf8_data2 = to_bytes(u'Pot să mănânc sticlă și ea nu mă rănește.') # Test for the len optimization path self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data2)) # Test for the slower, char by char comparison path self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data[:-1] + b'P')) def test_is_equal_non_bytes(self): """ Anything not a byte string should raise a TypeError """ self.assertRaises(TypeError, self.vault_cipher._is_equal, u"One fish", b"two fish") self.assertRaises(TypeError, self.vault_cipher._is_equal, b"One fish", u"two fish") self.assertRaises(TypeError, self.vault_cipher._is_equal, 1, b"red fish") self.assertRaises(TypeError, self.vault_cipher._is_equal, b"blue fish", 2) @pytest.mark.skipif(not vault.HAS_PYCRYPTO, reason="Skipping Pycrypto tests because pycrypto is not installed") class TestVaultCipherAes256PyCrypto(TestVaultCipherAes256): 
def setUp(self): self.has_cryptography = vault.HAS_CRYPTOGRAPHY vault.HAS_CRYPTOGRAPHY = False super(TestVaultCipherAes256PyCrypto, self).setUp() def tearDown(self): vault.HAS_CRYPTOGRAPHY = self.has_cryptography super(TestVaultCipherAes256PyCrypto, self).tearDown() @pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY, reason="Skipping cryptography tests because cryptography is not installed") class TestVaultLib(unittest.TestCase): def setUp(self): self.v = VaultLib('test-vault-password') def test_encrypt(self): plaintext = u'Some text to encrypt in a café' b_vaulttext = self.v.encrypt(plaintext) self.assertIsInstance(b_vaulttext, six.binary_type) b_header = b'$ANSIBLE_VAULT;1.1;AES256\n' self.assertEqual(b_vaulttext[:len(b_header)], b_header) def test_encrypt_bytes(self): plaintext = to_bytes(u'Some text to encrypt in a café') b_vaulttext = self.v.encrypt(plaintext) self.assertIsInstance(b_vaulttext, six.binary_type) b_header = b'$ANSIBLE_VAULT;1.1;AES256\n' self.assertEqual(b_vaulttext[:len(b_header)], b_header) def test_is_encrypted(self): self.assertFalse(self.v.is_encrypted(b"foobar"), msg="encryption check on plaintext yielded false positive") b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible") self.assertTrue(self.v.is_encrypted(b_data), msg="encryption check on headered text failed") def test_format_output(self): self.v.cipher_name = "TEST" b_ciphertext = b"ansible" b_vaulttext = self.v._format_output(b_ciphertext) b_lines = b_vaulttext.split(b'\n') self.assertGreater(len(b_lines), 1, msg="failed to properly add header") b_header = b_lines[0] self.assertTrue(b_header.endswith(b';TEST'), msg="header does not end with cipher name") b_header_parts = b_header.split(b';') self.assertEqual(len(b_header_parts), 3, msg="header has the wrong number of parts") self.assertEqual(b_header_parts[0], b'$ANSIBLE_VAULT', msg="header does not start with $ANSIBLE_VAULT") self.assertEqual(b_header_parts[1], self.v.b_version, msg="header version is incorrect") self.assertEqual(b_header_parts[2], b'TEST', msg="header does not end with cipher name") def test_split_header(self): b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\nansible" b_ciphertext = self.v._split_header(b_vaulttext) b_lines = b_ciphertext.split(b'\n') self.assertEqual(b_lines[0], b"ansible", msg="Payload was not properly split from the header") self.assertEqual(self.v.cipher_name, u'TEST', msg="cipher name was not properly set") self.assertEqual(self.v.b_version, b"9.9", msg="version was not properly set") def test_encrypt_decrypt_aes(self): self.v.cipher_name = u'AES' self.v.b_password = b'ansible' # AES encryption code has been removed, so this is old output for # AES-encrypted 'foobar' with password 'ansible'. 
        b_vaulttext = b'''$ANSIBLE_VAULT;1.1;AES
53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3
fe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e
786a5a15efeb787e1958cbdd480d076c
'''
        b_plaintext = self.v.decrypt(b_vaulttext)
        self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")

    def test_encrypt_decrypt_aes256(self):
        self.v.cipher_name = u'AES256'
        plaintext = u"foobar"
        b_vaulttext = self.v.encrypt(plaintext)
        b_plaintext = self.v.decrypt(b_vaulttext)
        self.assertNotEqual(b_vaulttext, b"foobar", msg="encryption failed")
        self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")

    def test_encrypt_decrypt_aes256_existing_vault(self):
        self.v.cipher_name = u'AES256'
        b_orig_plaintext = b"Setec Astronomy"
        vaulttext = u'''$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138'''

        b_plaintext = self.v.decrypt(vaulttext)
        self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")

        b_vaulttext = to_bytes(vaulttext, encoding='ascii', errors='strict')
        b_plaintext = self.v.decrypt(b_vaulttext)
        self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")

    # FIXME This test isn't working quite yet.
    @pytest.mark.skip(reason='This test is not ready yet')
    def test_encrypt_decrypt_aes256_bad_hmac(self):
        self.v.cipher_name = 'AES256'
        # plaintext = "Setec Astronomy"
        enc_data = '''$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138'''
        b_data = to_bytes(enc_data, errors='strict', encoding='utf-8')
        b_data = self.v._split_header(b_data)
        foo = binascii.unhexlify(b_data)
        lines = foo.splitlines()
        # line 0 is salt, line 1 is hmac, line 2+ is ciphertext
        b_salt = lines[0]
        b_hmac = lines[1]
        b_ciphertext_data = b'\n'.join(lines[2:])

        b_ciphertext = binascii.unhexlify(b_ciphertext_data)
        # b_orig_ciphertext = b_ciphertext[:]

        # now muck with the text
        # b_munged_ciphertext = b_ciphertext[:10] + b'\x00' + b_ciphertext[11:]
        # b_munged_ciphertext = b_ciphertext
        # assert b_orig_ciphertext != b_munged_ciphertext

        b_ciphertext_data = binascii.hexlify(b_ciphertext)
        b_payload = b'\n'.join([b_salt, b_hmac, b_ciphertext_data])
        # reformat
        b_invalid_ciphertext = self.v._format_output(b_payload)

        # assert we throw an error
        self.v.decrypt(b_invalid_ciphertext)

    def test_encrypt_encrypted(self):
        self.v.cipher_name = u'AES'
        b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
        vaulttext = to_text(b_vaulttext, errors='strict')
        self.assertRaises(errors.AnsibleError, self.v.encrypt, b_vaulttext)
        self.assertRaises(errors.AnsibleError, self.v.encrypt, vaulttext)

    def test_decrypt_decrypted(self):
        plaintext = u"ansible"
        self.assertRaises(errors.AnsibleError, self.v.decrypt, plaintext)
        b_plaintext = b"ansible"
        self.assertRaises(errors.AnsibleError, self.v.decrypt, b_plaintext)

    def test_cipher_not_set(self):
        plaintext = u"ansible"
        self.v.encrypt(plaintext)
        self.assertEqual(self.v.cipher_name, "AES256")


@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
                    reason="Skipping Pycrypto tests because pycrypto is not installed")
class TestVaultLibPyCrypto(TestVaultLib):
    def setUp(self):
        self.has_cryptography = vault.HAS_CRYPTOGRAPHY
        vault.HAS_CRYPTOGRAPHY = False
        super(TestVaultLibPyCrypto, self).setUp()

    def tearDown(self):
        vault.HAS_CRYPTOGRAPHY = self.has_cryptography
        super(TestVaultLibPyCrypto, self).tearDown()
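
# A round-trip sketch (an illustration, not part of the original test file):
# the VaultLib API exercised above reduces to a few calls --
#
#   v = VaultLib('test-vault-password')
#   b_vaulttext = v.encrypt(u'Setec Astronomy')  # bytes with $ANSIBLE_VAULT;1.1;AES256 header
#   assert vault.is_encrypted(b_vaulttext)
#   assert v.decrypt(b_vaulttext) == b'Setec Astronomy'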
gpl-3.0
alphatwirl/alphatwirl
alphatwirl/collector/ToDataFrameWithDatasetColumn.py
1
1326
# Tai Sakuma <[email protected]> import pandas as pd from .ToTupleListWithDatasetColumn import ToTupleListWithDatasetColumn ##__________________________________________________________________|| class ToDataFrameWithDatasetColumn: def __init__(self, summaryColumnNames, datasetColumnName = 'component' ): self.summaryColumnNames = summaryColumnNames self.datasetColumnName = datasetColumnName self.to_tuple_list = ToTupleListWithDatasetColumn( summaryColumnNames = summaryColumnNames, datasetColumnName = datasetColumnName) def __repr__(self): name_value_pairs = ( ('summaryColumnNames', self.summaryColumnNames), ('datasetColumnName', self.datasetColumnName), ) return '{}({})'.format( self.__class__.__name__, ', '.join(['{} = {!r}'.format(n, v) for n, v in name_value_pairs]), ) def combine(self, dataset_readers_list): tuple_list = self.to_tuple_list.combine(dataset_readers_list) if tuple_list is None: return None header = tuple_list[0] contents = tuple_list[1:] return pd.DataFrame(contents, columns = header) ##__________________________________________________________________||
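
# A usage sketch (an assumption about the surrounding alphatwirl API, not
# part of the original file): combine() expects a list of (dataset, readers)
# pairs and returns a pandas DataFrame whose extra first column names the
# dataset, e.g.
#
#   to_df = ToDataFrameWithDatasetColumn(
#       summaryColumnNames=('met', 'n', 'nvar'),
#       datasetColumnName='dataset')
#   df = to_df.combine(dataset_readers_list)  # columns: dataset, met, n, nvar
##__________________________________________________________________||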
bsd-3-clause
FilipDominec/python-meep-utils
scripts_postpro/plot_TY.py
1
4307
#!/usr/bin/env python
#-*- coding: utf-8 -*-

simtime = 80e-12
size_y = 1400e-6
c = 3e8
maxfreq = 2e12

## Import common moduli
import numpy as np
from scipy.constants import c, hbar, pi
import matplotlib, sys, os, time
import matplotlib.pyplot as plt

## Start figure + subplot (interactive)
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(111, axisbg='w')
fig.subplots_adjust(left=.05, bottom=.05, right=.99, top=.99, wspace=.05, hspace=.05)

## Decide the filename to load data
import sys
filename = sys.argv[1] if len(sys.argv)>1 else 'input.dat'
if not os.path.isfile(filename): raise IOError, 'File %s can not be opened!' % filename

## Load n-dimensional arrays from a HDF5 file
import h5py
h5file = h5py.File(filename, "r")
print "Found datasets:", h5file.keys()
time1 = time.time()
data = np.array(h5file['ex.r']) * (1+0j)
data += np.array(h5file['ex.i']) * 1j
print "Loaded dataset with shape:", data.shape, 'in %04d s.' % (time.time()-time1)
try:
    Et = data[:,-1,:]   ## take the farthest slice by the z-axis
except IndexError:
    Et = data           ## if data already 2D
t = np.linspace(0, simtime, Et.shape[1])    ## define the dimension of data axes
y = np.linspace(0, size_y, Et.shape[0])

## Fourier transform
freq = np.fft.fftfreq(len(t), d=(t[1]-t[0]))        # calculate the frequency axis with proper spacing
Efy = np.fft.fft(Et, axis=1) / len(t) * 2*np.pi     # calculate the FFT values

#def ffts(arr):
#    return np.hstack([arr[len(arr)/2+1:], arr[:len(arr)/2]])
def ffts2(arr):
    return np.vstack([arr[len(arr)/2:,:], arr[:len(arr)/2,:]])
#freq = ffts(freq)
#Efy = ffts2(Efy)
freq = np.fft.fftshift(freq) #+ freq[len(freq)/2]
Efy = np.fft.fftshift(Efy)

kT = np.fft.fftfreq(len(y), d=(y[1]-y[0]))          # calculate the frequency axis with proper spacing
Ef = np.fft.fft(Efy, axis=0) / len(y) * 2*np.pi     # calculate the FFT values
kT = np.fft.fftshift(kT)
#Ef = np.fft.fftshift(Ef)
print Ef.shape
Ef = ffts2(Ef)
print Ef.shape

truncated = np.logical_and(freq>0, freq<maxfreq)    # (optional) get the frequency range
freq = freq[truncated]
Ef = Ef[:,truncated]
print 'freq', freq.shape, freq[::10]
print 'kT', kT.shape, kT[::10]

## plot contours for gridded data
#contours = plt.contourf(t, y, np.log10(np.abs(et)+1e-6), cmap=matplotlib.cm.gist_earth, extend='both') # levels=np.arange(0.,1,.01),
#contours = plt.contourf(t, y, et, cmap=matplotlib.cm.rdbu, extend='both') # levels=np.arange(0.,1,.01),
toplot = (np.abs(Et))
contours = plt.contourf(t, y, toplot, cmap=matplotlib.cm.gist_earth, levels=np.linspace(np.min(toplot)*0+np.max(toplot)*0,np.max(toplot),200) ,extend='both') #
#contours = plt.contourf(freq, kT, np.abs(Ef), cmap=matplotlib.cm.gist_earth, extend='both') # levels=np.arange(0.,1,.01),
#plt.plot([0, maxfreq], [0, 0], c='w',lw=.5)
#plt.plot([0, maxfreq], [0, maxfreq/c], c='w',lw=.5)
#plt.plot([0, maxfreq], [0, -maxfreq/c], c='w',lw=.5)
#plt.annotate('+45$^\\circ$', xy = (maxfreq/2, maxfreq/2/c), xytext = (-10, 10), textcoords='offset points',color='w')
#plt.annotate('-45$^\\circ$', xy = (maxfreq/2, -maxfreq/2/c), xytext = (10, 10), textcoords='offset points',color='w')
#

try:
    ## Load 1D curve
    filename = "effparam.dat"
    (x, y) = np.loadtxt(filename, usecols=(0,5), unpack=True)
    truncated = np.logical_and(x>0, x<maxfreq)  # (optional) get the frequency range
    x = x[truncated]
    y = y[truncated]
    ## Plot line
plt.plot(x, np.real(y)*1000, color="#FF8800", label=u"$y'$", ls='-', c='w',lw=1) except: print "refractive index could not be loaded" for contour in contours.collections: contour.set_antialiased(False) ## optional: avoid white aliasing (for matplotlib 1.0.1 and older) plt.colorbar() ## optional: colorbar ## Finish the plot + save #plt.ylim((-2e4,2e4)) plt.xlabel(u"time"); plt.ylabel(u"y"); plt.grid() plt.legend(prop={'size':10}, loc='upper right') plt.savefig("output_T-Y.png", bbox_inches='tight')
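
## Side note (added remark, not original code): the hand-rolled ffts2() above
## swaps the two halves of the first axis, which for even-length arrays is
## exactly what np.fft.fftshift(arr, axes=0) does; for odd lengths the two
## differ by one sample in where they split.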
gpl-2.0
danielharbor/openerp
addons/stock_picking_wave/__init__.py
374
1105
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import stock_picking_wave import wizard import controllers # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
xzturn/tensorflow
tensorflow/python/distribute/input_lib.py
2
49393
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Various classes representing distributed inputs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import six from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.experimental.ops import distribute from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import multi_device_iterator_ops from tensorflow.python.distribute import device_util from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import input_ops from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import values from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import device as tf_device from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.util import nest from tensorflow.python.util.deprecation import deprecated def get_distributed_dataset(dataset, input_workers, strategy, split_batch_by=None, input_context=None): """Returns a wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance. This is a common function that is used by all strategies to return the right tf.data.Dataset wrapped instance depending on the `dataset` argument type. Args: dataset: a tf.data.DatasetV1 or tf.data.DatasetV2 instance. input_workers: an InputWorkers object which specifies devices on which iterators should be created. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. split_batch_by: Optional integer. If present, we "split" each batch of the dataset by `split_batch_by` value. input_context: `InputContext` for sharding. Only pass this in for between graph multi-worker cases where there is only one `input_worker`. In these cases, we will shard based on the `input_pipeline_id` and `num_input_pipelines` in the `InputContext`. Returns: A wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance. 
""" if isinstance(dataset, dataset_ops.DatasetV1): return DistributedDatasetV1( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) else: return DistributedDataset( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) def get_distributed_datasets_from_function(dataset_fn, input_workers, input_contexts, strategy): """Returns a wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance. This is a common function that is used by all strategies to return the right tf.data.Dataset wrapped instance depending on if we are in graph or eager mode. Args: dataset_fn: a function that returns a tf.data.DatasetV1 or tf.data.DatasetV2 instance. input_workers: an InputWorkers object which specifies devices on which iterators should be created. input_contexts: A list of `InputContext` instances to be passed to call(s) to `dataset_fn`. Length and order should match worker order in `worker_device_pairs`. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. Returns: A wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance. """ if ops.executing_eagerly_outside_functions(): return DistributedDatasetsFromFunction( dataset_fn, input_workers, input_contexts, strategy) else: return DistributedDatasetsFromFunctionV1( dataset_fn, input_workers, input_contexts, strategy) class InputWorkers(object): """A 1-to-many mapping from input worker devices to compute devices.""" def __init__(self, worker_device_pairs): """Initialize an `InputWorkers` object. Args: worker_device_pairs: A sequence of pairs: `(input device, a tuple of compute devices fed by that input device)`. """ self._input_worker_devices = tuple(d for d, _ in worker_device_pairs) self._fed_devices = tuple(tuple(device_util.canonicalize(d) for d in f) for _, f in worker_device_pairs) @property def num_workers(self): return len(self._input_worker_devices) @property def worker_devices(self): return self._input_worker_devices def compute_devices_for_worker(self, worker_index): return self._fed_devices[worker_index] def __repr__(self): devices = self.worker_devices debug_repr = ",\n".join(" %d %s: %s" % (i, devices[i], self._fed_devices[i]) for i in range(len(devices))) return "%s:{\n%s}" % (self.__class__.__name__, debug_repr) def _get_next_as_optional(iterator, strategy, name=None): """Returns an empty dataset indicator and the next input from the iterator.""" replicas = [] worker_has_values = [] worker_devices = [] for i, worker in enumerate(iterator._input_workers.worker_devices): # pylint: disable=protected-access if name is not None: d = tf_device.DeviceSpec.from_string(worker) new_name = "%s_%s_%d" % (name, d.job, d.task) else: new_name = None with ops.device(worker): worker_has_value, next_element = ( iterator._iterators[i].get_next_as_list(new_name)) # pylint: disable=protected-access # Collective all-reduce requires explicit devices for inputs. with ops.device("/cpu:0"): # Converting to integers for all-reduce. worker_has_value = math_ops.cast(worker_has_value, dtypes.int32) worker_devices.append(worker_has_value.device) worker_has_values.append(worker_has_value) # Make `replicas` a flat list of values across all replicas. replicas.append(next_element) # Run an all-reduce to see whether any worker has values. # TODO(b/131423105): we should be able to short-cut the all-reduce in some # cases. 
if getattr(strategy.extended, "_support_per_replica_values", True): # Slight hack: `reduce` expects a `PerReplica`, so we pass it one, even # though it doesn't actually have a value per replica. worker_has_values = values.PerReplica(worker_has_values) global_has_value = strategy.reduce( reduce_util.ReduceOp.SUM, worker_has_values, axis=None) else: assert len(worker_has_values) == 1 global_has_value = worker_has_values[0] global_has_value = array_ops.reshape( math_ops.cast(global_has_value, dtypes.bool), []) return global_has_value, replicas def _is_statically_shaped(tensor_class, shape): """Test if an iteratort output is statically shaped. For sparse and ragged tensors this only tests the batch dimension. Args: tensor_class: a class from an iterator.output_classes list. shape: a TensorShape from an iterator.output_shapes list. Returns: True if the shape is static, false otherwise. """ if (tensor_class == sparse_tensor.SparseTensor or isinstance(tensor_class, ragged_tensor.RaggedTensorSpec)): # For sparse or ragged tensor, we should only check the first # dimension in order to get_next_as_optional. This is because # when these tensors get batched by dataset only the batch dimension # is set. if shape.rank > 0 and shape.as_list()[0] is None: return False return True return shape.is_fully_defined() class DistributedIterator(object): """Common implementation for all input iterators.""" def __init__(self, input_workers, iterators, strategy): static_shape = True for iterator in iterators: if not isinstance(iterator, _SingleWorkerDatasetIterator): continue flattened = zip(nest.flatten(iterator.output_shapes), nest.flatten(iterator.output_classes)) for output_shape, output_class in flattened: if not _is_statically_shaped(output_class, output_shape): static_shape = False break # TODO(b/133073708): we currently need a flag to control the usage because # there is a performance difference between get_next() and # get_next_as_optional(). And we only enable get_next_as_optional when the # output shapes are not static. # # TODO(yuefengz): Currently `experimental_enable_get_next_as_optional` is # always set to False in CollectiveAllReduceStrategy. We want to have a way # to distinguish multi workers/single worker between graph, so we can enable # the behavior in single worker case. # # TODO(rxsang): We want to always enable the get_next_as_optional behavior # when user passed input_fn instead of dataset. if getattr( strategy.extended, "experimental_enable_get_next_as_optional", False): self._enable_get_next_as_optional = not static_shape else: self._enable_get_next_as_optional = False assert isinstance(input_workers, InputWorkers) if not input_workers.worker_devices: raise ValueError("Should have at least one worker for input iterator.") self._iterators = iterators self._input_workers = input_workers self._strategy = strategy def next(self): return self.__next__() def __next__(self): try: return self.get_next() except errors.OutOfRangeError: raise StopIteration def __iter__(self): return self def get_next(self, name=None): """Returns the next input from the iterator for all replicas.""" if not self._enable_get_next_as_optional: replicas = [] for i, worker in enumerate(self._input_workers.worker_devices): if name is not None: d = tf_device.DeviceSpec.from_string(worker) new_name = "%s_%s_%d" % (name, d.job, d.task) else: new_name = None with ops.device(worker): # Make `replicas` a flat list of values across all replicas. 
replicas.extend( self._iterators[i].get_next_as_list_static_shapes(new_name)) return values.regroup(replicas) out_of_range_replicas = [] def out_of_range_fn(worker_index, device): """This function will throw an OutOfRange error.""" # This will only be called when there is no data left, so calling # get_next() will trigger an OutOfRange error. data = self._iterators[worker_index].get_next(device) out_of_range_replicas.append(data) return data global_has_value, replicas = _get_next_as_optional(self, self._strategy) results = [] for i, worker in enumerate(self._input_workers.worker_devices): with ops.device(worker): devices = self._input_workers.compute_devices_for_worker(i) for j, device in enumerate(devices): with ops.device(device): # pylint: disable=undefined-loop-variable # pylint: disable=cell-var-from-loop # It is fine for the lambda to capture variables from the loop as # the lambda is executed in the loop as well. result = control_flow_ops.cond( global_has_value, lambda: replicas[i][j], lambda: out_of_range_fn(i, device), strict=True, ) # pylint: enable=cell-var-from-loop # pylint: enable=undefined-loop-variable results.append(result) replicas = results # Some dimensions in `replicas` will become unknown after we conditionally # return the real tensors or the dummy tensors. We fix the input shapes by # using the shapes from `out_of_range_replicas` because it is calling # get_next() inside. flattened_replicas = nest.flatten(replicas) for i, replica_data in enumerate(nest.flatten(out_of_range_replicas)): for target, source in zip( nest.flatten(flattened_replicas[i], expand_composites=True), nest.flatten(replica_data, expand_composites=True)): target.set_shape(source.get_shape()) # `SparseTensor` shape is not determined by the shape of its component # tensors. Rather, its shape depends on a tensor's values. if sparse_tensor.is_sparse(replica_data) and replica_data.get_shape(): dense_shape = replica_data.get_shape() with ops.device(flattened_replicas[i].op.device): # For partially defined shapes, fill in missing values from tensor. if not dense_shape.is_fully_defined(): dense_shape = array_ops.stack([ flattened_replicas[i].dense_shape[j] if dim is None else dim for j, dim in enumerate(dense_shape.as_list()) ]) flattened_replicas[i] = sparse_tensor.SparseTensor( indices=flattened_replicas[i].indices, values=flattened_replicas[i].values, dense_shape=dense_shape) replicas = nest.pack_sequence_as(replicas, flattened_replicas) return values.regroup(replicas) # We need a private initializer method for re-initializing multidevice # iterators when used with Keras training loops. If we don't reinitialize the # iterator we run into memory leak issues (b/123315763). @property def _initializer(self): init_ops = [] for it in self._iterators: init_ops.extend(it.initialize()) return control_flow_ops.group(init_ops) @property def element_spec(self): """The type specification of an element of this iterator.""" return self._element_spec class DistributedIteratorV1(DistributedIterator): """Input Iterator for tf.data.DatasetV1.""" @deprecated(None, "Use the iterator's `initializer` property instead.") def initialize(self): """Initialize underlying iterators. Returns: A list of any initializer ops that should be run. """ return super(DistributedIteratorV1, self)._initializer @property def initializer(self): """Returns a list of ops that initialize the iterator.""" return self.initialize() # TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property def output_classes(self): return self._iterators[0].output_classes # TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs. @property def output_shapes(self): return self._iterators[0].output_shapes # TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs. @property def output_types(self): return self._iterators[0].output_types # TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs. def get_iterator(self, worker): for i, w in enumerate(self._input_workers.worker_devices): if worker == w: return self._iterators[i] return None class _IterableInput(object): """Base class for iterable inputs for distribution strategies.""" def __init__(self, input_workers): assert isinstance(input_workers, InputWorkers) self._input_workers = input_workers def __iter__(self): raise NotImplementedError("must be implemented in descendants") def reduce(self, initial_state, reduce_fn): """Execute a `reduce_fn` over all the elements of the input.""" iterator = iter(self) has_data, data = _get_next_as_optional(iterator, self._strategy) def cond(has_data, data, state): del data, state # Unused. return has_data def loop_body(has_data, data, state): """Executes `reduce_fn` in a loop till the dataset is empty.""" del has_data # Unused. # data is a list of lists here, where each list corresponds to one worker. # TODO(b/130570614): Add support for the multiworker and TPU pods use # case. if self._input_workers.num_workers == 1: data = data[0] else: raise ValueError("Dataset iteration within a tf.function is" " not supported for multiple workers.") state = reduce_fn(state, values.regroup(data)) has_data, data = _get_next_as_optional(iterator, self._strategy) return has_data, data, state has_data, data, final_state = control_flow_ops.while_loop( cond, loop_body, [has_data, data, initial_state], parallel_iterations=1) return final_state class DistributedDataset(_IterableInput): """Wrapped tf.data.DatasetV2 that supports prefetching to multiple devices.""" def __init__(self, dataset, input_workers, strategy, split_batch_by=None, input_context=None): """Distribute the dataset on all workers. If `split_batch_by` is not None, we "split" each batch of the dataset by `split_batch_by` value. Args: dataset: `tf.data.Dataset` that will be used as the input source. input_workers: an `InputWorkers` object. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. split_batch_by: Optional integer. If present, we "split" each batch of the dataset by `split_batch_by` value. input_context: `InputContext` for sharding. Only pass this in for between graph multi-worker cases where there is only one `input_worker`. In these cases, we will shard based on the `input_pipeline_id` and `num_input_pipelines` in the `InputContext`. """ super(DistributedDataset, self).__init__(input_workers=input_workers) # We clone and shard the dataset on each worker. The current setup tries to # shard the dataset by files if possible so that each worker sees a # different subset of files. If that is not possible, we will attempt to # shard the final input such that each worker will run the entire # preprocessing pipeline and only receive its own shard of the dataset. if split_batch_by: try: # pylint: disable=protected-access with ops.colocate_with(dataset._variant_tensor): dataset = distribute._RebatchDataset(dataset, split_batch_by) # Add a prefetch to pipeline rebatching for performance.
# TODO(rachelim): Instead of inserting an extra prefetch stage here, # leverage static graph rewrites to insert _RebatchDataset before # the final `prefetch` if it exists. dataset = dataset.prefetch(split_batch_by) except errors.InvalidArgumentError as e: if "without encountering a batch" in str(e): six.reraise( ValueError, ValueError( "Call the `batch` method on the input Dataset in order to be " "able to split your input across {} replicas.\n Please see " "the tf.distribute.Strategy guide. {}".format( split_batch_by, e)), sys.exc_info()[2]) else: raise # TODO(b/138745411): Remove once stateful transformations are supported. options = dataset_ops.Options() options.experimental_distribute._make_stateless = True # pylint: disable=protected-access dataset = dataset.with_options(options) self._cloned_datasets = [] if input_context: # Between-graph where we rely on the input_context for sharding assert input_workers.num_workers == 1 dataset = input_ops.auto_shard_dataset(dataset, input_context.num_input_pipelines, input_context.input_pipeline_id) self._cloned_datasets.append(dataset) else: replicated_ds = distribute.replicate(dataset, input_workers.worker_devices) for i, worker in enumerate(input_workers.worker_devices): with ops.device(worker): cloned_dataset = replicated_ds[worker] cloned_dataset = cloned_dataset.with_options(dataset.options()) cloned_dataset = input_ops.auto_shard_dataset( cloned_dataset, len(input_workers.worker_devices), i) self._cloned_datasets.append(cloned_dataset) self._input_workers = input_workers self._strategy = strategy self._element_spec = _create_distributed_tensor_spec(self._strategy, dataset.element_spec) # pylint: disable=protected-access def __iter__(self): if not (context.executing_eagerly() or ops.get_default_graph().building_function): raise RuntimeError("__iter__() is only supported inside of tf.function " "or when eager execution is enabled.") worker_iterators = _create_iterators_per_worker(self._cloned_datasets, self._input_workers) iterator = DistributedIterator(self._input_workers, worker_iterators, self._strategy) iterator._element_spec = self.element_spec # pylint: disable=protected-access return iterator @property def element_spec(self): """The type specification of an element of this dataset.""" return self._element_spec class DistributedDatasetV1(DistributedDataset): """Wrapped tf.data.DatasetV1 that supports prefetching to multiple devices.""" def __init__(self, dataset, input_workers, strategy, split_batch_by=None, input_context=None): self._input_workers = input_workers super(DistributedDatasetV1, self).__init__( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) def make_one_shot_iterator(self): """Get a one-time-use iterator for DistributedDatasetV1. Note: This API is deprecated. Please use `for ... in dataset:` to iterate over the dataset or `iter` to create an iterator. Returns: A DistributedIteratorV1 instance. """ return self._make_one_shot_iterator() def _make_one_shot_iterator(self): """Get an iterator for DistributedDatasetV1.""" # Graph mode with one shot iterator is disabled because we have to call # `initialize` on the iterator which is only required if we are using a # tf.distribute strategy. if not context.executing_eagerly(): raise ValueError("Cannot create a one shot iterator. Please use " "`make_initializable_iterator()` instead.") return self._get_iterator() def make_initializable_iterator(self): """Get an initializable iterator for DistributedDatasetV1. Note: This API is deprecated.
Please use `tf.compat.v1.data.make_initializable_iterator(dataset)` to create an initializable iterator. Returns: A DistributedIteratorV1 instance. """ return self._make_initializable_iterator() def _make_initializable_iterator(self, shared_name=None): # pylint: disable=unused-argument """Get an initializable iterator for DistributedDatasetV1.""" # Eager mode generates already initialized iterators. Hence we cannot create # an initializable iterator. if context.executing_eagerly(): raise ValueError("Cannot create initializable iterator in Eager mode. " "Please use `iter()` instead.") return self._get_iterator() def _get_iterator(self): worker_iterators = _create_iterators_per_worker(self._cloned_datasets, self._input_workers) iterator = DistributedIteratorV1(self._input_workers, worker_iterators, self._strategy) iterator._element_spec = self.element_spec # pylint: disable=protected-access return iterator # TODO(priyag): Add other replication modes. class DistributedDatasetsFromFunction(_IterableInput): """Inputs created from dataset function.""" def __init__(self, dataset_fn, input_workers, input_contexts, strategy): """Makes an iterable from datasets created by the given function. Args: dataset_fn: A function that returns a `Dataset` given an `InputContext`. input_workers: an `InputWorkers` object. input_contexts: A list of `InputContext` instances to be passed to call(s) to `dataset_fn`. Length and order should match worker order in `worker_device_pairs`. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. """ super(DistributedDatasetsFromFunction, self).__init__( input_workers=input_workers) if input_workers.num_workers != len(input_contexts): raise ValueError( "Number of input workers (%d) is not same as number of " "input_contexts (%d)" % (input_workers.num_workers, len(input_contexts))) self._dataset_fn = dataset_fn self._input_workers = input_workers self._input_contexts = input_contexts self._strategy = strategy self._element_spec = None def __iter__(self): if not (context.executing_eagerly() or ops.get_default_graph().building_function): raise RuntimeError("__iter__() is only supported inside of tf.function " "or when eager execution is enabled.") iterators, element_spec = _create_iterators_per_worker_with_input_context( self._input_contexts, self._input_workers, self._dataset_fn) iterator = DistributedIterator(self._input_workers, iterators, self._strategy) self._element_spec = _create_distributed_tensor_spec(self._strategy, element_spec) iterator._element_spec = self._element_spec # pylint: disable=protected-access return iterator @property def element_spec(self): """The type specification of an element of this dataset.""" if self._element_spec is None: raise ValueError("You must create an iterator before calling " "`element_spec` on the distributed dataset or iterator. " "This is because the dataset function is not called " "before an iterator is created.") return self._element_spec class DistributedDatasetsFromFunctionV1(DistributedDatasetsFromFunction): """Inputs created from dataset function.""" def _make_initializable_iterator(self, shared_name=None): """Get an initializable iterator for DistributedDatasetsFromFunctionV1.""" del shared_name # Unused # Eager mode generates already initialized iterators. Hence we cannot create # an initializable iterator. if context.executing_eagerly(): raise ValueError("Cannot create initializable iterator in Eager mode. 
" "Please use `iter()` instead.") return self._get_iterator() def _make_one_shot_iterator(self): """Get an iterator for iterating over DistributedDatasetsFromFunctionV1.""" # Graph mode with one shot iterator is disabled because we have to call # `initialize` on the iterator which is only required if we are using a # tf.distribute strategy. if not context.executing_eagerly(): raise ValueError("Cannot create a one shot iterator. Please use " "`make_initializable_iterator()` instead.") return self._get_iterator() def _get_iterator(self): iterators, element_spec = _create_iterators_per_worker_with_input_context( self._input_contexts, self._input_workers, self._dataset_fn) iterator = DistributedIteratorV1(self._input_workers, iterators, self._strategy) self._element_spec = _create_distributed_tensor_spec(self._strategy, element_spec) iterator._element_spec = self._element_spec # pylint: disable=protected-access return iterator # TODO(anjalisridhar): This class will be soon be removed in favor of newer # APIs. class InputFunctionIterator(DistributedIteratorV1): """Iterator created from input function.""" def __init__(self, input_fn, input_workers, input_contexts, strategy): """Make an iterator for input provided via an input function. Currently implements PER_WORKER mode, in which the `input_fn` is called once on each worker. TODO(priyag): Add other replication modes. Args: input_fn: Input function that returns a `tf.data.Dataset` object. input_workers: an `InputWorkers` object. input_contexts: A list of `InputContext` instances to be passed to call(s) to `input_fn`. Length and order should match worker order in `worker_device_pairs`. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. """ assert isinstance(input_workers, InputWorkers) if input_workers.num_workers != len(input_contexts): raise ValueError( "Number of input workers (%d) is not same as number of " "input_contexts (%d)" % (input_workers.num_workers, len(input_contexts))) iterators = [] for i, ctx in enumerate(input_contexts): worker = input_workers.worker_devices[i] with ops.device(worker): result = input_fn(ctx) devices = input_workers.compute_devices_for_worker(i) if isinstance(result, dataset_ops.DatasetV2): iterator = _SingleWorkerDatasetIterator(result, worker, devices) elif callable(result): iterator = _SingleWorkerCallableIterator(result, worker, devices) else: raise ValueError( "input_fn must return a tf.data.Dataset or a callable.") iterators.append(iterator) super(InputFunctionIterator, self).__init__(input_workers, iterators, strategy) # TODO(anjalisridhar): This class will soon be removed and users should move # to using DistributedIterator. class DatasetIterator(DistributedIteratorV1): """Iterator created from input dataset.""" def __init__(self, dataset, input_workers, strategy, split_batch_by=None, input_context=None): """Make an iterator for the dataset on given devices. If `split_batch_by` is not None, we "split" each batch of the dataset by `split_batch_by` value. Args: dataset: `tf.data.Dataset` that will be used as the input source. input_workers: an `InputWorkers` object. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. split_batch_by: Optional integer. If present, we "split" each batch of the dataset by `split_batch_by` value. input_context: `InputContext` for sharding. Only pass this in for between graph multi-worker cases where there is only one `input_worker`. 
In these cases, we will shard based on the `input_pipeline_id` and `num_input_pipelines` in the `InputContext`. """ dist_dataset = DistributedDatasetV1( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) worker_iterators = _create_iterators_per_worker( dist_dataset._cloned_datasets, input_workers) # pylint: disable=protected-access super(DatasetIterator, self).__init__( input_workers, worker_iterators, # pylint: disable=protected-access strategy) self._element_spec = dist_dataset.element_spec def _dummy_tensor_fn(value_structure): """A function to create dummy tensors from `value_structure`.""" def create_dummy_tensor(type_spec): """Create a dummy tensor with possible batch dimensions set to 0.""" if isinstance(type_spec, ragged_tensor.RaggedTensorSpec): # Splice out the ragged dimensions. # pylint: disable=protected-access feature_shape = type_spec._shape[:1].concatenate( type_spec._shape[(1 + type_spec._ragged_rank):]) feature_type = type_spec._dtype # pylint: enable=protected-access else: feature_shape = type_spec.shape feature_type = type_spec.dtype # Ideally we should set the batch dimension to 0, however as in # DistributionStrategy we don't know the batch dimension, we try to # guess it as much as possible. If the feature has unknown dimensions, we # will set them to 0. If the feature shape is already static, we guess the # first dimension as batch dimension and set it to 0. dims = ([dim if dim is not None else 0 for dim in feature_shape.as_list()] if feature_shape else []) if dims and (isinstance(type_spec, ragged_tensor.RaggedTensorSpec) or feature_shape.is_fully_defined()): dims[0] = tensor_shape.Dimension(0) if isinstance(type_spec, sparse_tensor.SparseTensorSpec): return sparse_tensor.SparseTensor( values=array_ops.zeros(0, feature_type), indices=array_ops.zeros((0, len(dims)), dtypes.int64), dense_shape=dims) # Create the dummy tensor. dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type) if isinstance(type_spec, ragged_tensor.RaggedTensorSpec): # Reinsert the ragged dimensions with size 0. # pylint: disable=protected-access row_splits = array_ops.zeros(1, type_spec._row_splits_dtype) dummy_tensor = ragged_tensor.RaggedTensor.from_nested_row_splits( dummy_tensor, (row_splits,) * type_spec._ragged_rank, validate=False) # pylint: enable=protected-access return dummy_tensor return nest.map_structure(create_dummy_tensor, value_structure) class _SingleWorkerDatasetIterator(object): """Iterator for a single `tf.data.Dataset`.""" def __init__(self, dataset, worker, devices): """Create iterator for the `dataset` to fetch data to worker's `devices` . `MultiDeviceIterator` is used to prefetch input to the devices on the given worker. Args: dataset: A `tf.data.Dataset` instance. worker: Worker on which ops should be created. devices: Distribute data from `dataset` to these devices. """ self._dataset = dataset self._worker = worker self._devices = devices self._make_iterator() def _make_iterator(self): """Make appropriate iterator on the dataset.""" with ops.device(self._worker): self._iterator = multi_device_iterator_ops.MultiDeviceIterator( self._dataset, self._devices) def get_next(self, device, name=None): """Get next element for the given device.""" del name with ops.device(self._worker): return self._iterator.get_next(device) def get_next_as_list_static_shapes(self, name=None): """Get next element from the underlying iterator. Runs the iterator get_next() within a device scope. 
Since this doesn't use get_next_as_optional(), it is considerably faster than get_next_as_list() (but can only be used when the shapes are static). Args: name: not used. Returns: A list consisting of the next data from each device. """ del name with ops.device(self._worker): return self._iterator.get_next() def get_next_as_list(self, name=None): """Get next element from underlying iterator. If there is no data left, a list of dummy tensors with possible batch dimensions set to 0 will be returned. Use of get_next_as_optional() and extra logic adds overhead compared to get_next_as_list_static_shapes(), but allows us to handle non-static shapes. Args: name: not used. Returns: A boolean tensor that indicates whether there is any data in the next element, and the real data as the next element or a list of dummy tensors if no data is left. """ del name with ops.device(self._worker): data_list = self._iterator.get_next_as_optional() result = [] for i, data in enumerate(data_list): # Place the condition op in the same device as the data so the data # doesn't need to be sent back to the worker. with ops.device(self._devices[i]): # MultiDeviceIterator will fetch data in order, so we only need to # check if the first replica has a value to see whether there is data # left for this single worker. if i == 0: worker_has_value = data.has_value() # pylint: disable=unnecessary-lambda # pylint: disable=cell-var-from-loop real_data = control_flow_ops.cond( data.has_value(), lambda: data.get_value(), lambda: _dummy_tensor_fn(data.value_structure), strict=True, ) result.append(real_data) # pylint: enable=cell-var-from-loop # pylint: enable=unnecessary-lambda return worker_has_value, result def initialize(self): """Initialize underlying iterator. In eager execution, this simply recreates the underlying iterator. In graph execution, it returns the initializer ops for the underlying iterator. Returns: A list of any initializer ops that should be run. """ if ops.executing_eagerly_outside_functions(): self._iterator._eager_reset() # pylint: disable=protected-access return [] else: return [self._iterator.initializer] @property def output_classes(self): return dataset_ops.get_legacy_output_classes(self._iterator) @property def output_shapes(self): return dataset_ops.get_legacy_output_shapes(self._iterator) @property def output_types(self): return dataset_ops.get_legacy_output_types(self._iterator) class _SingleWorkerCallableIterator(object): """Iterator for a single tensor-returning callable.""" def __init__(self, fn, worker, devices): self._fn = fn self._worker = worker self._devices = devices def get_next(self, device, name=None): """Get next element for the given device from the callable.""" del device, name with ops.device(self._worker): return self._fn() def get_next_as_list_static_shapes(self, name=None): """Get next element from the callable.""" del name with ops.device(self._worker): data_list = [self._fn() for _ in self._devices] return data_list def get_next_as_list(self, name=None): """Get next element from the callable.""" del name with ops.device(self._worker): data_list = [self._fn() for _ in self._devices] return constant_op.constant(True), data_list def initialize(self): # TODO(petebu) Should this throw an exception instead?
return [] def _create_iterators_per_worker(worker_datasets, input_workers): """Create a multidevice iterator on each of the workers.""" assert isinstance(input_workers, InputWorkers) assert len(worker_datasets) == len(input_workers.worker_devices) iterators = [] for i, worker in enumerate(input_workers.worker_devices): with ops.device(worker): worker_devices = input_workers.compute_devices_for_worker(i) iterator = _SingleWorkerDatasetIterator(worker_datasets[i], worker, worker_devices) iterators.append(iterator) return iterators def _create_iterators_per_worker_with_input_context(input_contexts, input_workers, dataset_fn): """Create a multidevice iterator per worker given a dataset function.""" iterators = [] for i, ctx in enumerate(input_contexts): worker = input_workers.worker_devices[i] with ops.device(worker): dataset = dataset_fn(ctx) # TODO(b/138745411): Remove once stateful transformations are supported. options = dataset_ops.Options() options.experimental_distribute._make_stateless = True # pylint: disable=protected-access dataset = dataset.with_options(options) devices = input_workers.compute_devices_for_worker(i) iterator = _SingleWorkerDatasetIterator(dataset, worker, devices) iterators.append(iterator) return iterators, dataset.element_spec # TODO(sourabhbajaj): Remove this in favor of distributed datasets def _get_batched_dataset(d): """Get the batched dataset from `d`.""" # pylint: disable=protected-access if isinstance(d, dataset_ops.DatasetV1Adapter): d = d._dataset if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)): return d elif isinstance(d, (dataset_ops.PrefetchDataset, dataset_ops._OptionsDataset)): return _get_batched_dataset(d._input_dataset) raise ValueError( "Unable to get batched dataset from the input dataset. `batch` " "or `map_and_batch` needs to be the last operation on the dataset. " "The batch operations can be followed by a prefetch.") def _get_batched_dataset_attributes(d): """Get `batch_size`, `drop_remainder` of dataset.""" # pylint: disable=protected-access assert isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)) if isinstance(d, dataset_ops.BatchDataset): batch_size = d._batch_size drop_remainder = d._drop_remainder elif isinstance(d, batching._MapAndBatchDataset): batch_size = d._batch_size_t drop_remainder = d._drop_remainder_t # pylint: enable=protected-access if tensor_util.is_tensor(batch_size): batch_size = tensor_util.constant_value(batch_size) if tensor_util.is_tensor(drop_remainder): drop_remainder = tensor_util.constant_value(drop_remainder) return batch_size, drop_remainder # TODO(sourabhbajaj): Remove this in favor of distributed datasets def _get_dataset_attributes(dataset): """Get the underlying attributes from the dataset object.""" # pylint: disable=protected-access # First, get batch_size and drop_remainder from the dataset. We need # to walk back the dataset creation process and find the batched version in # order to get the attributes. batched_dataset = _get_batched_dataset(dataset) batch_size, drop_remainder = _get_batched_dataset_attributes(batched_dataset) # Second, the prefetch buffer should be obtained from the original dataset.
prefetch_buffer = None if isinstance(dataset, dataset_ops.PrefetchDataset): prefetch_buffer = dataset._buffer_size elif (isinstance(dataset, dataset_ops.DatasetV1Adapter) and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)): prefetch_buffer = dataset._dataset._buffer_size return batch_size, drop_remainder, prefetch_buffer class MultiStepContext(object): """A context object that can be used to capture things when running steps. This context object is useful when running multiple steps at a time using the `experimental_run_steps_on_iterator` API. For e.g. it allows the user's step function to specify which outputs to emit at what frequency. Currently it supports capturing output from the last step, as well as capturing non tensor outputs. In the future it will be augmented to support other use cases such as output each N steps. """ def __init__(self): """Initialize an output context. Returns: A context object. """ self._last_step_outputs = {} self._last_step_outputs_reduce_ops = {} self._non_tensor_outputs = {} @property def last_step_outputs(self): """A dictionary consisting of outputs to be captured on last step. Keys in the dictionary are names of tensors to be captured, as specified when `set_last_step_output` is called. Values in the dictionary are the tensors themselves. If `set_last_step_output` was called with a `reduce_op` for this output, then the value is the reduced value. Returns: A dictionary with last step outputs. """ return self._last_step_outputs def _set_last_step_outputs(self, outputs): """Replace the entire dictionary of last step outputs.""" if not isinstance(outputs, dict): raise ValueError("Need a dictionary to set last_step_outputs.") self._last_step_outputs = outputs def set_last_step_output(self, name, output, reduce_op=None): """Set `output` with `name` to be outputted from the last step. Args: name: String, name to identify the output. Doesn't need to match tensor name. output: The tensors that should be outputted with `name`. See below for actual types supported. reduce_op: Reduction method to use to reduce outputs from multiple replicas. Required if `set_last_step_output` is called in a replica context. Optional in cross_replica_context. When present, the outputs from all the replicas are reduced using the current distribution strategy's `reduce` method. Hence, the type of `output` must be what's supported by the corresponding `reduce` method. For e.g. if using MirroredStrategy and reduction is set, output must be a `PerReplica` value. The reduce method is also recorded in a dictionary `_last_step_outputs_reduce_ops` for later interpreting of the outputs as already reduced or not. """ if distribution_strategy_context.in_cross_replica_context(): self._last_step_outputs_reduce_ops[name] = reduce_op if reduce_op is None: self._last_step_outputs[name] = output else: distribution = distribution_strategy_context.get_strategy() self._last_step_outputs[name] = distribution.reduce(reduce_op, output, axis=None) else: assert reduce_op is not None def merge_fn(distribution, value): self._last_step_outputs[name] = distribution.reduce(reduce_op, value, axis=None) # Setting this inside the `merge_fn` because all replicas share the same # context object, so it's more robust to set it only once (even if all # the replicas are trying to set the same value). 
self._last_step_outputs_reduce_ops[name] = reduce_op distribution_strategy_context.get_replica_context().merge_call( merge_fn, args=(output,)) @property def non_tensor_outputs(self): """A dictionary consisting of any non-tensor outputs to be captured.""" return self._non_tensor_outputs def set_non_tensor_output(self, name, output): """Set `output` with `name` to be captured as a non-tensor output.""" if distribution_strategy_context.in_cross_replica_context(): self._non_tensor_outputs[name] = output else: def merge_fn(distribution, value): # NOTE(priyag): For non-tensor outputs, we simply return all the values # in a list as reduction doesn't make sense on non-tensors. self._non_tensor_outputs[name] = ( distribution.experimental_local_results(value)) distribution_strategy_context.get_replica_context().merge_call( merge_fn, args=(output,)) def _create_distributed_tensor_spec(strategy, tensor_spec): """Create a `tf.TypeSpec` for a given strategy and input `tensor_spec`. Args: strategy: The given `tf.distribute` strategy. tensor_spec: `tf.TensorSpec` of a given value. The batch dimension of the shape should be None if you have partial batches. Returns: A `tf.TypeSpec` that matches the values produced by a given strategy. This can be a `tf.TensorSpec` or a `PerReplicaSpec`. """ num_replicas = len(strategy.extended.worker_devices) # If the number of devices used in the strategy is just 1 then we return # the tensor_spec as is. if num_replicas == 1: return tensor_spec # If the number of devices is greater than 1 then we assume the input to # tf.function is a per replica type. def _get_value_per_replica(tensor_spec_per_input): value_specs = [tensor_spec_per_input for _ in range(num_replicas)] return values.PerReplicaSpec(*value_specs) return nest.map_structure(_get_value_per_replica, tensor_spec)
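# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of the public API this input library backs:
# `tf.distribute.MirroredStrategy` distributes a `tf.data.Dataset` across the
# local replicas, and iterating the result goes through the DistributedDataset /
# DistributedIterator machinery defined above. Assumes TensorFlow 2.x; the
# dataset and the reduction are made up for illustration.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()   # one replica per local device
dataset = tf.data.Dataset.range(8).batch(4)   # global batch size of 4
dist_dataset = strategy.experimental_distribute_dataset(dataset)

for per_replica_batch in dist_dataset:        # a DistributedIterator under the hood
    # Each element is a per-replica value; reduce across replicas to inspect it.
    print(strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_batch, axis=None))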
apache-2.0
bloomberg/phabricator-tools
py/aon/aoncmd_taskquery.py
4
8598
"""display and filter the list of maniphest tasks. you can use the 'task id' output from this command as input to the 'arcyon task-update' command. usage examples: list all tasks: $ arcyon task-query output formats: --format-ids 3 2 1 --format-short 8 / Open / High / rethink the blob module 7 / Open / High / document the lifecycle of a request 3 / Open / Low / extract methods out of the doWork() function --format-python [{'description': u'', 'id': u'1', 'objectName': u'T1', 'priority': u'Needs Triage', 'status': u'0', ... --format-json [ { "description": "", "id": "1", "objectName": "T1", ... """ # ============================================================================= # CONTENTS # ----------------------------------------------------------------------------- # aoncmd_taskquery # # Public Functions: # getFromfilePrefixChars # setupParser # process # # ----------------------------------------------------------------------------- # (this contents block is generated, edits will be lost) # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import pprint import textwrap import phlcon_maniphest import phlcon_project import phlcon_user import phlsys_makeconduit def getFromfilePrefixChars(): return "" def setupParser(parser): # make a list of priority names in increasing order of importance priority_name_list = phlcon_maniphest.PRIORITIES.keys() priority_name_list.sort( key=lambda x: phlcon_maniphest.PRIORITIES[x]) priorities = parser.add_argument_group( 'optional priority arguments', 'use any of ' + textwrap.fill( str(priority_name_list))) output_group = parser.add_argument_group( 'Output format arguments', 'Mutually exclusive, defaults to "--format-short"') output = output_group.add_mutually_exclusive_group() opt = parser.add_argument_group( 'Optional task arguments') priorities.add_argument( '--priorities', '-p', nargs="*", choices=priority_name_list, metavar="PRIORITY", default=None, type=str, help="filter by priority of the task") opt.add_argument( '--order', choices=phlcon_maniphest.ORDERS.keys(), default=None, type=str, help="the ordering of the returned results") opt.add_argument( '--ids', nargs="+", metavar='INT', default=[], help='specific task ids to restrict the query to', type=str) opt.add_argument( '--owners', '-o', nargs="+", metavar='USER', default=[], help='specific owners usernames to restrict the query to', type=str) opt.add_argument( '--authors', nargs="+", metavar='USER', default=[], help='specific author usernames to restrict the query to', type=str) opt.add_argument( '--ccs', '-c', nargs="+", metavar='USER', default=[], help='specific cc usernames to restrict the query to', type=str) opt.add_argument( '--projects', nargs="+", metavar='PROJECT', default=[], help='a list of project names to restrict the query', type=str) opt.add_argument( '--status', type=str, default=None, choices=phlcon_maniphest.STATUS_FILTERS.keys(), help='a single status type to restrict items to') opt.add_argument( '--text', type=str, metavar='STRING', default=None, help='string to search the full text of each task for.') opt.add_argument( '--max-results', type=int, metavar='INT', default=None, help='limit the number of results returned, if unspecified then the ' 'server default limit is used (seems to be 1000).') opt.add_argument( '--offset-results', type=int, metavar='INT', default=None, help='where there is a limit on the number of results, you can 
supply ' 'an offset to return the next batch of results. e.g. if the ' 'number of results is limited to 100, then to see the next "page" ' 'of results, supply an offset of 100. To see "page 3" of the ' 'results, supply an offset of 200 and so on. There is no way to ' 'count the total number of results at present.') output.add_argument( '--format-short', action='store_true', help='will print a short human-readable summary of each task.') output.add_argument( '--format-ids', action='store_true', help='will print just the ids of the tasks, for scripting.') output.add_argument( '--format-string', type=str, default=None, help='will print using the supplied format string, e.g. "{id}" ' 'to print a list of ids. use --format-python to list all the ' 'available attributes for printing.') output.add_argument( '--format-python', action='store_true', help='will pretty-print the response as a python object.') output.add_argument( '--format-json', action='store_true', help='will pretty-print the response in json.') phlsys_makeconduit.add_argparse_arguments(parser) def _combine_lists_if_not_none(*lists): result = [] for l in lists: if l is not None: result += l return result def process(args): conduit = phlsys_makeconduit.make_conduit( args.uri, args.user, args.cert, args.act_as_user) # conduit expects PHIDs not plain usernames user_phids = phlcon_user.UserPhidCache(conduit) user_phids.add_hint_list( _combine_lists_if_not_none(args.owners, args.ccs)) authors = [user_phids.get_phid(user) for user in args.authors] owners = [user_phids.get_phid(user) for user in args.owners] ccs = [user_phids.get_phid(user) for user in args.ccs] # conduit expects PHIDs not plain project names projects = None if args.projects: project_to_phid = phlcon_project.make_project_to_phid_dict(conduit) projects = [project_to_phid[p] for p in args.projects] filters = phlcon_maniphest.STATUS_FILTERS status = filters[args.status] if args.status is not None else None orderings = phlcon_maniphest.ORDERS order = orderings[args.order] if args.order is not None else None results = phlcon_maniphest.query( conduit, ids=args.ids, authors=authors, owners=owners, ccs=ccs, projects=projects, status=status, limit=args.max_results, offset=args.offset_results, order=order, text=args.text) results = [dict(r.__dict__) for r in results] for r in results: if r['statusName'] is None: r['statusName'] = phlcon_maniphest.STATUSES[int(r['status'])] # initialise to format for 'args.format_short' output_format = "{id} / {statusName} / {priority} / {title}" if args.format_ids: output_format = "{id}" elif args.format_string is not None: output_format = args.format_string if args.format_python: pprint.pprint(results) elif args.format_json: print(json.dumps(results, sort_keys=True, indent=2)) else: for r in results: print(output_format.format(**r)) # ----------------------------------------------------------------------------- # Copyright (C) 2013-2014 Bloomberg Finance L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
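# --- Illustrative sketch (not part of the original file) ---
# A small, self-contained demonstration of the output-formatting scheme that
# process() uses: each task dict is rendered through a "--format-string" style
# template, with "{id} / {statusName} / {priority} / {title}" as the
# --format-short default. The task dicts below are made up for illustration.
tasks = [
    {'id': '8', 'statusName': 'Open', 'priority': 'High', 'title': 'rethink the blob module'},
    {'id': '3', 'statusName': 'Open', 'priority': 'Low', 'title': 'extract methods out of the doWork() function'},
]
output_format = '{id} / {statusName} / {priority} / {title}'
for task in tasks:
    print(output_format.format(**task))
# prints:
# 8 / Open / High / rethink the blob module
# 3 / Open / Low / extract methods out of the doWork() function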
apache-2.0
fkie-cad/FACT_core
src/objects/firmware.py
1
2206
from objects.file import FileObject from helperFunctions.hash import get_md5 from helperFunctions.tag import TagColor from contextlib import suppress class Firmware(FileObject): ''' This object represents a firmware ''' def __init__(self, binary=None, file_name=None, file_path=None, scheduled_analysis=None): super().__init__(binary=binary, file_name=file_name, file_path=file_path, scheduled_analysis=scheduled_analysis) self.device_name = None self.version = None self.device_class = None self.vendor = None self.part = '' self.release_date = None self.tags = dict() self._update_root_id_and_virtual_path() def set_device_name(self, device_name): self.device_name = device_name def set_part_name(self, part): if part == 'complete': self.part = '' else: self.part = part def set_firmware_version(self, version): self.version = version def set_device_class(self, device_class): self.device_class = device_class def set_binary(self, binary): super().set_binary(binary) self._update_root_id_and_virtual_path() self.md5 = get_md5(binary) def set_vendor(self, vendor): self.vendor = vendor def set_release_date(self, release_date): self.release_date = release_date def _update_root_id_and_virtual_path(self): self.root_uid = self.uid self.virtual_file_path = {self.uid: [self.uid]} def set_tag(self, tag, tag_color=TagColor.GRAY): self.tags[tag] = tag_color def remove_tag(self, tag): with suppress(KeyError): self.tags.pop(tag) def get_hid(self, root_uid=None): ''' return a human-readable identifier ''' part = ' - {}'.format(self.part) if self.part else '' return '{} {}{} v. {}'.format(self.vendor, self.device_name, part, self.version) def __str__(self): return '{}\nProcessed Analysis: {}\nScheduled Analysis: {}'.format(self.get_hid(), list(self.processed_analysis.keys()), self.scheduled_analysis) def __repr__(self): return self.__str__()
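# --- Illustrative usage sketch (not part of the original file) ---
# A hedged example of constructing and labelling a Firmware object. The vendor,
# device and version values are made up, and it assumes the FACT_core package
# layout so that `objects.firmware` is importable.
from objects.firmware import Firmware

fw = Firmware(binary=b'\x00\x01\x02\x03', file_name='image.bin')
fw.set_vendor('ExampleVendor')       # hypothetical vendor
fw.set_device_name('RouterX')        # hypothetical device
fw.set_device_class('router')
fw.set_firmware_version('1.2.3')
fw.set_part_name('complete')         # stored internally as '' for complete images
fw.set_tag('unpacked')               # tag color defaults to TagColor.GRAY
print(fw.get_hid())                  # -> 'ExampleVendor RouterX v. 1.2.3'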
gpl-3.0
asendecka/djangae
djangae/tests/test_transactional.py
7
6364
from djangae.test import TestCase from djangae.db import transaction from djangae.contrib import sleuth class TransactionTests(TestCase): def test_repeated_usage_in_a_loop(self): from .test_connector import TestUser pk = TestUser.objects.create(username="foo").pk for i in xrange(4): with transaction.atomic(xg=True): TestUser.objects.get(pk=pk) continue with transaction.atomic(xg=True): TestUser.objects.get(pk=pk) def test_atomic_decorator(self): from .test_connector import TestUser @transaction.atomic def txn(): TestUser.objects.create(username="foo", field2="bar") self.assertTrue(transaction.in_atomic_block()) raise ValueError() with self.assertRaises(ValueError): txn() self.assertEqual(0, TestUser.objects.count()) def test_interaction_with_datastore_txn(self): from google.appengine.ext import db from google.appengine.datastore.datastore_rpc import TransactionOptions from .test_connector import TestUser @db.transactional(propagation=TransactionOptions.INDEPENDENT) def some_indie_txn(_username): TestUser.objects.create(username=_username) @db.transactional() def some_non_indie_txn(_username): TestUser.objects.create(username=_username) @db.transactional() def double_nested_transactional(): @db.transactional(propagation=TransactionOptions.INDEPENDENT) def do_stuff(): TestUser.objects.create(username="Double") raise ValueError() try: return do_stuff() except: return with transaction.atomic(): double_nested_transactional() @db.transactional() def something_containing_atomic(): with transaction.atomic(): TestUser.objects.create(username="Inner") something_containing_atomic() with transaction.atomic(): with transaction.atomic(): some_non_indie_txn("Bob1") some_indie_txn("Bob2") some_indie_txn("Bob3") with transaction.atomic(independent=True): some_non_indie_txn("Fred1") some_indie_txn("Fred2") some_indie_txn("Fred3") def test_atomic_context_manager(self): from .test_connector import TestUser with self.assertRaises(ValueError): with transaction.atomic(): TestUser.objects.create(username="foo", field2="bar") raise ValueError() self.assertEqual(0, TestUser.objects.count()) def test_non_atomic_context_manager(self): from .test_connector import TestUser existing = TestUser.objects.create(username="existing", field2="exists") with transaction.atomic(): self.assertTrue(transaction.in_atomic_block()) user = TestUser.objects.create(username="foo", field2="bar") with transaction.non_atomic(): # We're outside the transaction, so the user should not exist self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user.pk) self.assertFalse(transaction.in_atomic_block()) with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: TestUser.objects.get(pk=existing.pk) # Should hit the cache, not the datastore self.assertFalse(datastore_get.called) with transaction.atomic(independent=True): user2 = TestUser.objects.create(username="foo2", field2="bar2") self.assertTrue(transaction.in_atomic_block()) with transaction.non_atomic(): self.assertFalse(transaction.in_atomic_block()) self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk) with transaction.non_atomic(): self.assertFalse(transaction.in_atomic_block()) self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk) with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get: TestUser.objects.get(pk=existing.pk) # Should hit the cache, not the datastore self.assertFalse(transaction.in_atomic_block()) self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
self.assertTrue(TestUser.objects.filter(pk=user2.pk).exists()) self.assertTrue(transaction.in_atomic_block()) def test_xg_argument(self): from .test_connector import TestUser, TestFruit @transaction.atomic(xg=True) def txn(_username): TestUser.objects.create(username=_username, field2="bar") TestFruit.objects.create(name="Apple", color="pink") raise ValueError() with self.assertRaises(ValueError): txn("foo") self.assertEqual(0, TestUser.objects.count()) self.assertEqual(0, TestFruit.objects.count()) def test_independent_argument(self): """ We would get an XG error if the inner transaction was not independent """ from .test_connector import TestUser, TestFruit @transaction.atomic def txn1(_username, _fruit): @transaction.atomic(independent=True) def txn2(_fruit): TestFruit.objects.create(name=_fruit, color="pink") raise ValueError() TestUser.objects.create(username=_username) txn2(_fruit) with self.assertRaises(ValueError): txn1("test", "banana") def test_nested_decorator(self): # A nested decorator pattern we discovered can cause a connection_stack # underflow. @transaction.atomic def inner_txn(): pass @transaction.atomic def outer_txn(): inner_txn() # Calling inner_txn first puts it in a state which means it doesn't # then behave properly in a nested transaction. inner_txn() outer_txn()
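# --- Illustrative usage sketch (not part of the original file) ---
# A hedged summary of the djangae transaction API these tests exercise; the
# model import mirrors the tests' own `from .test_connector import TestUser`.
from djangae.db import transaction
from .test_connector import TestUser

@transaction.atomic
def create_user(username):
    # Runs inside a Datastore transaction; raising here rolls the creation back.
    TestUser.objects.create(username=username)

def mixed_usage():
    with transaction.atomic(xg=True):      # cross-group transaction
        create_user('alice')               # nested atomic blocks are allowed
        with transaction.non_atomic():     # temporarily step outside the transaction
            # Entities created in the enclosing transaction are not yet visible here.
            assert not TestUser.objects.filter(username='alice').exists()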
bsd-3-clause
ryuunosukeyoshi/PartnerPoi-Bot
lib/aiohttp/multipart.py
20
32684
import asyncio import base64 import binascii import io import json import mimetypes import os import re import sys import uuid import warnings import zlib from collections import Mapping, Sequence, deque from pathlib import Path from urllib.parse import parse_qsl, quote, unquote, urlencode from multidict import CIMultiDict from .hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TRANSFER_ENCODING, CONTENT_TYPE) from .helpers import parse_mimetype from .protocol import HttpParser __all__ = ('MultipartReader', 'MultipartWriter', 'BodyPartReader', 'BodyPartWriter', 'BadContentDispositionHeader', 'BadContentDispositionParam', 'parse_content_disposition', 'content_disposition_filename') CHAR = set(chr(i) for i in range(0, 128)) CTL = set(chr(i) for i in range(0, 32)) | {chr(127), } SEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', chr(9)} TOKEN = CHAR ^ CTL ^ SEPARATORS PY_35 = sys.version_info >= (3, 5) PY_352 = sys.version_info >= (3, 5, 2) class BadContentDispositionHeader(RuntimeWarning): pass class BadContentDispositionParam(RuntimeWarning): pass def parse_content_disposition(header): def is_token(string): return string and TOKEN >= set(string) def is_quoted(string): return string[0] == string[-1] == '"' def is_rfc5987(string): return is_token(string) and string.count("'") == 2 def is_extended_param(string): return string.endswith('*') def is_continuous_param(string): pos = string.find('*') + 1 if not pos: return False substring = string[pos:-1] if string.endswith('*') else string[pos:] return substring.isdigit() def unescape(text, *, chars=''.join(map(re.escape, CHAR))): return re.sub('\\\\([{}])'.format(chars), '\\1', text) if not header: return None, {} disptype, *parts = header.split(';') if not is_token(disptype): warnings.warn(BadContentDispositionHeader(header)) return None, {} params = {} for item in parts: if '=' not in item: warnings.warn(BadContentDispositionHeader(header)) return None, {} key, value = item.split('=', 1) key = key.lower().strip() value = value.lstrip() if key in params: warnings.warn(BadContentDispositionHeader(header)) return None, {} if not is_token(key): warnings.warn(BadContentDispositionParam(item)) continue elif is_continuous_param(key): if is_quoted(value): value = unescape(value[1:-1]) elif not is_token(value): warnings.warn(BadContentDispositionParam(item)) continue elif is_extended_param(key): if is_rfc5987(value): encoding, _, value = value.split("'", 2) encoding = encoding or 'utf-8' else: warnings.warn(BadContentDispositionParam(item)) continue try: value = unquote(value, encoding, 'strict') except UnicodeDecodeError: # pragma: nocover warnings.warn(BadContentDispositionParam(item)) continue else: if is_quoted(value): value = unescape(value[1:-1].lstrip('\\/')) elif not is_token(value): warnings.warn(BadContentDispositionHeader(header)) return None, {} params[key] = value return disptype.lower(), params def content_disposition_filename(params): if not params: return None elif 'filename*' in params: return params['filename*'] elif 'filename' in params: return params['filename'] else: parts = [] fnparams = sorted((key, value) for key, value in params.items() if key.startswith('filename*')) for num, (key, value) in enumerate(fnparams): _, tail = key.split('*', 1) if tail.endswith('*'): tail = tail[:-1] if tail == str(num): parts.append(value) else: break if not parts: return None value = ''.join(parts) if "'" in value: encoding, _, value = value.split("'", 2) encoding = 
encoding or 'utf-8' return unquote(value, encoding, 'strict') return value class MultipartResponseWrapper(object): """Wrapper around the :class:`MultipartBodyReader` that takes care of the underlying connection and closes it when it is no longer needed.""" def __init__(self, resp, stream): self.resp = resp self.stream = stream if PY_35: def __aiter__(self): return self if not PY_352: # pragma: no cover __aiter__ = asyncio.coroutine(__aiter__) @asyncio.coroutine def __anext__(self): part = yield from self.next() if part is None: raise StopAsyncIteration # NOQA return part def at_eof(self): """Returns ``True`` when all response data has been read. :rtype: bool """ return self.resp.content.at_eof() @asyncio.coroutine def next(self): """Emits next multipart reader object.""" item = yield from self.stream.next() if self.stream.at_eof(): yield from self.release() return item @asyncio.coroutine def release(self): """Releases the connection gracefully, reading all the content to the void.""" yield from self.resp.release() class BodyPartReader(object): """Multipart reader for single body part.""" chunk_size = 8192 def __init__(self, boundary, headers, content): self.headers = headers self._boundary = boundary self._content = content self._at_eof = False length = self.headers.get(CONTENT_LENGTH, None) self._length = int(length) if length is not None else None self._read_bytes = 0 self._unread = deque() self._prev_chunk = None self._content_eof = 0 if PY_35: def __aiter__(self): return self if not PY_352: # pragma: no cover __aiter__ = asyncio.coroutine(__aiter__) @asyncio.coroutine def __anext__(self): part = yield from self.next() if part is None: raise StopAsyncIteration # NOQA return part @asyncio.coroutine def next(self): item = yield from self.read() if not item: return None return item @asyncio.coroutine def read(self, *, decode=False): """Reads body part data. :param bool decode: Decodes data following the encoding method from the `Content-Encoding` header. If the header is missing, the data remains untouched :rtype: bytearray """ if self._at_eof: return b'' data = bytearray() if self._length is None: while not self._at_eof: data.extend((yield from self.readline())) else: while not self._at_eof: data.extend((yield from self.read_chunk(self.chunk_size))) if decode: return self.decode(data) return data @asyncio.coroutine def read_chunk(self, size=chunk_size): """Reads body part content chunk of the specified size. :param int size: chunk size :rtype: bytearray """ if self._at_eof: return b'' if self._length: chunk = yield from self._read_chunk_from_length(size) else: chunk = yield from self._read_chunk_from_stream(size) self._read_bytes += len(chunk) if self._read_bytes == self._length: self._at_eof = True if self._at_eof: assert b'\r\n' == (yield from self._content.readline()), \ 'reader did not read all the data or it is malformed' return chunk @asyncio.coroutine def _read_chunk_from_length(self, size): """Reads body part content chunk of the specified size. The body part must have a `Content-Length` header with a proper value. :param int size: chunk size :rtype: bytearray """ assert self._length is not None, \ 'Content-Length required for chunked read' chunk_size = min(size, self._length - self._read_bytes) chunk = yield from self._content.read(chunk_size) return chunk @asyncio.coroutine def _read_chunk_from_stream(self, size): """Reads content chunk of body part with unknown length. The `Content-Length` header for the body part is not necessary.
:param int size: chunk size :rtype: bytearray """ assert size >= len(self._boundary) + 2, \ 'Chunk size must be greater than or equal to boundary length + 2' first_chunk = self._prev_chunk is None if first_chunk: self._prev_chunk = yield from self._content.read(size) chunk = yield from self._content.read(size) self._content_eof += int(self._content.at_eof()) assert self._content_eof < 3, "Reading after EOF" window = self._prev_chunk + chunk sub = b'\r\n' + self._boundary if first_chunk: idx = window.find(sub) else: idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub))) if idx >= 0: # pushing boundary back to content self._content.unread_data(window[idx:]) if size > idx: self._prev_chunk = self._prev_chunk[:idx] chunk = window[len(self._prev_chunk):idx] if not chunk: self._at_eof = True if 0 < len(chunk) < len(sub) and not self._content_eof: self._prev_chunk += chunk self._at_eof = False return b'' result = self._prev_chunk self._prev_chunk = chunk return result @asyncio.coroutine def readline(self): """Reads the body part line by line. :rtype: bytearray """ if self._at_eof: return b'' if self._unread: line = self._unread.popleft() else: line = yield from self._content.readline() if line.startswith(self._boundary): # the very last boundary may not come with \r\n, # so set single rules for everyone sline = line.rstrip(b'\r\n') boundary = self._boundary last_boundary = self._boundary + b'--' # ensure that we read exactly the boundary, not something similar if sline == boundary or sline == last_boundary: self._at_eof = True self._unread.append(line) return b'' else: next_line = yield from self._content.readline() if next_line.startswith(self._boundary): line = line[:-2] # strip CRLF but only once self._unread.append(next_line) return line @asyncio.coroutine def release(self): """Like :meth:`read`, but reads all the data to the void. :rtype: None """ if self._at_eof: return if self._length is None: while not self._at_eof: yield from self.readline() else: while not self._at_eof: yield from self.read_chunk(self.chunk_size) @asyncio.coroutine def text(self, *, encoding=None): """Like :meth:`read`, but assumes that the body part contains text data. :param str encoding: Custom text encoding. Overrides specified in charset param of `Content-Type` header :rtype: str """ data = yield from self.read(decode=True) encoding = encoding or self.get_charset(default='latin1') return data.decode(encoding) @asyncio.coroutine def json(self, *, encoding=None): """Like :meth:`read`, but assumes that the body part contains JSON data. :param str encoding: Custom JSON encoding. Overrides specified in charset param of `Content-Type` header """ data = yield from self.read(decode=True) if not data: return None encoding = encoding or self.get_charset(default='utf-8') return json.loads(data.decode(encoding)) @asyncio.coroutine def form(self, *, encoding=None): """Like :meth:`read`, but assumes that the body part contains form urlencoded data. :param str encoding: Custom form encoding. Overrides specified in charset param of `Content-Type` header """ data = yield from self.read(decode=True) if not data: return None encoding = encoding or self.get_charset(default='utf-8') return parse_qsl(data.rstrip().decode(encoding), encoding=encoding) def at_eof(self): """Returns ``True`` if the boundary was reached or ``False`` otherwise. :rtype: bool """ return self._at_eof def decode(self, data): """Decodes data according to the specified `Content-Encoding` or `Content-Transfer-Encoding` header value.
Supports ``gzip``, ``deflate`` and ``identity`` encodings for `Content-Encoding` header. Supports ``base64``, ``quoted-printable``, ``binary`` encodings for `Content-Transfer-Encoding` header. :param bytearray data: Data to decode. :raises: :exc:`RuntimeError` - if encoding is unknown. :rtype: bytes """ if CONTENT_TRANSFER_ENCODING in self.headers: data = self._decode_content_transfer(data) if CONTENT_ENCODING in self.headers: return self._decode_content(data) return data def _decode_content(self, data): encoding = self.headers[CONTENT_ENCODING].lower() if encoding == 'deflate': return zlib.decompress(data, -zlib.MAX_WBITS) elif encoding == 'gzip': return zlib.decompress(data, 16 + zlib.MAX_WBITS) elif encoding == 'identity': return data else: raise RuntimeError('unknown content encoding: {}'.format(encoding)) def _decode_content_transfer(self, data): encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower() if encoding == 'base64': return base64.b64decode(data) elif encoding == 'quoted-printable': return binascii.a2b_qp(data) elif encoding == 'binary': return data else: raise RuntimeError('unknown content transfer encoding: {}' ''.format(encoding)) def get_charset(self, default=None): """Returns charset parameter from ``Content-Type`` header or default. """ ctype = self.headers.get(CONTENT_TYPE, '') *_, params = parse_mimetype(ctype) return params.get('charset', default) @property def filename(self): """Returns filename specified in Content-Disposition header or ``None`` if missed or header is malformed.""" _, params = parse_content_disposition( self.headers.get(CONTENT_DISPOSITION)) return content_disposition_filename(params) class MultipartReader(object): """Multipart body reader.""" #: Response wrapper, used when multipart readers constructs from response. response_wrapper_cls = MultipartResponseWrapper #: Multipart reader class, used to handle multipart/* body parts. #: None points to type(self) multipart_reader_cls = None #: Body part reader class for non multipart/* content types. part_reader_cls = BodyPartReader def __init__(self, headers, content): self.headers = headers self._boundary = ('--' + self._get_boundary()).encode() self._content = content self._last_part = None self._at_eof = False self._at_bof = True self._unread = [] if PY_35: def __aiter__(self): return self if not PY_352: # pragma: no cover __aiter__ = asyncio.coroutine(__aiter__) @asyncio.coroutine def __anext__(self): part = yield from self.next() if part is None: raise StopAsyncIteration # NOQA return part @classmethod def from_response(cls, response): """Constructs reader instance from HTTP response. :param response: :class:`~aiohttp.client.ClientResponse` instance """ obj = cls.response_wrapper_cls(response, cls(response.headers, response.content)) return obj def at_eof(self): """Returns ``True`` if the final boundary was reached or ``False`` otherwise. :rtype: bool """ return self._at_eof @asyncio.coroutine def next(self): """Emits the next multipart body part.""" # So, if we're at BOF, we need to skip till the boundary. 
if self._at_eof: return yield from self._maybe_release_last_part() if self._at_bof: yield from self._read_until_first_boundary() self._at_bof = False else: yield from self._read_boundary() if self._at_eof: # we just read the last boundary, nothing to do there return self._last_part = yield from self.fetch_next_part() return self._last_part @asyncio.coroutine def release(self): """Reads all the body parts to the void till the final boundary.""" while not self._at_eof: item = yield from self.next() if item is None: break yield from item.release() @asyncio.coroutine def fetch_next_part(self): """Returns the next body part reader.""" headers = yield from self._read_headers() return self._get_part_reader(headers) def _get_part_reader(self, headers): """Dispatches the response by the `Content-Type` header, returning suitable reader instance. :param dict headers: Response headers """ ctype = headers.get(CONTENT_TYPE, '') mtype, *_ = parse_mimetype(ctype) if mtype == 'multipart': if self.multipart_reader_cls is None: return type(self)(headers, self._content) return self.multipart_reader_cls(headers, self._content) else: return self.part_reader_cls(self._boundary, headers, self._content) def _get_boundary(self): mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE]) assert mtype == 'multipart', 'multipart/* content type expected' if 'boundary' not in params: raise ValueError('boundary missed for Content-Type: %s' % self.headers[CONTENT_TYPE]) boundary = params['boundary'] if len(boundary) > 70: raise ValueError('boundary %r is too long (70 chars max)' % boundary) return boundary @asyncio.coroutine def _readline(self): if self._unread: return self._unread.pop() return (yield from self._content.readline()) @asyncio.coroutine def _read_until_first_boundary(self): while True: chunk = yield from self._readline() if chunk == b'': raise ValueError("Could not find starting boundary %r" % (self._boundary)) chunk = chunk.rstrip() if chunk == self._boundary: return elif chunk == self._boundary + b'--': self._at_eof = True return @asyncio.coroutine def _read_boundary(self): chunk = (yield from self._readline()).rstrip() if chunk == self._boundary: pass elif chunk == self._boundary + b'--': self._at_eof = True else: raise ValueError('Invalid boundary %r, expected %r' % (chunk, self._boundary)) @asyncio.coroutine def _read_headers(self): lines = [b''] while True: chunk = yield from self._content.readline() chunk = chunk.strip() lines.append(chunk) if not chunk: break parser = HttpParser() headers, *_ = parser.parse_headers(lines) return headers @asyncio.coroutine def _maybe_release_last_part(self): """Ensures that the last read body part is read completely.""" if self._last_part is not None: if not self._last_part.at_eof(): yield from self._last_part.release() self._unread.extend(self._last_part._unread) self._last_part = None class BodyPartWriter(object): """Multipart writer for single body part.""" def __init__(self, obj, headers=None, *, chunk_size=8192): if headers is None: headers = CIMultiDict() elif not isinstance(headers, CIMultiDict): headers = CIMultiDict(headers) self.obj = obj self.headers = headers self._chunk_size = chunk_size self._fill_headers_with_defaults() self._serialize_map = { bytes: self._serialize_bytes, str: self._serialize_str, io.IOBase: self._serialize_io, MultipartWriter: self._serialize_multipart, ('application', 'json'): self._serialize_json, ('application', 'x-www-form-urlencoded'): self._serialize_form } def _fill_headers_with_defaults(self): if CONTENT_TYPE not in 
self.headers:
            content_type = self._guess_content_type(self.obj)
            if content_type is not None:
                self.headers[CONTENT_TYPE] = content_type
        if CONTENT_LENGTH not in self.headers:
            content_length = self._guess_content_length(self.obj)
            if content_length is not None:
                self.headers[CONTENT_LENGTH] = str(content_length)
        if CONTENT_DISPOSITION not in self.headers:
            filename = self._guess_filename(self.obj)
            if filename is not None:
                self.set_content_disposition('attachment', filename=filename)

    def _guess_content_length(self, obj):
        if isinstance(obj, bytes):
            return len(obj)
        elif isinstance(obj, str):
            *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
            charset = params.get('charset', 'us-ascii')
            return len(obj.encode(charset))
        elif isinstance(obj, io.StringIO):
            *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
            charset = params.get('charset', 'us-ascii')
            return len(obj.getvalue().encode(charset)) - obj.tell()
        elif isinstance(obj, io.BytesIO):
            return len(obj.getvalue()) - obj.tell()
        elif isinstance(obj, io.IOBase):
            try:
                return os.fstat(obj.fileno()).st_size - obj.tell()
            except (AttributeError, OSError):
                return None
        else:
            return None

    def _guess_content_type(self, obj, default='application/octet-stream'):
        if hasattr(obj, 'name'):
            name = getattr(obj, 'name')
            return mimetypes.guess_type(name)[0]
        elif isinstance(obj, (str, io.StringIO)):
            return 'text/plain; charset=utf-8'
        else:
            return default

    def _guess_filename(self, obj):
        if isinstance(obj, io.IOBase):
            name = getattr(obj, 'name', None)
            if name is not None:
                return Path(name).name

    def serialize(self):
        """Yields byte chunks for body part."""

        has_encoding = (
            CONTENT_ENCODING in self.headers and
            self.headers[CONTENT_ENCODING] != 'identity' or
            CONTENT_TRANSFER_ENCODING in self.headers
        )
        if has_encoding:
            # since we're following a streaming approach which doesn't assume
            # any intermediate buffers, we cannot calculate the real content
            # length with the specified content encoding scheme. So, instead
            # of lying about the content length and causing read issues, we
            # have to strip this information.
self.headers.pop(CONTENT_LENGTH, None) if self.headers: yield b'\r\n'.join( b': '.join(map(lambda i: i.encode('latin1'), item)) for item in self.headers.items() ) yield b'\r\n\r\n' yield from self._maybe_encode_stream(self._serialize_obj()) yield b'\r\n' def _serialize_obj(self): obj = self.obj mtype, stype, *_ = parse_mimetype(self.headers.get(CONTENT_TYPE)) serializer = self._serialize_map.get((mtype, stype)) if serializer is not None: return serializer(obj) for key in self._serialize_map: if not isinstance(key, tuple) and isinstance(obj, key): return self._serialize_map[key](obj) return self._serialize_default(obj) def _serialize_bytes(self, obj): yield obj def _serialize_str(self, obj): *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE)) yield obj.encode(params.get('charset', 'us-ascii')) def _serialize_io(self, obj): while True: chunk = obj.read(self._chunk_size) if not chunk: break if isinstance(chunk, str): yield from self._serialize_str(chunk) else: yield from self._serialize_bytes(chunk) def _serialize_multipart(self, obj): yield from obj.serialize() def _serialize_json(self, obj): *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE)) yield json.dumps(obj).encode(params.get('charset', 'utf-8')) def _serialize_form(self, obj): if isinstance(obj, Mapping): obj = list(obj.items()) return self._serialize_str(urlencode(obj, doseq=True)) def _serialize_default(self, obj): raise TypeError('unknown body part type %r' % type(obj)) def _maybe_encode_stream(self, stream): if CONTENT_ENCODING in self.headers: stream = self._apply_content_encoding(stream) if CONTENT_TRANSFER_ENCODING in self.headers: stream = self._apply_content_transfer_encoding(stream) yield from stream def _apply_content_encoding(self, stream): encoding = self.headers[CONTENT_ENCODING].lower() if encoding == 'identity': yield from stream elif encoding in ('deflate', 'gzip'): if encoding == 'gzip': zlib_mode = 16 + zlib.MAX_WBITS else: zlib_mode = -zlib.MAX_WBITS zcomp = zlib.compressobj(wbits=zlib_mode) for chunk in stream: yield zcomp.compress(chunk) else: yield zcomp.flush() else: raise RuntimeError('unknown content encoding: {}' ''.format(encoding)) def _apply_content_transfer_encoding(self, stream): encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower() if encoding == 'base64': buffer = bytearray() while True: if buffer: div, mod = divmod(len(buffer), 3) chunk, buffer = buffer[:div * 3], buffer[div * 3:] if chunk: yield base64.b64encode(chunk) chunk = next(stream, None) if not chunk: if buffer: yield base64.b64encode(buffer[:]) return buffer.extend(chunk) elif encoding == 'quoted-printable': for chunk in stream: yield binascii.b2a_qp(chunk) elif encoding == 'binary': yield from stream else: raise RuntimeError('unknown content transfer encoding: {}' ''.format(encoding)) def set_content_disposition(self, disptype, **params): """Sets ``Content-Disposition`` header. :param str disptype: Disposition type: inline, attachment, form-data. 
Should be valid extension token (see RFC 2183)
        :param dict params: Disposition params
        """
        if not disptype or not (TOKEN > set(disptype)):
            raise ValueError('bad content disposition type {!r}'
                             ''.format(disptype))
        value = disptype
        if params:
            lparams = []
            for key, val in params.items():
                if not key or not (TOKEN > set(key)):
                    raise ValueError('bad content disposition parameter'
                                     ' {!r}={!r}'.format(key, val))
                qval = quote(val, '')
                lparams.append((key, '"%s"' % qval))
                if key == 'filename':
                    lparams.append(('filename*', "utf-8''" + qval))
            sparams = '; '.join('='.join(pair) for pair in lparams)
            value = '; '.join((value, sparams))
        self.headers[CONTENT_DISPOSITION] = value

    @property
    def filename(self):
        """Returns filename specified in Content-Disposition header or ``None``
        if missing."""
        _, params = parse_content_disposition(
            self.headers.get(CONTENT_DISPOSITION))
        return content_disposition_filename(params)


class MultipartWriter(object):
    """Multipart body writer."""

    #: Body part writer class for non multipart/* content types.
    part_writer_cls = BodyPartWriter

    def __init__(self, subtype='mixed', boundary=None):
        boundary = boundary if boundary is not None else uuid.uuid4().hex
        try:
            boundary.encode('us-ascii')
        except UnicodeEncodeError:
            raise ValueError('boundary should contain only ASCII characters')
        self.headers = CIMultiDict()
        self.headers[CONTENT_TYPE] = 'multipart/{}; boundary="{}"'.format(
            subtype, boundary
        )
        self.parts = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def __iter__(self):
        return iter(self.parts)

    def __len__(self):
        return len(self.parts)

    @property
    def boundary(self):
        *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
        return params['boundary'].encode('us-ascii')

    def append(self, obj, headers=None):
        """Adds a new body part to multipart writer."""
        if isinstance(obj, self.part_writer_cls):
            if headers:
                obj.headers.update(headers)
            self.parts.append(obj)
        else:
            if not headers:
                headers = CIMultiDict()
            self.parts.append(self.part_writer_cls(obj, headers))
        return self.parts[-1]

    def append_json(self, obj, headers=None):
        """Helper to append JSON part."""
        if not headers:
            headers = CIMultiDict()
        headers[CONTENT_TYPE] = 'application/json'
        return self.append(obj, headers)

    def append_form(self, obj, headers=None):
        """Helper to append form urlencoded part."""
        if not headers:
            headers = CIMultiDict()
        headers[CONTENT_TYPE] = 'application/x-www-form-urlencoded'
        assert isinstance(obj, (Sequence, Mapping))
        return self.append(obj, headers)

    def serialize(self):
        """Yields multipart byte chunks."""
        if not self.parts:
            yield b''
            return
        for part in self.parts:
            yield b'--' + self.boundary + b'\r\n'
            yield from part.serialize()
        else:
            yield b'--' + self.boundary + b'--\r\n'
        yield b''
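
# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch for the writer classes above, added
# for illustration only -- it is not part of the original module. The boundary
# and payload values are hypothetical; everything it calls is defined in this
# file.
def _usage_sketch():
    with MultipartWriter('form-data', boundary='illustrative-boundary') as writer:
        writer.append('plain text part')    # wrapped in a BodyPartWriter and
                                            # typed text/plain; charset=utf-8
        writer.append_json({'answer': 42})  # application/json body part
        # serialize() yields boundary delimiters, per-part headers and bodies
        # as byte chunks; joining them gives the complete multipart payload.
        return b''.join(writer.serialize())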
gpl-3.0
TacticalGoat/reddit
ErroneousQuotes/erroneousquotes.py
3
3063
#/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
import random

'''USER CONFIGURATION'''

USERNAME  = ""
#This is the bot's Username. In order to send mail, it must have some amount of karma.
PASSWORD  = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
NAMES = ["Abraham Lincoln", "George Washington", "Bill Gates", "Rosa Parks", "GoldenSights", "Unidan", "Napoleon Bonaparte"]
#Famous People
MAXPOSTS = 100
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
MAXLENGTH = 150
#To avoid bot abuse, do not generate any quotes longer than this many characters.
WAIT = 20
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.

'''All done!'''

WAITS = str(WAIT)
try:
    import bot
    #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
    USERNAME = bot.getuG()
    PASSWORD = bot.getpG()
    USERAGENT = bot.getaG()
except ImportError:
    pass
cutoff = len(USERNAME) + 4
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
print('Loaded Completed table')
sql.commit()

r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)

def scanSub():
    print('Searching '+ SUBREDDIT + '.')
    subreddit = r.get_subreddit(SUBREDDIT)
    posts = subreddit.get_comments(limit=MAXPOSTS)
    for post in posts:
        pid = post.id
        pbody = post.body
        try:
            pauthor = post.author.name
        except AttributeError:
            pauthor = '[DELETED]'
        cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid])
        if not cur.fetchone():
            cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
            if pbody.lower()[:cutoff] == '/u/' + USERNAME.lower() + ' ':
                quote = pbody.split('\n\n')[0][cutoff:]
                if len(quote) <= MAXLENGTH and pauthor != USERNAME:
                    if ('/u/' + USERNAME) in quote:
                        print(pid + ': Meatbag detected')
                        response = 'Nice try, meatbag'
                    else:
                        name = random.choice(NAMES)
                        print(pid + ': ' + quote + '- ' + name)
                        response = '>' + quote + '\n\n- ' + name
                    post.reply(response)
                else:
                    print(pid + ': Comment too long')
    sql.commit()

while True:
    try:
        scanSub()
    except Exception as e:
        print('An error has occurred:', str(e))
    print('Running again in ' + WAITS + ' seconds \n')
    sql.commit()
    time.sleep(WAIT)
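
# Editor's note: worked example of the trigger parsing above (illustration
# only; the loop above never returns, so this is left as a comment). With a
# hypothetical USERNAME of 'QuoteBot':
#
#   cutoff = len('QuoteBot') + 4             # == 12, covers '/u/QuoteBot '
#   pbody  = '/u/QuoteBot The die is cast\n\nedit: fixed a typo'
#   pbody.lower()[:cutoff]                   # == '/u/quotebot ' -> trigger hit
#   quote = pbody.split('\n\n')[0][cutoff:]  # == 'The die is cast'
#   response = '>' + quote + '\n\n- ' + name # quoted and randomly attributed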
mit
jkorell/PTVS
Python/Product/PythonTools/visualstudio_py_repl.py
19
51102
 # ############################################################################ # # Copyright (c) Microsoft Corporation. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # [email protected]. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. # # ########################################################################### from __future__ import with_statement # This module MUST NOT import threading in global scope. This is because in a direct (non-ptvsd) # attach scenario, it is loaded on the injected debugger attach thread, and if threading module # hasn't been loaded already, it will assume that the thread on which it is being loaded is the # main thread. This will cause issues when the thread goes away after attach completes. try: import thread except ImportError: # Renamed in Python3k import _thread as thread try: from ssl import SSLError except: SSLError = None import sys import socket import select import time import struct import imp import traceback import random import os import inspect import types from collections import deque try: # In the local attach scenario, visualstudio_py_util is injected into globals() # by PyDebugAttach before loading this module, and cannot be imported. _vspu = visualstudio_py_util except: try: import visualstudio_py_util as _vspu except ImportError: import ptvsd.visualstudio_py_util as _vspu to_bytes = _vspu.to_bytes read_bytes = _vspu.read_bytes read_int = _vspu.read_int read_string = _vspu.read_string write_bytes = _vspu.write_bytes write_int = _vspu.write_int write_string = _vspu.write_string try: unicode except NameError: unicode = str try: BaseException except NameError: # BaseException not defined until Python 2.5 BaseException = Exception DEBUG = os.environ.get('DEBUG_REPL') is not None __all__ = ['ReplBackend', 'BasicReplBackend', 'BACKEND'] def _debug_write(out): if DEBUG: sys.__stdout__.write(out) sys.__stdout__.flush() class SafeSendLock(object): """a lock which ensures we're released if we take a KeyboardInterrupt exception acquiring it""" def __init__(self): self.lock = thread.allocate_lock() def __enter__(self): self.acquire() def __exit__(self, exc_type, exc_value, tb): self.release() def acquire(self): try: self.lock.acquire() except KeyboardInterrupt: try: self.lock.release() except: pass raise def release(self): self.lock.release() def _command_line_to_args_list(cmdline): """splits a string into a list using Windows command line syntax.""" args_list = [] if cmdline and cmdline.strip(): from ctypes import c_int, c_voidp, c_wchar_p from ctypes import byref, POINTER, WinDLL clta = WinDLL('shell32').CommandLineToArgvW clta.argtypes = [c_wchar_p, POINTER(c_int)] clta.restype = POINTER(c_wchar_p) lf = WinDLL('kernel32').LocalFree lf.argtypes = [c_voidp] pNumArgs = c_int() r = clta(cmdline, byref(pNumArgs)) if r: for index in range(0, pNumArgs.value): if sys.hexversion >= 0x030000F0: argval = r[index] else: argval = r[index].encode('ascii', 'replace') args_list.append(argval) lf(r) else: sys.stderr.write('Error parsing script arguments:\n') sys.stderr.write(cmdline + '\n') return args_list class UnsupportedReplException(Exception): def __init__(self, reason): self.reason = reason # save 
the start_new_thread so we won't debug/break into the REPL comm thread. start_new_thread = thread.start_new_thread class ReplBackend(object): """back end for executing REPL code. This base class handles all of the communication with the remote process while derived classes implement the actual inspection and introspection.""" _MRES = to_bytes('MRES') _SRES = to_bytes('SRES') _MODS = to_bytes('MODS') _IMGD = to_bytes('IMGD') _PRPC = to_bytes('PRPC') _RDLN = to_bytes('RDLN') _STDO = to_bytes('STDO') _STDE = to_bytes('STDE') _DBGA = to_bytes('DBGA') _DETC = to_bytes('DETC') _DPNG = to_bytes('DPNG') _DXAM = to_bytes('DXAM') _MERR = to_bytes('MERR') _SERR = to_bytes('SERR') _ERRE = to_bytes('ERRE') _EXIT = to_bytes('EXIT') _DONE = to_bytes('DONE') _MODC = to_bytes('MODC') def __init__(self): import threading self.conn = None self.send_lock = SafeSendLock() self.input_event = threading.Lock() self.input_event.acquire() # lock starts acquired (we use it like a manual reset event) self.input_string = None self.exit_requested = False def connect(self, port): self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.conn.connect(('127.0.0.1', port)) # start a new thread for communicating w/ the remote process start_new_thread(self._repl_loop, ()) def connect_using_socket(self, socket): self.conn = socket start_new_thread(self._repl_loop, ()) def _repl_loop(self): """loop on created thread which processes communicates with the REPL window""" try: while True: if self.check_for_exit_repl_loop(): break # we receive a series of 4 byte commands. Each command then # has it's own format which we must parse before continuing to # the next command. self.flush() self.conn.settimeout(10) # 2.x raises SSLError in case of timeout (http://bugs.python.org/issue10272) if SSLError: timeout_exc_types = (socket.timeout, SSLError) else: timeout_exc_types = socket.timeout try: inp = read_bytes(self.conn, 4) except timeout_exc_types: r, w, x = select.select([], [], [self.conn], 0) if x: # an exception event has occured on the socket... raise continue self.conn.settimeout(None) if inp == '': break self.flush() cmd = ReplBackend._COMMANDS.get(inp) if cmd is not None: cmd(self) except: _debug_write('error in repl loop') _debug_write(traceback.format_exc()) self.exit_process() time.sleep(2) # try and exit gracefully, then interrupt main if necessary if sys.platform == 'cli': # just kill us as fast as possible import System System.Environment.Exit(1) self.interrupt_main() def check_for_exit_repl_loop(self): return False def _cmd_run(self): """runs the received snippet of code""" self.run_command(read_string(self.conn)) def _cmd_abrt(self): """aborts the current running command""" # abort command, interrupts execution of the main thread. 
self.interrupt_main() def _cmd_exit(self): """exits the interactive process""" self.exit_requested = True self.exit_process() def _cmd_mems(self): """gets the list of members available for the given expression""" expression = read_string(self.conn) try: name, inst_members, type_members = self.get_members(expression) except: with self.send_lock: write_bytes(self.conn, ReplBackend._MERR) _debug_write('error in eval') _debug_write(traceback.format_exc()) else: with self.send_lock: write_bytes(self.conn, ReplBackend._MRES) write_string(self.conn, name) self._write_member_dict(inst_members) self._write_member_dict(type_members) def _cmd_sigs(self): """gets the signatures for the given expression""" expression = read_string(self.conn) try: sigs = self.get_signatures(expression) except: with self.send_lock: write_bytes(self.conn, ReplBackend._SERR) _debug_write('error in eval') _debug_write(traceback.format_exc()) else: with self.send_lock: write_bytes(self.conn, ReplBackend._SRES) # single overload write_int(self.conn, len(sigs)) for doc, args, vargs, varkw, defaults in sigs: # write overload write_string(self.conn, (doc or '')[:4096]) arg_count = len(args) + (vargs is not None) + (varkw is not None) write_int(self.conn, arg_count) def_values = [''] * (len(args) - len(defaults)) + ['=' + d for d in defaults] for arg, def_value in zip(args, def_values): write_string(self.conn, (arg or '') + def_value) if vargs is not None: write_string(self.conn, '*' + vargs) if varkw is not None: write_string(self.conn, '**' + varkw) def _cmd_setm(self): global exec_mod """sets the current module which code will execute against""" mod_name = read_string(self.conn) self.set_current_module(mod_name) def _cmd_sett(self): """sets the current thread and frame which code will execute against""" thread_id = read_int(self.conn) frame_id = read_int(self.conn) frame_kind = read_int(self.conn) self.set_current_thread_and_frame(thread_id, frame_id, frame_kind) def _cmd_mods(self): """gets the list of available modules""" try: res = self.get_module_names() res.sort() except: res = [] with self.send_lock: write_bytes(self.conn, ReplBackend._MODS) write_int(self.conn, len(res)) for name, filename in res: write_string(self.conn, name) write_string(self.conn, filename) def _cmd_inpl(self): """handles the input command which returns a string of input""" self.input_string = read_string(self.conn) self.input_event.release() def _cmd_excf(self): """handles executing a single file""" filename = read_string(self.conn) args = read_string(self.conn) self.execute_file(filename, args) def _cmd_excx(self): """handles executing a single file, module or process""" filetype = read_string(self.conn) filename = read_string(self.conn) args = read_string(self.conn) self.execute_file_ex(filetype, filename, args) def _cmd_debug_attach(self): import visualstudio_py_debugger port = read_int(self.conn) id = read_string(self.conn) debug_options = visualstudio_py_debugger.parse_debug_options(read_string(self.conn)) self.attach_process(port, id, debug_options) _COMMANDS = { to_bytes('run '): _cmd_run, to_bytes('abrt'): _cmd_abrt, to_bytes('exit'): _cmd_exit, to_bytes('mems'): _cmd_mems, to_bytes('sigs'): _cmd_sigs, to_bytes('mods'): _cmd_mods, to_bytes('setm'): _cmd_setm, to_bytes('sett'): _cmd_sett, to_bytes('inpl'): _cmd_inpl, to_bytes('excf'): _cmd_excf, to_bytes('excx'): _cmd_excx, to_bytes('dbga'): _cmd_debug_attach, } def _write_member_dict(self, mem_dict): write_int(self.conn, len(mem_dict)) for name, type_name in mem_dict.items(): 
write_string(self.conn, name) write_string(self.conn, type_name) def on_debugger_detach(self): with self.send_lock: write_bytes(self.conn, ReplBackend._DETC) def init_debugger(self): from os import path sys.path.append(path.dirname(__file__)) import visualstudio_py_debugger visualstudio_py_debugger.DONT_DEBUG.append(path.normcase(__file__)) new_thread = visualstudio_py_debugger.new_thread() sys.settrace(new_thread.trace_func) visualstudio_py_debugger.intercept_threads(True) def send_image(self, filename): with self.send_lock: write_bytes(self.conn, ReplBackend._IMGD) write_string(self.conn, filename) def write_png(self, image_bytes): with self.send_lock: write_bytes(self.conn, ReplBackend._DPNG) write_int(self.conn, len(image_bytes)) write_bytes(self.conn, image_bytes) def write_xaml(self, xaml_bytes): with self.send_lock: write_bytes(self.conn, ReplBackend._DXAM) write_int(self.conn, len(xaml_bytes)) write_bytes(self.conn, xaml_bytes) def send_prompt(self, ps1, ps2, update_all = True): """sends the current prompt to the interactive window""" with self.send_lock: write_bytes(self.conn, ReplBackend._PRPC) write_string(self.conn, ps1) write_string(self.conn, ps2) write_int(self.conn, update_all) def send_error(self): """reports that an error occured to the interactive window""" with self.send_lock: write_bytes(self.conn, ReplBackend._ERRE) def send_exit(self): """reports the that the REPL process has exited to the interactive window""" with self.send_lock: write_bytes(self.conn, ReplBackend._EXIT) def send_command_executed(self): with self.send_lock: write_bytes(self.conn, ReplBackend._DONE) def send_modules_changed(self): with self.send_lock: write_bytes(self.conn, ReplBackend._MODC) def read_line(self): """reads a line of input from standard input""" with self.send_lock: write_bytes(self.conn, ReplBackend._RDLN) self.input_event.acquire() return self.input_string def write_stdout(self, value): """writes a string to standard output in the remote console""" with self.send_lock: write_bytes(self.conn, ReplBackend._STDO) write_string(self.conn, value) def write_stderr(self, value): """writes a string to standard input in the remote console""" with self.send_lock: write_bytes(self.conn, ReplBackend._STDE) write_string(self.conn, value) ################################################################ # Implementation of execution, etc... 
def execution_loop(self): """starts processing execution requests""" raise NotImplementedError def run_command(self, command): """runs the specified command which is a string containing code""" raise NotImplementedError def execute_file(self, filename, args): """executes the given filename as the main module""" return self.execute_file_ex('script', filename, args) def execute_file_ex(self, filetype, filename, args): """executes the given filename as a 'script', 'module' or 'process'.""" raise NotImplementedError def interrupt_main(self): """aborts the current running command""" raise NotImplementedError def exit_process(self): """exits the REPL process""" raise NotImplementedError def get_members(self, expression): """returns a tuple of the type name, instance members, and type members""" raise NotImplementedError def get_signatures(self, expression): """returns doc, args, vargs, varkw, defaults.""" raise NotImplementedError def set_current_module(self, module): """sets the module which code executes against""" raise NotImplementedError def set_current_thread_and_frame(self, thread_id, frame_id, frame_kind): """sets the current thread and frame which code will execute against""" raise NotImplementedError def get_module_names(self): """returns a list of module names""" raise NotImplementedError def flush(self): """flushes the stdout/stderr buffers""" raise NotImplementedError def attach_process(self, port, debugger_id, debug_options): """starts processing execution requests""" raise NotImplementedError def exit_work_item(): sys.exit(0) if sys.platform == 'cli': # We need special handling to reset the abort for keyboard interrupt exceptions class ReplAbortException(Exception): pass import clr clr.AddReference('Microsoft.Dynamic') clr.AddReference('Microsoft.Scripting') clr.AddReference('IronPython') from Microsoft.Scripting import KeyboardInterruptException from Microsoft.Scripting import ParamDictionaryAttribute from IronPython.Runtime.Operations import PythonOps from IronPython.Runtime import PythonContext from Microsoft.Scripting import SourceUnit, SourceCodeKind from Microsoft.Scripting.Runtime import Scope python_context = clr.GetCurrentRuntime().GetLanguage(PythonContext) from System import DBNull, ParamArrayAttribute builtin_method_descriptor_type = type(list.append) import System NamespaceType = type(System) class _OldClass: pass _OldClassType = type(_OldClass) _OldInstanceType = type(_OldClass()) class BasicReplBackend(ReplBackend): future_bits = 0x3e010 # code flags used to mark future bits """Basic back end which executes all Python code in-proc""" def __init__(self, mod_name = '__main__', launch_file = None): import threading ReplBackend.__init__(self) if mod_name is not None: if sys.platform == 'cli': self.exec_mod = Scope() self.exec_mod.__name__ = '__main__' else: sys.modules[mod_name] = self.exec_mod = imp.new_module(mod_name) else: self.exec_mod = sys.modules['__main__'] self.launch_file = launch_file self.code_flags = 0 self.execute_item = None self.execute_item_lock = threading.Lock() self.execute_item_lock.acquire() # lock starts acquired (we use it like manual reset event) def init_connection(self): sys.stdout = _ReplOutput(self, is_stdout = True) sys.stderr = _ReplOutput(self, is_stdout = False) sys.stdin = _ReplInput(self) if sys.platform == 'cli': import System System.Console.SetOut(DotNetOutput(self, True)) System.Console.SetError(DotNetOutput(self, False)) def connect(self, port): ReplBackend.connect(self, port) self.init_connection() def connect_using_socket(self, 
socket): ReplBackend.connect_using_socket(self, socket) self.init_connection() def run_file_as_main(self, filename, args): f = open(filename, 'rb') try: contents = f.read().replace(to_bytes('\r\n'), to_bytes('\n')) finally: f.close() sys.argv = [filename] sys.argv.extend(_command_line_to_args_list(args)) self.exec_mod.__file__ = filename if sys.platform == 'cli': code = python_context.CreateSnippet(contents, None, SourceCodeKind.File) code.Execute(self.exec_mod) else: self.code_flags = 0 real_file = filename if isinstance(filename, unicode) and unicode is not str: # http://pytools.codeplex.com/workitem/696 # We need to encode the unicode filename here, Python 2.x will throw trying # to convert it to ASCII instead of the filesystem encoding. real_file = filename.encode(sys.getfilesystemencoding()) code = compile(contents, real_file, 'exec') self.code_flags |= (code.co_flags & BasicReplBackend.future_bits) exec(code, self.exec_mod.__dict__, self.exec_mod.__dict__) def python_executor(self, code): """we can't close over unbound variables in execute_code_work_item due to the exec, so we do it here""" def func(): code.Execute(self.exec_mod) return func def execute_code_work_item(self): _debug_write('Executing: ' + repr(self.current_code)) stripped_code = self.current_code.strip() if sys.platform == 'cli': code_to_send = '' for line in stripped_code.split('\n'): stripped = line.strip() if (stripped.startswith('#') or not stripped) and not code_to_send: continue code_to_send += line + '\n' code = python_context.CreateSnippet(code_to_send, None, SourceCodeKind.InteractiveCode) dispatcher = clr.GetCurrentRuntime().GetLanguage(PythonContext).GetCommandDispatcher() if dispatcher is not None: dispatcher(self.python_executor(code)) else: code.Execute(self.exec_mod) else: code = compile(self.current_code, '<stdin>', 'single', self.code_flags) self.code_flags |= (code.co_flags & BasicReplBackend.future_bits) exec(code, self.exec_mod.__dict__, self.exec_mod.__dict__) self.current_code = None def run_one_command(self, cur_modules, cur_ps1, cur_ps2): # runs a single iteration of an input, execute file, etc... # This is extracted into it's own method so we play nice w/ IronPython thread abort. # Otherwise we have a nested exception hanging around and the 2nd abort doesn't # work (that's probably an IronPython bug) try: new_modules = self._get_cur_module_set() try: if new_modules != cur_modules: self.send_modules_changed() except: pass cur_modules = new_modules self.execute_item_lock.acquire() if self.check_for_exit_execution_loop(): return True, None, None, None if self.execute_item is not None: try: self.execute_item() finally: self.execute_item = None try: self.send_command_executed() except SocketError: return True, None, None, None try: if cur_ps1 != sys.ps1 or cur_ps2 != sys.ps2: new_ps1 = str(sys.ps1) new_ps2 = str(sys.ps2) self.send_prompt(new_ps1, new_ps2) cur_ps1 = new_ps1 cur_ps2 = new_ps2 except: pass except SystemExit: self.send_error() self.send_exit() # wait for ReplEvaluator to send back exit requested which will indicate # that all the output has been processed. 
while not self.exit_requested: time.sleep(.25) return True, None, None, None except BaseException: _debug_write('Exception') exc_type, exc_value, exc_tb = sys.exc_info() if sys.platform == 'cli': if isinstance(exc_value.clsException, System.Threading.ThreadAbortException): try: System.Threading.Thread.ResetAbort() except SystemError: pass sys.stderr.write('KeyboardInterrupt') else: # let IronPython format the exception so users can do -X:ExceptionDetail or -X:ShowClrExceptions exc_next = self.skip_internal_frames(exc_tb) sys.stderr.write(''.join(traceback.format_exception(exc_type, exc_value, exc_next))) else: exc_next = self.skip_internal_frames(exc_tb) sys.stderr.write(''.join(traceback.format_exception(exc_type, exc_value, exc_next))) try: self.send_error() except SocketError: _debug_write('err sending DONE') return True, None, None, None return False, cur_modules, cur_ps1, cur_ps2 def skip_internal_frames(self, tb): """return the first frame outside of the repl/debugger code""" while tb is not None and self.is_internal_frame(tb): tb = tb.tb_next return tb def is_internal_frame(self, tb): """return true if the frame is from internal code (repl or debugger)""" f = tb.tb_frame co = f.f_code filename = co.co_filename return filename.endswith('visualstudio_py_repl.py') or filename.endswith('visualstudio_py_debugger.py') def execution_loop(self): """loop on the main thread which is responsible for executing code""" if sys.platform == 'cli' and sys.version_info[:3] < (2, 7, 1): # IronPython doesn't support thread.interrupt_main until 2.7.1 import System self.main_thread = System.Threading.Thread.CurrentThread # save our selves so global lookups continue to work (required pre-2.6)... cur_modules = set() try: cur_ps1 = sys.ps1 cur_ps2 = sys.ps2 except: # CPython/IronPython don't set sys.ps1 for non-interactive sessions, Jython and PyPy do sys.ps1 = cur_ps1 = '>>> ' sys.ps2 = cur_ps2 = '... 
' self.send_prompt(cur_ps1, cur_ps2) # launch the startup script if one has been specified if self.launch_file: try: self.run_file_as_main(self.launch_file, '') except: print('error in launching startup script:') traceback.print_exc() while True: exit, cur_modules, cur_ps1, cur_ps2 = self.run_one_command(cur_modules, cur_ps1, cur_ps2) if exit: return def check_for_exit_execution_loop(self): return False def execute_script_work_item(self): self.run_file_as_main(self.current_code, self.current_args) def execute_module_work_item(self): new_argv = [''] + _command_line_to_args_list(self.current_args) old_argv = sys.argv import runpy try: sys.argv = new_argv runpy.run_module(self.current_code, alter_sys=True) except Exception: traceback.print_exc() finally: sys.argv = old_argv def execute_process_work_item(self): try: from subprocess import Popen, PIPE, STDOUT import codecs out_codec = codecs.lookup(sys.stdout.encoding) proc = Popen( '"%s" %s' % (self.current_code, self.current_args), stdout=PIPE, stderr=STDOUT, bufsize=0, ) for line in proc.stdout: print(out_codec.decode(line, 'replace')[0].rstrip('\r\n')) except Exception: traceback.print_exc() @staticmethod def _get_cur_module_set(): """gets the set of modules avoiding exceptions if someone puts something weird in there""" try: return set(sys.modules) except: res = set() for name in sys.modules: try: res.add(name) except: pass return res def run_command(self, command): self.current_code = command self.execute_item = self.execute_code_work_item self.execute_item_lock.release() def execute_file_ex(self, filetype, filename, args): self.current_code = filename self.current_args = args self.execute_item = getattr(self, 'execute_%s_work_item' % filetype, None) self.execute_item_lock.release() def interrupt_main(self): # acquire the send lock so we dont interrupt while we're communicting w/ the debugger with self.send_lock: if sys.platform == 'cli' and sys.version_info[:3] < (2, 7, 1): # IronPython doesn't get thread.interrupt_main until 2.7.1 self.main_thread.Abort(ReplAbortException()) else: thread.interrupt_main() def exit_process(self): self.execute_item = exit_work_item try: self.execute_item_lock.release() except: pass sys.exit(0) def get_members(self, expression): """returns a tuple of the type name, instance members, and type members""" getattr_func = getattr if not expression: all_members = {} if sys.platform == 'cli': code = python_context.CreateSnippet('vars()', None, SourceCodeKind.AutoDetect) items = code.Execute(self.exec_mod) else: items = self.exec_mod.__dict__ for key, value in items.items(): all_members[key] = self.get_type_name(value) return '', all_members, {} else: if sys.platform == 'cli': code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect) val = code.Execute(self.exec_mod) code = python_context.CreateSnippet('dir(' + expression + ')', None, SourceCodeKind.AutoDetect) members = code.Execute(self.exec_mod) code = python_context.CreateSnippet('lambda value, name: getattr(value, name)', None, SourceCodeKind.AutoDetect) getattr_func = code.Execute(self.exec_mod) else: val = eval(expression, self.exec_mod.__dict__, self.exec_mod.__dict__) members = dir(val) return self.collect_members(val, members, getattr_func) def collect_members(self, val, members, getattr_func): t = type(val) inst_members = {} if hasattr(val, '__dict__'): # collect the instance members try: for mem_name in val.__dict__: mem_t = self._get_member_type(val, mem_name, True, getattr_func) if mem_t is not None: inst_members[mem_name] = 
mem_t except: pass # collect the type members type_members = {} for mem_name in members: if mem_name not in inst_members: mem_t = self._get_member_type(val, mem_name, False, getattr_func) if mem_t is not None: type_members[mem_name] = mem_t return t.__module__ + '.' + t.__name__, inst_members, type_members def get_ipy_sig(self, obj, ctor): args = [] vargs = None varkw = None defaults = [] for param in ctor.GetParameters(): if param.IsDefined(ParamArrayAttribute, False): vargs = param.Name elif param.IsDefined(ParamDictionaryAttribute, False): varkw = param.Name else: args.append(param.Name) if param.DefaultValue is not DBNull.Value: defaults.append(repr(param.DefaultValue)) return obj.__doc__, args, vargs, varkw, tuple(defaults) def get_signatures(self, expression): if sys.platform == 'cli': code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect) val = code.Execute(self.exec_mod) else: val = eval(expression, self.exec_mod.__dict__, self.exec_mod.__dict__) return self.collect_signatures(val) def collect_signatures(self, val): doc = val.__doc__ type_obj = None if isinstance(val, type) or isinstance(val, _OldClassType): type_obj = val val = val.__init__ try: args, vargs, varkw, defaults = inspect.getargspec(val) except TypeError: # we're not doing inspect on a Python function... if sys.platform == 'cli': if type_obj is not None: clr_type = clr.GetClrType(type_obj) ctors = clr_type.GetConstructors() return [self.get_ipy_sig(type_obj, ctor) for ctor in ctors] elif type(val) is types.BuiltinFunctionType: return [self.get_ipy_sig(target, target.Targets[0]) for target in val.Overloads.Functions] elif type(val) is builtin_method_descriptor_type: val = PythonOps.GetBuiltinMethodDescriptorTemplate(val) return [self.get_ipy_sig(target, target.Targets[0]) for target in val.Overloads.Functions] raise remove_self = type_obj is not None or (type(val) is types.MethodType and ((sys.version_info >= (3,) and val.__self__ is not None) or (sys.version_info < (3,) and val.im_self is not None))) if remove_self: # remove self for instance methods and types args = args[1:] if defaults is not None: defaults = [repr(default) for default in defaults] else: defaults = [] return [(doc, args, vargs, varkw, defaults)] def set_current_module(self, module): mod = sys.modules.get(module) if mod is not None: _debug_write('Setting module to ' + module) if sys.platform == 'cli': self.exec_mod = clr.GetClrType(type(sys)).GetProperty('Scope', System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance).GetValue(sys, ()) else: self.exec_mod = mod else: _debug_write('Unknown module ' + module) def get_module_names(self): res = [] for name, module in sys.modules.items(): try: if name != 'visualstudio_py_repl' and name != '$visualstudio_py_debugger': if sys.platform == 'cli' and type(module) is NamespaceType: self.get_namespaces(name, module, res) else: filename = getattr(module, '__file__', '') or '' res.append((name, filename)) except: pass return res def get_namespaces(self, basename, namespace, names): names.append((basename, '')) try: for name in dir(namespace): new_name = basename + '.' 
+ name new_namespace = getattr(namespace, name) if type(new_namespace) is NamespaceType: self.get_namespaces(new_name, new_namespace, names) except: pass def flush(self): sys.stdout.flush() def do_detach(self): import visualstudio_py_debugger visualstudio_py_debugger.DETACH_CALLBACKS.remove(self.do_detach) self.on_debugger_detach() def attach_process(self, port, debugger_id, debug_options): def execute_attach_process_work_item(): import visualstudio_py_debugger visualstudio_py_debugger.DETACH_CALLBACKS.append(self.do_detach) visualstudio_py_debugger.attach_process(port, debugger_id, debug_options, report=True, block=True) self.execute_item = execute_attach_process_work_item self.execute_item_lock.release() @staticmethod def get_type_name(val): try: mem_t = type(val) mem_t_name = mem_t.__module__ + '.' + mem_t.__name__ return mem_t_name except: pass @staticmethod def _get_member_type(inst, name, from_dict, getattr_func = None): try: if from_dict: val = inst.__dict__[name] elif type(inst) is _OldInstanceType: val = getattr_func(inst.__class__, name) else: val = getattr_func(type(inst), name) mem_t_name = BasicReplBackend.get_type_name(val) return mem_t_name except: if not from_dict: try: return BasicReplBackend.get_type_name(getattr_func(inst, name)) except: pass return class DebugReplBackend(BasicReplBackend): def __init__(self, debugger): BasicReplBackend.__init__(self, None, None) self.debugger = debugger self.thread_id = None self.frame_id = None self.frame_kind = None self.disconnect_requested = False def init_connection(self): sys.stdout = _ReplOutput(self, is_stdout = True, old_out = sys.stdout) sys.stderr = _ReplOutput(self, is_stdout = False, old_out = sys.stderr) if sys.platform == 'cli': import System self.old_cli_stdout = System.Console.Out self.old_cli_stderr = System.Console.Error System.Console.SetOut(DotNetOutput(self, True, System.Console.Out)) System.Console.SetError(DotNetOutput(self, False, System.Console.Error)) def connect_from_debugger(self, port): ReplBackend.connect(self, port) self.init_connection() def connect_from_debugger_using_socket(self, socket): ReplBackend.connect_using_socket(self, socket) self.init_connection() def disconnect_from_debugger(self): sys.stdout = sys.stdout.old_out sys.stderr = sys.stderr.old_out if sys.platform == 'cli': System.Console.SetOut(self.old_cli_stdout) System.Console.SetError(self.old_cli_stderr) del self.old_cli_stdout del self.old_cli_stderr # this tells both _repl_loop and execution_loop, each # running on its own worker thread, to exit self.disconnect_requested = True self.execute_item_lock.release() def set_current_thread_and_frame(self, thread_id, frame_id, frame_kind): self.thread_id = thread_id self.frame_id = frame_id self.frame_kind = frame_kind self.exec_mod = None def execute_code_work_item(self): if self.exec_mod is not None: BasicReplBackend.execute_code_work_item(self) else: try: self.debugger.execute_code_no_report(self.current_code, self.thread_id, self.frame_id, self.frame_kind) finally: self.current_code = None def get_members(self, expression): """returns a tuple of the type name, instance members, and type members""" if self.exec_mod is not None: return BasicReplBackend.get_members(self, expression) else: thread, cur_frame = self.debugger.get_thread_and_frame(self.thread_id, self.frame_id, self.frame_kind) return self.get_members_for_frame(expression, thread, cur_frame, self.frame_kind) def get_signatures(self, expression): """returns doc, args, vargs, varkw, defaults.""" if self.exec_mod is not None: return 
BasicReplBackend.get_signatures(self, expression) else: thread, cur_frame = self.debugger.get_thread_and_frame(self.thread_id, self.frame_id, self.frame_kind) return self.get_signatures_for_frame(expression, thread, cur_frame, self.frame_kind) def get_members_for_frame(self, expression, thread, cur_frame, frame_kind): """returns a tuple of the type name, instance members, and type members""" getattr_func = getattr if not expression: all_members = {} if sys.platform == 'cli': code = python_context.CreateSnippet('vars()', None, SourceCodeKind.AutoDetect) globals = code.Execute(Scope(cur_frame.f_globals)) locals = code.Execute(Scope(thread.get_locals(cur_frame, frame_kind))) else: globals = cur_frame.f_globals locals = thread.get_locals(cur_frame, frame_kind) for key, value in globals.items(): all_members[key] = self.get_type_name(value) for key, value in locals.items(): all_members[key] = self.get_type_name(value) return '', all_members, {} else: if sys.platform == 'cli': scope = Scope(cur_frame.f_globals) code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect) val = code.Execute(scope) code = python_context.CreateSnippet('dir(' + expression + ')', None, SourceCodeKind.AutoDetect) members = code.Execute(scope) code = python_context.CreateSnippet('lambda value, name: getattr(value, name)', None, SourceCodeKind.AutoDetect) getattr_func = code.Execute(scope) else: val = eval(expression, cur_frame.f_globals, thread.get_locals(cur_frame, frame_kind)) members = dir(val) return self.collect_members(val, members, getattr_func) def get_signatures_for_frame(self, expression, thread, cur_frame, frame_kind): if sys.platform == 'cli': code = python_context.CreateSnippet(expression, None, SourceCodeKind.AutoDetect) val = code.Execute(Scope(cur_frame.f_globals)) else: val = eval(expression, cur_frame.f_globals, thread.get_locals(cur_frame, frame_kind)) return self.collect_signatures(val) def set_current_module(self, module): if module == '<CurrentFrame>': self.exec_mod = None else: BasicReplBackend.set_current_module(self, module) def check_for_exit_repl_loop(self): return self.disconnect_requested def check_for_exit_execution_loop(self): return self.disconnect_requested class _ReplOutput(object): """file like object which redirects output to the repl window.""" errors = None def __init__(self, backend, is_stdout, old_out = None): self.name = "<stdout>" if is_stdout else "<stderr>" self.backend = backend self.old_out = old_out self.is_stdout = is_stdout self.pipe = None def flush(self): if self.old_out: self.old_out.flush() def fileno(self): if self.pipe is None: self.pipe = os.pipe() thread.start_new_thread(self.pipe_thread, (), {}) return self.pipe[1] def pipe_thread(self): while True: data = os.read(self.pipe[0], 1) if data == '\r': data = os.read(self.pipe[0], 1) if data == '\n': self.write('\n') else: self.write('\r' + data) else: self.write(data) @property def encoding(self): return 'utf8' def writelines(self, lines): for line in lines: self.write(line) self.write('\n') def write(self, value): _debug_write('printing ' + repr(value) + '\n') if self.is_stdout: self.backend.write_stdout(value) else: self.backend.write_stderr(value) if self.old_out: self.old_out.write(value) def isatty(self): return True def next(self): pass class _ReplInput(object): """file like object which redirects input from the repl window""" def __init__(self, backend): self.backend = backend def readline(self): return self.backend.read_line() def readlines(self, size = None): res = [] while True: line = 
self.readline() if line is not None: res.append(line) else: break return res def xreadlines(self): return self def write(self, *args): raise IOError("File not open for writing") def flush(self): pass def isatty(self): return True def __iter__(self): return self def next(self): return self.readline() if sys.platform == 'cli': import System class DotNetOutput(System.IO.TextWriter): def __new__(cls, backend, is_stdout, old_out=None): return System.IO.TextWriter.__new__(cls) def __init__(self, backend, is_stdout, old_out=None): self.backend = backend self.is_stdout = is_stdout self.old_out = old_out def Write(self, value, *args): if self.old_out: self.old_out.Write(value, *args) if not args: if type(value) is str or type(value) is System.Char: if self.is_stdout: self.backend.write_stdout(str(value).replace('\r\n', '\n')) else: self.backend.write_stderr(str(value).replace('\r\n', '\n')) else: super(DotNetOutput, self).Write.Overloads[object](value) else: self.Write(System.String.Format(value, *args)) def WriteLine(self, value, *args): if self.old_out: self.old_out.WriteLine(value, *args) if not args: if type(value) is str or type(value) is System.Char: if self.is_stdout: self.backend.write_stdout(str(value).replace('\r\n', '\n') + '\n') else: self.backend.write_stderr(str(value).replace('\r\n', '\n') + '\n') else: super(DotNetOutput, self).WriteLine.Overloads[object](value) else: self.WriteLine(System.String.Format(value, *args)) @property def Encoding(self): return System.Text.Encoding.UTF8 BACKEND = None def _run_repl(): from optparse import OptionParser parser = OptionParser(prog='repl', description='Process REPL options') parser.add_option('--port', dest='port', help='the port to connect back to') parser.add_option('--launch_file', dest='launch_file', help='the script file to run on startup') parser.add_option('--execution_mode', dest='backend', help='the backend to use') parser.add_option('--enable-attach', dest='enable_attach', action="store_true", default=False, help='enable attaching the debugger via $attach') (options, args) = parser.parse_args() # kick off repl # make us available under our "normal" name, not just __main__ which we'll likely replace. sys.modules['visualstudio_py_repl'] = sys.modules['__main__'] global __name__ __name__ = 'visualstudio_py_repl' backend_type = BasicReplBackend backend_error = None if options.backend is not None and options.backend.lower() != 'standard': try: split_backend = options.backend.split('.') backend_mod_name = '.'.join(split_backend[:-1]) backend_name = split_backend[-1] backend_type = getattr(__import__(backend_mod_name), backend_name) except UnsupportedReplException: backend_error = sys.exc_info()[1].reason except: backend_error = traceback.format_exc() # fix sys.path so that cwd is where the project lives. sys.path[0] = '.' # remove all of our parsed args in case we have a launch file that cares... sys.argv = args or [''] global BACKEND BACKEND = backend_type(launch_file=options.launch_file) BACKEND.connect(int(options.port)) if options.enable_attach: BACKEND.init_debugger() if backend_error is not None: sys.stderr.write('Error using selected REPL back-end:\n') sys.stderr.write(backend_error + '\n') sys.stderr.write('Using standard backend instead\n') # execute code on the main thread which we can interrupt BACKEND.execution_loop() if __name__ == '__main__': try: _run_repl() except: if DEBUG: _debug_write(traceback.format_exc()) _debug_write('exiting') input() raise
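
def _editor_sketch_send_pattern():
    # Editor's note: illustrative only, not part of the original file. Shows
    # the send pattern used throughout ReplBackend: every multi-write message
    # to the REPL socket is guarded by SafeSendLock, whose acquire() releases
    # the lock before re-raising if a KeyboardInterrupt lands mid-acquire.
    # 'conn' is a hypothetical connected socket supplied by the caller.
    lock = SafeSendLock()
    def send_stdout(conn, value):
        with lock:
            write_bytes(conn, ReplBackend._STDO)
            write_string(conn, value)
    return send_stdout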
apache-2.0
atopuzov/nitro-python
nssrc/com/citrix/netscaler/nitro/resource/config/appfw/appfwxmlerrorpage.py
3
6286
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class appfwxmlerrorpage(base_resource) : """ Configuration for xml error page resource. """ def __init__(self) : self._name = "" self._src = "" self._comment = "" self._overwrite = False self._response = "" @property def name(self) : ur"""Indicates name of the imported xml error page to be removed. <br/>Minimum length = 1<br/>Maximum length = 31. """ try : return self._name except Exception as e: raise e @name.setter def name(self, name) : ur"""Indicates name of the imported xml error page to be removed. <br/>Minimum length = 1<br/>Maximum length = 31 """ try : self._name = name except Exception as e: raise e @property def src(self) : ur"""URL (protocol, host, path, and name) for the location at which to store the imported XML error object. NOTE: The import fails if the object to be imported is on an HTTPS server that requires client certificate authentication for access.<br/>Minimum length = 1<br/>Maximum length = 2047. """ try : return self._src except Exception as e: raise e @src.setter def src(self, src) : ur"""URL (protocol, host, path, and name) for the location at which to store the imported XML error object. NOTE: The import fails if the object to be imported is on an HTTPS server that requires client certificate authentication for access.<br/>Minimum length = 1<br/>Maximum length = 2047 """ try : self._src = src except Exception as e: raise e @property def comment(self) : ur"""Any comments to preserve information about the XML error object.<br/>Maximum length = 128. """ try : return self._comment except Exception as e: raise e @comment.setter def comment(self, comment) : ur"""Any comments to preserve information about the XML error object.<br/>Maximum length = 128 """ try : self._comment = comment except Exception as e: raise e @property def overwrite(self) : ur"""Overwrite any existing XML error object of the same name. """ try : return self._overwrite except Exception as e: raise e @overwrite.setter def overwrite(self, overwrite) : ur"""Overwrite any existing XML error object of the same name. """ try : self._overwrite = overwrite except Exception as e: raise e @property def response(self) : try : return self._response except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(appfwxmlerrorpage_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.appfwxmlerrorpage except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : if self.name is not None : return str(self.name) return None except Exception as e : raise e @classmethod def delete(cls, client, resource) : ur""" Use this API to delete appfwxmlerrorpage. """ try : if type(resource) is not list : deleteresource = appfwxmlerrorpage() if type(resource) != type(deleteresource): deleteresource.name = resource else : deleteresource.name = resource.name return deleteresource.delete_resource(client) except Exception as e : raise e @classmethod def Import(cls, client, resource) : ur""" Use this API to Import appfwxmlerrorpage. """ try : if type(resource) is not list : Importresource = appfwxmlerrorpage() Importresource.src = resource.src Importresource.name = resource.name Importresource.comment = resource.comment Importresource.overwrite = resource.overwrite return Importresource.perform_operation(client,"Import") except Exception as e : raise e @classmethod def change(cls, client, resource) : ur""" Use this API to change appfwxmlerrorpage. """ try : if type(resource) is not list : changeresource = appfwxmlerrorpage() changeresource.name = resource.name return changeresource.perform_operation(client,"update") except Exception as e : raise e @classmethod def get(cls, client, name="", option_="") : ur""" Use this API to fetch all the appfwxmlerrorpage resources that are configured on netscaler. """ try : if not name : obj = appfwxmlerrorpage() response = obj.get_resources(client, option_) if type(name) != cls : if type(name) is not list : obj = appfwxmlerrorpage() obj.name = name response = obj.get_resource(client, option_) return response except Exception as e : raise e class appfwxmlerrorpage_response(base_response) : def __init__(self, length=1) : self.appfwxmlerrorpage = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.appfwxmlerrorpage = [appfwxmlerrorpage() for _ in range(length)]
apache-2.0
mavteam13/paparazzi
sw/tools/tcp_aircraft_server/phoenix/xmlobject.py
86
17644
#Copyright 2014, Antoine Drouin """ Allows XML files to be operated on like Python objects. Features: - load XML source from file pathnames, readable file objects or raw strings - add, get and set tag attributes like with python attributes - iterate over nodes - save the modified XMLFile or XMLObject to file Example XML file:: <?xml version="1.0" encoding="UTF-8"?> <rapsheets> <person name="John Smith" age="42"> <!-- John Smith has an appeal in process against his last conviction --> <crime name="Armed robbery" date="March 11, 1994"/> <crime name="Aggravated burglary" date="June 9, 2001"/> </person> <person name="Mary Jones" age="33"> <crime name="Prostitution" date="January 8, 1997"/> <crime name="Selling heroin" date="September 4, 2002"/> <crime name="Manslaughter" date="December 21, 2004"/> </person> </rapsheets> Example usage:: >>> from xmlobject import XMLFile >>> x = XMLFile(path="sample.xml") >>> print x <xmlobj.XMLFile instance at 0xb7ccc52c> >>> print x.root <XMLNode: rapsheets> >>> print x.root._children [<XMLNode: text>, <XMLNode: person>, <XMLNode: text>, <XMLNode: person>, <XMLNode: text>] >>> print x.root.person [<XMLNode: person>, <XMLNode: person>] >>> print x.root.person[0].name John Smith >>> john = x.root.person[0] >>> john.height = 184 >>> c = john._addNode("crime") >>> c.name = "Grand Theft Auto" >>> c.date = "4 May, 2005" >>> print x.toxml() <?xml version="1.0" ?> <rapsheets> <person age="42" height="184" name="John Smith"> <!-- John Smith has an appeal in process against his last conviction --> <crime date="March 11, 1994" name="Armed robbery"/> <crime date="June 9, 2001" name="Aggravated burglary"/> <crime date="4 May, 2005" name="Grand Theft Auto"/></person> <person age="33" name="Mary Jones"> <crime date="January 8, 1997" name="Prostitution"/> <crime date="September 4, 2002" name="Selling heroin"/> <crime date="December 21, 2004" name="Manslaughter"/> </person> </rapsheets> >>> """ import sys, os import xml.dom import xml.dom.minidom from xml.dom.minidom import parse, parseString, getDOMImplementation impl = getDOMImplementation() def ensure_list(obj): """ ensures the object passed is a list, so it is iterable. useful workaround until i decide if XMLNode.foo should always return a list of foo, even if there is only one foo child """ if len(obj): return obj else: return [obj] class MissingRootTag(Exception): """root tag name was not given""" class InvalidXML(Exception): """failed to parse XML input""" class CannotSave(Exception): """unable to save""" class InvalidNode(Exception): """not a valid minidom node""" class XMLFile: """ Allows an xml file to be viewed and operated on as a python object. (If you're viewing the epydoc-generated HTML documentation, click the 'show private' link at the top right of this page to see all the methods) Holds the root node in the .root attribute, also in an attribute with the same name as this root node. """ def __init__(self, **kw): """ Create an XMLFile Keywords: - path - a pathname from which the file can be read - file - an open file object from which the raw xml can be read - raw - the raw xml itself - root - name of root tag, if not reading content Usage scenarios: 1. Working with existing content - you must supply input in one of the following ways: - 'path' must be an existing file, or - 'file' must be a readable file object, or - 'raw' must contain raw xml as a string 2. Creating whole new content - you must give the name of the root tag in the 'root' keyword Notes: - Keyword precedence governing existing content is: 1. 
path (if existing file) 2. file 3. raw - If working with existing content: - if the 'root' is given, then the content's toplevel tag MUST match the value given for 'root' - trying to _save will raise an exception unless 'path' has been given - if not working with existing content: - 'root' must be given - _save() will raise an exception unless 'path' has been given """ path = kw.get("path", None) fobj = kw.get("file", None) raw = kw.get("raw", None) root = kw.get("root", None) if path: self.path = path try: fobj = file(path) except IOError: pass else: self.path = None if fobj: raw = fobj.read() if raw: self.dom = xml.dom.minidom.parseString(raw) else: # could not source content, so create a blank slate if not root: # in which case, must give a root node name raise MissingRootTag( "No existing content, so must specify root") # ok, create a blank dom self.dom = impl.createDocument(None, root, None) # get the root node, save it as attributes 'root' and name of node rootnode = self.dom.documentElement # now validate root tag if root: if rootnode.nodeName != root: raise IncorrectRootTag("Gave root='%s', input has root='%s'" % ( root, rootnode.nodeName)) # need this for recursion in XMLNode self._childrenByName = {} self._children = [] # add all the child nodes for child in self.dom.childNodes: childnode = XMLNode(self, child) #print "compare %s to %s" % (rootnode, child) if child == rootnode: #print "found root" self.root = childnode setattr(self, rootnode.nodeName, self.root) def save(self, where=None, obj=None): """ Saves the document. If argument 'where' is given, saves to it, otherwise tries to save to the original given 'path' (or barfs) Value can be a string (taken to be a file path), or an open file object. """ obj = obj or self.dom if not where: if self.path: where = self.path if isinstance(where, str): where = file(where, "w") if not where: raise CannotSave("No save destination, and no original path") where.write(obj.toxml()) where.flush() def saveAs(self, path): """ save this time, and all subsequent times, to filename 'path' """ self.path = path self.save() def toxml(self): return self.dom.toxml() def __len__(self): """ returns number of child nodes """ return len(self._children) def __getitem__(self, idx): if isinstance(idx, int): return self._children[idx] else: return self._childrenByName[idx] class IncorrectRootTag(Exception): """given root tag does not match the content's root tag""" class XMLNode: """ This is the workhorse for the xml object interface (If you're viewing the epydoc-generated HTML documentation, click the 'show private' link at the top right of this page to see all the methods) """ # http://docs.python.org/reference/lexical_analysis.html#id6 __RESERVED_WORDS = ( "and","del","class","from","not","while","as","elif","global","or","with","assert","else","if", "pass","yield","break","except","import","print", "class","exec","in","raise","continue","finally", "is","return","def","for","lambda","try" ) def __init__(self, parent, node): """ You shouldn't need to instantiate this directly """ self._parent = parent if isinstance(parent, XMLFile): self._root = parent else: self._root = parent._root self._node = node self._childrenByName = {} self._children = [] # add ourself to parent's children registry parent._children.append(self) # the deal with named subtags is that we store the first instance # as itself, and with second and subsequent instances, we make a list parentDict = self._parent._childrenByName # If the name of the node is a python reserved word then upper-case it nodeName = node.nodeName if nodeName in self.__RESERVED_WORDS: nodeName = 
nodeName.upper() if nodeName not in parentDict: parentDict[nodeName] = parent.__dict__[nodeName] = self else: if isinstance(parentDict[nodeName], XMLNode): # this is the second child node of a given tag name, so convert # the instance to a list parentDict[nodeName] = parent.__dict__[nodeName] = [parentDict[nodeName]] parentDict[nodeName].append(self) # figure out our type self._value = None if isinstance(node, xml.dom.minidom.Text): self._type = "text" self._value = node.nodeValue elif isinstance(node, xml.dom.minidom.Element): self._type = "node" elif isinstance(node, xml.dom.minidom.Comment): self._type = "comment" self._value = node.nodeValue elif isinstance(node, xml.dom.minidom.DocumentType): #<!DOCTYPE protocol SYSTEM "messages.dtd"> #Ignore doctype, could possibly check it.... pass else: raise InvalidNode("node class %s" % node.__class__) # and wrap all the child nodes for child in node.childNodes: XMLNode(self, child) def _render(self): """ Produces well-formed XML of this node's contents, indented as required """ return self._node.toxml() def __repr__(self): if self._type == "node": return "<XMLNode: %s>" % self._node.nodeName else: return "<XMLNode: %s>" % self._type def __getattr__(self, attr): """ Fetches an attribute or child node of this tag If it's an attribute, then returns the attribute value as a string. If a child node, then: - if there is only one child node of that name, return it - if there is more than one child node of that name, return a list of child nodes of that tag name Supports some magic attributes: - _text - the value of the first child node of type text """ #print "%s: __getattr__: attr=%s" % (self, attr) if attr == '_text': # magic attribute to return text tnode = self['#text'] if isinstance(tnode, list): tnode = tnode[0] return tnode._value if self._type in ['text', 'comment']: if attr == '_value': return self._node.nodeValue else: raise AttributeError(attr) if self._node.hasAttribute(attr): return self._node.getAttribute(attr) elif attr in self._childrenByName: return self._childrenByName[attr] #elif attr == 'value': # magic attribute else: raise AttributeError(attr) def __setattr__(self, attr, val): """ Change the value of an attribute of this tag The magic attribute '_text' can be used to set the first child text node's value For example:: Consider: <somenode> <child>foo</child> </somenode> >>> somenode <XMLNode: somenode> >>> somenode.child <XMLNode: child> >>> somenode.child._text 'foo' >>> somenode._toxml() u'<somenode><child>foo</child></somenode>' >>> somenode.child._text = 'bar' >>> somenode.child._text 'bar' >>> somenode._toxml() u'<somenode><child>bar</child></somenode>' """ if attr.startswith("_"): # magic attribute for setting _text if attr == '_text': tnode = self['#text'] if isinstance(tnode, list): tnode = tnode[0] tnode._node.nodeValue = val tnode._value = val return self.__dict__[attr] = val elif self._type in ['text', 'comment']: self._node.nodeValue = val else: # discern between attribute and child node if attr in self._childrenByName: raise Exception("Attribute Exists") self._node.setAttribute(attr, str(val)) def _keys(self): """ Return a list of attribute names """ return list(self._node.attributes.keys()) def _values(self): """ Returns a list of attribute values for this tag """ return [self._node.getAttribute(k) for k in list(self._node.attributes.keys())] def _items(self): """ returns a list of (attrname, attrval) tuples for this tag """ return [(k, self._node.getAttribute(k)) for k in list(self._node.attributes.keys())] def 
_has_key(self, k): """ returns True if this tag has an attribute of the given name """ return self._node.hasAttribute(k) or k in self._childrenByName def _get(self, k, default=None): """ returns the value of attribute k, or default if no such attribute """ if self._has_key(k): return getattr(self, k) else: return default def __len__(self): """ returns number of child nodes """ return len(self._children) def __getitem__(self, idx): """ if given key is numeric, return the nth child, otherwise try to return the child tag (or list of child tags) having the key as the tag name """ #print "__getitem__: idx=%s" % str(idx) if isinstance(idx, slice) or isinstance(idx, int): return self._children[idx] elif isinstance(idx, str): return self._childrenByName[idx] else: raise IndexError(idx) def _addNode(self, child): """ Tries to append a child node to the tree, and returns it Value of 'child' must be one of: - a string (in which case it is taken to be the name of the new node's tag) - a dom object, in which case it will be wrapped and added - an XMLNode object, in which case it will be added without wrapping """ if isinstance(child, XMLNode): # add it to our children registry self._children.append(child) parentDict = self._childrenByName nodeName = child._node.nodeName if nodeName not in parentDict: parentDict[nodeName] = self.__dict__[nodeName] = child else: if isinstance(parentDict[nodeName], XMLNode): # this is the second child node of a given tag name, so convert # the instance to a list parentDict[nodeName] = self.__dict__[nodeName] = [parentDict[nodeName]] parentDict[nodeName].append(child) # and stick it in the dom self._node.appendChild(child._node) return child elif isinstance(child, str): childNode = self._root.dom.createElement(child) self._node.appendChild(childNode) elif isinstance(child, xml.dom.minidom.Element): childNode = child child = childNode.nodeName self._node.appendChild(childNode) return XMLNode(self, childNode) def _addText(self, value): """ Tries to append a child text node, with the given text, to the tree, and returns the created node object """ childNode = self._root.dom.createTextNode(value) self._node.appendChild(childNode) return XMLNode(self, childNode) def _addComment(self, comment): """ Tries to append a child comment node (with the given text value) to the tree, and returns the created node object """ childNode = self._root.dom.createComment(comment) self._node.appendChild(childNode) return XMLNode(self, childNode) def _save(self, where=None): """ Generates well-formed XML from just this node, and saves it to a file. Argument 'where' is either an open file object, or a pathname If 'where' is not given, then saves the entire document tree. """ if not where: self._root.save() else: self._root.save(where, self._node) def _toxml(self): """ renders just this node out to raw xml code """ return self._node.toxml()
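# Minimal usage sketch (appended for illustration); it exercises only the API
# already demonstrated in the module docstring above: XMLFile(raw=...),
# attribute-style access, _addNode() and toxml().
if __name__ == '__main__':
    sample = '<rapsheets><person name="John Smith" age="42"/></rapsheets>'
    x = XMLFile(raw=sample)
    john = x.root.person              # a single child, so not wrapped in a list
    john.height = 184                 # becomes an XML attribute
    crime = john._addNode("crime")    # append a new child element
    crime.name = "Grand Theft Auto"
    print(x.toxml())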
gpl-2.0
grani/grpc
src/python/grpcio_tests/tests/unit/_rpc_test.py
21
36230
# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Test of RPCs made against gRPC Python's application-layer API.""" import itertools import threading import unittest from concurrent import futures import grpc from grpc.framework.foundation import logging_pool from tests.unit.framework.common import test_constants from tests.unit.framework.common import test_control _SERIALIZE_REQUEST = lambda bytestring: bytestring * 2 _DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:] _SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3 _DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3] _UNARY_UNARY = '/test/UnaryUnary' _UNARY_STREAM = '/test/UnaryStream' _STREAM_UNARY = '/test/StreamUnary' _STREAM_STREAM = '/test/StreamStream' class _Callback(object): def __init__(self): self._condition = threading.Condition() self._value = None self._called = False def __call__(self, value): with self._condition: self._value = value self._called = True self._condition.notify_all() def value(self): with self._condition: while not self._called: self._condition.wait() return self._value class _Handler(object): def __init__(self, control): self._control = control def handle_unary_unary(self, request, servicer_context): self._control.control() if servicer_context is not None: servicer_context.set_trailing_metadata((('testkey', 'testvalue',),)) # TODO(https://github.com/grpc/grpc/issues/8483): test the values # returned by these methods rather than only "smoke" testing that # the return after having been called. 
servicer_context.is_active() servicer_context.time_remaining() return request def handle_unary_stream(self, request, servicer_context): for _ in range(test_constants.STREAM_LENGTH): self._control.control() yield request self._control.control() if servicer_context is not None: servicer_context.set_trailing_metadata((('testkey', 'testvalue',),)) def handle_stream_unary(self, request_iterator, servicer_context): if servicer_context is not None: servicer_context.invocation_metadata() self._control.control() response_elements = [] for request in request_iterator: self._control.control() response_elements.append(request) self._control.control() if servicer_context is not None: servicer_context.set_trailing_metadata((('testkey', 'testvalue',),)) return b''.join(response_elements) def handle_stream_stream(self, request_iterator, servicer_context): self._control.control() if servicer_context is not None: servicer_context.set_trailing_metadata((('testkey', 'testvalue',),)) for request in request_iterator: self._control.control() yield request self._control.control() class _MethodHandler(grpc.RpcMethodHandler): def __init__(self, request_streaming, response_streaming, request_deserializer, response_serializer, unary_unary, unary_stream, stream_unary, stream_stream): self.request_streaming = request_streaming self.response_streaming = response_streaming self.request_deserializer = request_deserializer self.response_serializer = response_serializer self.unary_unary = unary_unary self.unary_stream = unary_stream self.stream_unary = stream_unary self.stream_stream = stream_stream class _GenericHandler(grpc.GenericRpcHandler): def __init__(self, handler): self._handler = handler def service(self, handler_call_details): if handler_call_details.method == _UNARY_UNARY: return _MethodHandler(False, False, None, None, self._handler.handle_unary_unary, None, None, None) elif handler_call_details.method == _UNARY_STREAM: return _MethodHandler(False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None, self._handler.handle_unary_stream, None, None) elif handler_call_details.method == _STREAM_UNARY: return _MethodHandler(True, False, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None, None, self._handler.handle_stream_unary, None) elif handler_call_details.method == _STREAM_STREAM: return _MethodHandler(True, True, None, None, None, None, None, self._handler.handle_stream_stream) else: return None def _unary_unary_multi_callable(channel): return channel.unary_unary(_UNARY_UNARY) def _unary_stream_multi_callable(channel): return channel.unary_stream( _UNARY_STREAM, request_serializer=_SERIALIZE_REQUEST, response_deserializer=_DESERIALIZE_RESPONSE) def _stream_unary_multi_callable(channel): return channel.stream_unary( _STREAM_UNARY, request_serializer=_SERIALIZE_REQUEST, response_deserializer=_DESERIALIZE_RESPONSE) def _stream_stream_multi_callable(channel): return channel.stream_stream(_STREAM_STREAM) class RPCTest(unittest.TestCase): def setUp(self): self._control = test_control.PauseFailControl() self._handler = _Handler(self._control) self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) self._server = grpc.server(self._server_pool) port = self._server.add_insecure_port('[::]:0') self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),)) self._server.start() self._channel = grpc.insecure_channel('localhost:%d' % port) def tearDown(self): self._server.stop(None) self._server_pool.shutdown(wait=True) def testUnrecognizedMethod(self): request = b'abc' with 
self.assertRaises(grpc.RpcError) as exception_context: self._channel.unary_unary('NoSuchMethod')(request) self.assertEqual(grpc.StatusCode.UNIMPLEMENTED, exception_context.exception.code()) def testSuccessfulUnaryRequestBlockingUnaryResponse(self): request = b'\x07\x08' expected_response = self._handler.handle_unary_unary(request, None) multi_callable = _unary_unary_multi_callable(self._channel) response = multi_callable( request, metadata=(('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),)) self.assertEqual(expected_response, response) def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self): request = b'\x07\x08' expected_response = self._handler.handle_unary_unary(request, None) multi_callable = _unary_unary_multi_callable(self._channel) response, call = multi_callable.with_call( request, metadata=(('test', 'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),)) self.assertEqual(expected_response, response) self.assertIs(grpc.StatusCode.OK, call.code()) def testSuccessfulUnaryRequestFutureUnaryResponse(self): request = b'\x07\x08' expected_response = self._handler.handle_unary_unary(request, None) multi_callable = _unary_unary_multi_callable(self._channel) response_future = multi_callable.future( request, metadata=(('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),)) response = response_future.result() self.assertIsInstance(response_future, grpc.Future) self.assertIsInstance(response_future, grpc.Call) self.assertEqual(expected_response, response) self.assertIsNone(response_future.exception()) self.assertIsNone(response_future.traceback()) def testSuccessfulUnaryRequestStreamResponse(self): request = b'\x37\x58' expected_responses = tuple( self._handler.handle_unary_stream(request, None)) multi_callable = _unary_stream_multi_callable(self._channel) response_iterator = multi_callable( request, metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),)) responses = tuple(response_iterator) self.assertSequenceEqual(expected_responses, responses) def testSuccessfulStreamRequestBlockingUnaryResponse(self): requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) expected_response = self._handler.handle_stream_unary( iter(requests), None) request_iterator = iter(requests) multi_callable = _stream_unary_multi_callable(self._channel) response = multi_callable( request_iterator, metadata=( ('test', 'SuccessfulStreamRequestBlockingUnaryResponse'),)) self.assertEqual(expected_response, response) def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self): requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) expected_response = self._handler.handle_stream_unary( iter(requests), None) request_iterator = iter(requests) multi_callable = _stream_unary_multi_callable(self._channel) response, call = multi_callable.with_call( request_iterator, metadata=( ('test', 'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),)) self.assertEqual(expected_response, response) self.assertIs(grpc.StatusCode.OK, call.code()) def testSuccessfulStreamRequestFutureUnaryResponse(self): requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) expected_response = self._handler.handle_stream_unary( iter(requests), None) request_iterator = iter(requests) multi_callable = _stream_unary_multi_callable(self._channel) response_future = multi_callable.future( request_iterator, metadata=(('test', 'SuccessfulStreamRequestFutureUnaryResponse'),)) response = response_future.result() self.assertEqual(expected_response, response) 
self.assertIsNone(response_future.exception()) self.assertIsNone(response_future.traceback()) def testSuccessfulStreamRequestStreamResponse(self): requests = tuple(b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)) expected_responses = tuple( self._handler.handle_stream_stream(iter(requests), None)) request_iterator = iter(requests) multi_callable = _stream_stream_multi_callable(self._channel) response_iterator = multi_callable( request_iterator, metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),)) responses = tuple(response_iterator) self.assertSequenceEqual(expected_responses, responses) def testSequentialInvocations(self): first_request = b'\x07\x08' second_request = b'\x0809' expected_first_response = self._handler.handle_unary_unary( first_request, None) expected_second_response = self._handler.handle_unary_unary( second_request, None) multi_callable = _unary_unary_multi_callable(self._channel) first_response = multi_callable( first_request, metadata=(('test', 'SequentialInvocations'),)) second_response = multi_callable( second_request, metadata=(('test', 'SequentialInvocations'),)) self.assertEqual(expected_first_response, first_response) self.assertEqual(expected_second_response, second_response) def testConcurrentBlockingInvocations(self): pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) expected_response = self._handler.handle_stream_unary( iter(requests), None) expected_responses = [expected_response ] * test_constants.THREAD_CONCURRENCY response_futures = [None] * test_constants.THREAD_CONCURRENCY multi_callable = _stream_unary_multi_callable(self._channel) for index in range(test_constants.THREAD_CONCURRENCY): request_iterator = iter(requests) response_future = pool.submit( multi_callable, request_iterator, metadata=(('test', 'ConcurrentBlockingInvocations'),)) response_futures[index] = response_future responses = tuple(response_future.result() for response_future in response_futures) pool.shutdown(wait=True) self.assertSequenceEqual(expected_responses, responses) def testConcurrentFutureInvocations(self): requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) expected_response = self._handler.handle_stream_unary( iter(requests), None) expected_responses = [expected_response ] * test_constants.THREAD_CONCURRENCY response_futures = [None] * test_constants.THREAD_CONCURRENCY multi_callable = _stream_unary_multi_callable(self._channel) for index in range(test_constants.THREAD_CONCURRENCY): request_iterator = iter(requests) response_future = multi_callable.future( request_iterator, metadata=(('test', 'ConcurrentFutureInvocations'),)) response_futures[index] = response_future responses = tuple(response_future.result() for response_future in response_futures) self.assertSequenceEqual(expected_responses, responses) def testWaitingForSomeButNotAllConcurrentFutureInvocations(self): pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) request = b'\x67\x68' expected_response = self._handler.handle_unary_unary(request, None) response_futures = [None] * test_constants.THREAD_CONCURRENCY lock = threading.Lock() test_is_running_cell = [True] def wrap_future(future): def wrap(): try: return future.result() except grpc.RpcError: with lock: if test_is_running_cell[0]: raise return None return wrap multi_callable = _unary_unary_multi_callable(self._channel) for index in range(test_constants.THREAD_CONCURRENCY): inner_response_future = multi_callable.future( request, 
metadata=( ('test', 'WaitingForSomeButNotAllConcurrentFutureInvocations'),)) outer_response_future = pool.submit( wrap_future(inner_response_future)) response_futures[index] = outer_response_future some_completed_response_futures_iterator = itertools.islice( futures.as_completed(response_futures), test_constants.THREAD_CONCURRENCY // 2) for response_future in some_completed_response_futures_iterator: self.assertEqual(expected_response, response_future.result()) with lock: test_is_running_cell[0] = False def testConsumingOneStreamResponseUnaryRequest(self): request = b'\x57\x38' multi_callable = _unary_stream_multi_callable(self._channel) response_iterator = multi_callable( request, metadata=(('test', 'ConsumingOneStreamResponseUnaryRequest'),)) next(response_iterator) def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self): request = b'\x57\x38' multi_callable = _unary_stream_multi_callable(self._channel) response_iterator = multi_callable( request, metadata=( ('test', 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),)) for _ in range(test_constants.STREAM_LENGTH // 2): next(response_iterator) def testConsumingSomeButNotAllStreamResponsesStreamRequest(self): requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_stream_multi_callable(self._channel) response_iterator = multi_callable( request_iterator, metadata=(('test', 'ConsumingSomeButNotAllStreamResponsesStreamRequest'),)) for _ in range(test_constants.STREAM_LENGTH // 2): next(response_iterator) def testConsumingTooManyStreamResponsesStreamRequest(self): requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_stream_multi_callable(self._channel) response_iterator = multi_callable( request_iterator, metadata=( ('test', 'ConsumingTooManyStreamResponsesStreamRequest'),)) for _ in range(test_constants.STREAM_LENGTH): next(response_iterator) for _ in range(test_constants.STREAM_LENGTH): with self.assertRaises(StopIteration): next(response_iterator) self.assertIsNotNone(response_iterator.initial_metadata()) self.assertIs(grpc.StatusCode.OK, response_iterator.code()) self.assertIsNotNone(response_iterator.details()) self.assertIsNotNone(response_iterator.trailing_metadata()) def testCancelledUnaryRequestUnaryResponse(self): request = b'\x07\x17' multi_callable = _unary_unary_multi_callable(self._channel) with self._control.pause(): response_future = multi_callable.future( request, metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),)) response_future.cancel() self.assertTrue(response_future.cancelled()) with self.assertRaises(grpc.FutureCancelledError): response_future.result() with self.assertRaises(grpc.FutureCancelledError): response_future.exception() with self.assertRaises(grpc.FutureCancelledError): response_future.traceback() self.assertIs(grpc.StatusCode.CANCELLED, response_future.code()) def testCancelledUnaryRequestStreamResponse(self): request = b'\x07\x19' multi_callable = _unary_stream_multi_callable(self._channel) with self._control.pause(): response_iterator = multi_callable( request, metadata=(('test', 'CancelledUnaryRequestStreamResponse'),)) self._control.block_until_paused() response_iterator.cancel() with self.assertRaises(grpc.RpcError) as exception_context: next(response_iterator) self.assertIs(grpc.StatusCode.CANCELLED, exception_context.exception.code()) self.assertIsNotNone(response_iterator.initial_metadata()) 
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code()) self.assertIsNotNone(response_iterator.details()) self.assertIsNotNone(response_iterator.trailing_metadata()) def testCancelledStreamRequestUnaryResponse(self): requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_unary_multi_callable(self._channel) with self._control.pause(): response_future = multi_callable.future( request_iterator, metadata=(('test', 'CancelledStreamRequestUnaryResponse'),)) self._control.block_until_paused() response_future.cancel() self.assertTrue(response_future.cancelled()) with self.assertRaises(grpc.FutureCancelledError): response_future.result() with self.assertRaises(grpc.FutureCancelledError): response_future.exception() with self.assertRaises(grpc.FutureCancelledError): response_future.traceback() self.assertIsNotNone(response_future.initial_metadata()) self.assertIs(grpc.StatusCode.CANCELLED, response_future.code()) self.assertIsNotNone(response_future.details()) self.assertIsNotNone(response_future.trailing_metadata()) def testCancelledStreamRequestStreamResponse(self): requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_stream_multi_callable(self._channel) with self._control.pause(): response_iterator = multi_callable( request_iterator, metadata=(('test', 'CancelledStreamRequestStreamResponse'),)) response_iterator.cancel() with self.assertRaises(grpc.RpcError): next(response_iterator) self.assertIsNotNone(response_iterator.initial_metadata()) self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code()) self.assertIsNotNone(response_iterator.details()) self.assertIsNotNone(response_iterator.trailing_metadata()) def testExpiredUnaryRequestBlockingUnaryResponse(self): request = b'\x07\x17' multi_callable = _unary_unary_multi_callable(self._channel) with self._control.pause(): with self.assertRaises(grpc.RpcError) as exception_context: multi_callable.with_call( request, timeout=test_constants.SHORT_TIMEOUT, metadata=( ('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),)) self.assertIsInstance(exception_context.exception, grpc.Call) self.assertIsNotNone(exception_context.exception.initial_metadata()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIsNotNone(exception_context.exception.details()) self.assertIsNotNone(exception_context.exception.trailing_metadata()) def testExpiredUnaryRequestFutureUnaryResponse(self): request = b'\x07\x17' callback = _Callback() multi_callable = _unary_unary_multi_callable(self._channel) with self._control.pause(): response_future = multi_callable.future( request, timeout=test_constants.SHORT_TIMEOUT, metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),)) response_future.add_done_callback(callback) value_passed_to_callback = callback.value() self.assertIs(response_future, value_passed_to_callback) self.assertIsNotNone(response_future.initial_metadata()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code()) self.assertIsNotNone(response_future.details()) self.assertIsNotNone(response_future.trailing_metadata()) with self.assertRaises(grpc.RpcError) as exception_context: response_future.result() self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) self.assertIsNotNone(response_future.traceback()) 
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.exception().code()) def testExpiredUnaryRequestStreamResponse(self): request = b'\x07\x19' multi_callable = _unary_stream_multi_callable(self._channel) with self._control.pause(): with self.assertRaises(grpc.RpcError) as exception_context: response_iterator = multi_callable( request, timeout=test_constants.SHORT_TIMEOUT, metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),)) next(response_iterator) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_iterator.code()) def testExpiredStreamRequestBlockingUnaryResponse(self): requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_unary_multi_callable(self._channel) with self._control.pause(): with self.assertRaises(grpc.RpcError) as exception_context: multi_callable( request_iterator, timeout=test_constants.SHORT_TIMEOUT, metadata=( ('test', 'ExpiredStreamRequestBlockingUnaryResponse'),)) self.assertIsInstance(exception_context.exception, grpc.RpcError) self.assertIsInstance(exception_context.exception, grpc.Call) self.assertIsNotNone(exception_context.exception.initial_metadata()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIsNotNone(exception_context.exception.details()) self.assertIsNotNone(exception_context.exception.trailing_metadata()) def testExpiredStreamRequestFutureUnaryResponse(self): requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) callback = _Callback() multi_callable = _stream_unary_multi_callable(self._channel) with self._control.pause(): response_future = multi_callable.future( request_iterator, timeout=test_constants.SHORT_TIMEOUT, metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),)) with self.assertRaises(grpc.FutureTimeoutError): response_future.result(timeout=test_constants.SHORT_TIMEOUT / 2.0) response_future.add_done_callback(callback) value_passed_to_callback = callback.value() with self.assertRaises(grpc.RpcError) as exception_context: response_future.result() self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) self.assertIsNotNone(response_future.traceback()) self.assertIs(response_future, value_passed_to_callback) self.assertIsNotNone(response_future.initial_metadata()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code()) self.assertIsNotNone(response_future.details()) self.assertIsNotNone(response_future.trailing_metadata()) def testExpiredStreamRequestStreamResponse(self): requests = tuple(b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_stream_multi_callable(self._channel) with self._control.pause(): with self.assertRaises(grpc.RpcError) as exception_context: response_iterator = multi_callable( request_iterator, timeout=test_constants.SHORT_TIMEOUT, metadata=(('test', 'ExpiredStreamRequestStreamResponse'),)) next(response_iterator) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_iterator.code()) def testFailedUnaryRequestBlockingUnaryResponse(self): request = b'\x37\x17' multi_callable = 
_unary_unary_multi_callable(self._channel) with self._control.fail(): with self.assertRaises(grpc.RpcError) as exception_context: multi_callable.with_call( request, metadata=( ('test', 'FailedUnaryRequestBlockingUnaryResponse'),)) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) def testFailedUnaryRequestFutureUnaryResponse(self): request = b'\x37\x17' callback = _Callback() multi_callable = _unary_unary_multi_callable(self._channel) with self._control.fail(): response_future = multi_callable.future( request, metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),)) response_future.add_done_callback(callback) value_passed_to_callback = callback.value() self.assertIsInstance(response_future, grpc.Future) self.assertIsInstance(response_future, grpc.Call) with self.assertRaises(grpc.RpcError) as exception_context: response_future.result() self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) self.assertIsNotNone(response_future.traceback()) self.assertIs(grpc.StatusCode.UNKNOWN, response_future.exception().code()) self.assertIs(response_future, value_passed_to_callback) def testFailedUnaryRequestStreamResponse(self): request = b'\x37\x17' multi_callable = _unary_stream_multi_callable(self._channel) with self.assertRaises(grpc.RpcError) as exception_context: with self._control.fail(): response_iterator = multi_callable( request, metadata=(('test', 'FailedUnaryRequestStreamResponse'),)) next(response_iterator) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) def testFailedStreamRequestBlockingUnaryResponse(self): requests = tuple(b'\x47\x58' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_unary_multi_callable(self._channel) with self._control.fail(): with self.assertRaises(grpc.RpcError) as exception_context: multi_callable( request_iterator, metadata=( ('test', 'FailedStreamRequestBlockingUnaryResponse'),)) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) def testFailedStreamRequestFutureUnaryResponse(self): requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) callback = _Callback() multi_callable = _stream_unary_multi_callable(self._channel) with self._control.fail(): response_future = multi_callable.future( request_iterator, metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),)) response_future.add_done_callback(callback) value_passed_to_callback = callback.value() with self.assertRaises(grpc.RpcError) as exception_context: response_future.result() self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code()) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) self.assertIsNotNone(response_future.traceback()) self.assertIs(response_future, value_passed_to_callback) def testFailedStreamRequestStreamResponse(self): requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_stream_multi_callable(self._channel) with self._control.fail(): with self.assertRaises(grpc.RpcError) as exception_context: response_iterator = multi_callable( request_iterator, metadata=(('test', 'FailedStreamRequestStreamResponse'),)) tuple(response_iterator) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) self.assertIs(grpc.StatusCode.UNKNOWN, 
response_iterator.code()) def testIgnoredUnaryRequestFutureUnaryResponse(self): request = b'\x37\x17' multi_callable = _unary_unary_multi_callable(self._channel) multi_callable.future( request, metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),)) def testIgnoredUnaryRequestStreamResponse(self): request = b'\x37\x17' multi_callable = _unary_stream_multi_callable(self._channel) multi_callable( request, metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),)) def testIgnoredStreamRequestFutureUnaryResponse(self): requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_unary_multi_callable(self._channel) multi_callable.future( request_iterator, metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),)) def testIgnoredStreamRequestStreamResponse(self): requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) multi_callable = _stream_stream_multi_callable(self._channel) multi_callable( request_iterator, metadata=(('test', 'IgnoredStreamRequestStreamResponse'),)) if __name__ == '__main__': unittest.main(verbosity=2)
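# Illustrative sketch (appended; not part of the original test module): the
# smallest server/channel wiring exercised by RPCTest.setUp above, reduced to
# a single unary-unary echo RPC. Binding port 0 asks the OS for a free port.
def _example_echo_roundtrip():
    class _EchoHandler(grpc.GenericRpcHandler):
        def service(self, handler_call_details):
            if handler_call_details.method == _UNARY_UNARY:
                # identity handler: echo the request bytes back unchanged
                return _MethodHandler(False, False, None, None,
                                      lambda request, context: request,
                                      None, None, None)
            return None

    server = grpc.server(logging_pool.pool(test_constants.THREAD_CONCURRENCY))
    port = server.add_insecure_port('[::]:0')
    server.add_generic_rpc_handlers((_EchoHandler(),))
    server.start()
    channel = grpc.insecure_channel('localhost:%d' % port)
    response = channel.unary_unary(_UNARY_UNARY)(b'ping')  # blocking call
    server.stop(None)
    return response  # b'ping'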
bsd-3-clause
abadger/ansible
lib/ansible/module_utils/facts/virtual/linux.py
15
17328
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import glob import os import re from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector from ansible.module_utils.facts.utils import get_file_content, get_file_lines class LinuxVirtual(Virtual): """ This is a Linux-specific subclass of Virtual. It defines - virtualization_type - virtualization_role """ platform = 'Linux' # For more information, check: http://people.redhat.com/~rjones/virt-what/ def get_virtual_facts(self): virtual_facts = {} # We want to maintain compatibility with the old "virtualization_type" # and "virtualization_role" entries, so we need to track if we found # them. We won't return them until the end, but if we found them early, # we should avoid updating them again. found_virt = False # But as we go along, we also want to track virt tech the new way. host_tech = set() guest_tech = set() # lxc/docker if os.path.exists('/proc/1/cgroup'): for line in get_file_lines('/proc/1/cgroup'): if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line): guest_tech.add('docker') if not found_virt: virtual_facts['virtualization_type'] = 'docker' virtual_facts['virtualization_role'] = 'guest' found_virt = True if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line): guest_tech.add('lxc') if not found_virt: virtual_facts['virtualization_type'] = 'lxc' virtual_facts['virtualization_role'] = 'guest' found_virt = True if re.search('/system.slice/containerd.service', line): guest_tech.add('containerd') if not found_virt: virtual_facts['virtualization_type'] = 'containerd' virtual_facts['virtualization_role'] = 'guest' found_virt = True # lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs if os.path.exists('/proc/1/environ'): for line in get_file_lines('/proc/1/environ', line_sep='\x00'): if re.search('container=lxc', line): guest_tech.add('lxc') if not found_virt: virtual_facts['virtualization_type'] = 'lxc' virtual_facts['virtualization_role'] = 'guest' found_virt = True if re.search('container=podman', line): guest_tech.add('podman') if not found_virt: virtual_facts['virtualization_type'] = 'podman' virtual_facts['virtualization_role'] = 'guest' found_virt = True if re.search('^container=.', line): guest_tech.add('container') if not found_virt: virtual_facts['virtualization_type'] = 'container' virtual_facts['virtualization_role'] = 'guest' found_virt = True if os.path.exists('/proc/vz') and not os.path.exists('/proc/lve'): virtual_facts['virtualization_type'] = 'openvz' if os.path.exists('/proc/bc'): host_tech.add('openvz') if not found_virt: virtual_facts['virtualization_role'] = 'host' else: guest_tech.add('openvz') if not found_virt: virtual_facts['virtualization_role'] = 'guest' found_virt = True systemd_container = get_file_content('/run/systemd/container') if 
systemd_container: guest_tech.add(systemd_container) if not found_virt: virtual_facts['virtualization_type'] = systemd_container virtual_facts['virtualization_role'] = 'guest' found_virt = True # ensure 'container' guest_tech is appropriately set if guest_tech.intersection(set(['docker', 'lxc', 'podman', 'openvz', 'containerd'])) or systemd_container: guest_tech.add('container') if os.path.exists("/proc/xen"): is_xen_host = False try: for line in get_file_lines('/proc/xen/capabilities'): if "control_d" in line: is_xen_host = True except IOError: pass if is_xen_host: host_tech.add('xen') if not found_virt: virtual_facts['virtualization_type'] = 'xen' virtual_facts['virtualization_role'] = 'host' else: if not found_virt: virtual_facts['virtualization_type'] = 'xen' virtual_facts['virtualization_role'] = 'guest' found_virt = True # assume guest for this block if not found_virt: virtual_facts['virtualization_role'] = 'guest' product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name') sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor') product_family = get_file_content('/sys/devices/virtual/dmi/id/product_family') if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV'): guest_tech.add('kvm') if not found_virt: virtual_facts['virtualization_type'] = 'kvm' found_virt = True if sys_vendor == 'oVirt': guest_tech.add('oVirt') if not found_virt: virtual_facts['virtualization_type'] = 'oVirt' found_virt = True if sys_vendor == 'Red Hat': if product_family == 'RHV': guest_tech.add('RHV') if not found_virt: virtual_facts['virtualization_type'] = 'RHV' found_virt = True elif product_name == 'RHEV Hypervisor': guest_tech.add('RHEV') if not found_virt: virtual_facts['virtualization_type'] = 'RHEV' found_virt = True if product_name in ('VMware Virtual Platform', 'VMware7,1'): guest_tech.add('VMware') if not found_virt: virtual_facts['virtualization_type'] = 'VMware' found_virt = True if product_name in ('OpenStack Compute', 'OpenStack Nova'): guest_tech.add('openstack') if not found_virt: virtual_facts['virtualization_type'] = 'openstack' found_virt = True bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor') if bios_vendor == 'Xen': guest_tech.add('xen') if not found_virt: virtual_facts['virtualization_type'] = 'xen' found_virt = True if bios_vendor == 'innotek GmbH': guest_tech.add('virtualbox') if not found_virt: virtual_facts['virtualization_type'] = 'virtualbox' found_virt = True if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner'): guest_tech.add('kvm') if not found_virt: virtual_facts['virtualization_type'] = 'kvm' found_virt = True KVM_SYS_VENDORS = ('QEMU', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway', 'Nutanix') if sys_vendor in KVM_SYS_VENDORS: guest_tech.add('kvm') if not found_virt: virtual_facts['virtualization_type'] = 'kvm' found_virt = True if sys_vendor == 'KubeVirt': guest_tech.add('KubeVirt') if not found_virt: virtual_facts['virtualization_type'] = 'KubeVirt' found_virt = True # FIXME: This does also match hyperv if sys_vendor == 'Microsoft Corporation': guest_tech.add('VirtualPC') if not found_virt: virtual_facts['virtualization_type'] = 'VirtualPC' found_virt = True if sys_vendor == 'Parallels Software International Inc.': guest_tech.add('parallels') if not found_virt: virtual_facts['virtualization_type'] = 'parallels' found_virt = True if sys_vendor == 'OpenStack Foundation': guest_tech.add('openstack') if not found_virt: virtual_facts['virtualization_type'] = 'openstack' found_virt = True # unassume guest if not 
found_virt: del virtual_facts['virtualization_role'] if os.path.exists('/proc/self/status'): for line in get_file_lines('/proc/self/status'): if re.match(r'^VxID:\s+\d+', line): if not found_virt: virtual_facts['virtualization_type'] = 'linux_vserver' if re.match(r'^VxID:\s+0', line): host_tech.add('linux_vserver') if not found_virt: virtual_facts['virtualization_role'] = 'host' else: guest_tech.add('linux_vserver') if not found_virt: virtual_facts['virtualization_role'] = 'guest' found_virt = True if os.path.exists('/proc/cpuinfo'): for line in get_file_lines('/proc/cpuinfo'): if re.match('^model name.*QEMU Virtual CPU', line): guest_tech.add('kvm') if not found_virt: virtual_facts['virtualization_type'] = 'kvm' elif re.match('^vendor_id.*User Mode Linux', line): guest_tech.add('uml') if not found_virt: virtual_facts['virtualization_type'] = 'uml' elif re.match('^model name.*UML', line): guest_tech.add('uml') if not found_virt: virtual_facts['virtualization_type'] = 'uml' elif re.match('^machine.*CHRP IBM pSeries .emulated by qemu.', line): guest_tech.add('kvm') if not found_virt: virtual_facts['virtualization_type'] = 'kvm' elif re.match('^vendor_id.*PowerVM Lx86', line): guest_tech.add('powervm_lx86') if not found_virt: virtual_facts['virtualization_type'] = 'powervm_lx86' elif re.match('^vendor_id.*IBM/S390', line): guest_tech.add('PR/SM') if not found_virt: virtual_facts['virtualization_type'] = 'PR/SM' lscpu = self.module.get_bin_path('lscpu') if lscpu: rc, out, err = self.module.run_command(["lscpu"]) if rc == 0: for line in out.splitlines(): data = line.split(":", 1) key = data[0].strip() if key == 'Hypervisor': tech = data[1].strip() guest_tech.add(tech) if not found_virt: virtual_facts['virtualization_type'] = tech else: guest_tech.add('ibm_systemz') if not found_virt: virtual_facts['virtualization_type'] = 'ibm_systemz' else: continue if virtual_facts['virtualization_type'] == 'PR/SM': if not found_virt: virtual_facts['virtualization_role'] = 'LPAR' else: if not found_virt: virtual_facts['virtualization_role'] = 'guest' if not found_virt: found_virt = True # Beware that we can have both kvm and virtualbox running on a single system if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK): modules = [] for line in get_file_lines("/proc/modules"): data = line.split(" ", 1) modules.append(data[0]) if 'kvm' in modules: host_tech.add('kvm') if not found_virt: virtual_facts['virtualization_type'] = 'kvm' virtual_facts['virtualization_role'] = 'host' if os.path.isdir('/rhev/'): # Check whether this is a RHEV hypervisor (is vdsm running ?) for f in glob.glob('/proc/[0-9]*/comm'): try: with open(f) as virt_fh: comm_content = virt_fh.read().rstrip() if comm_content in ('vdsm', 'vdsmd'): # We add both kvm and RHEV to host_tech in this case. # It's accurate. RHEV uses KVM. 
host_tech.add('RHEV') if not found_virt: virtual_facts['virtualization_type'] = 'RHEV' break except Exception: pass found_virt = True if 'vboxdrv' in modules: host_tech.add('virtualbox') if not found_virt: virtual_facts['virtualization_type'] = 'virtualbox' virtual_facts['virtualization_role'] = 'host' found_virt = True if 'virtio' in modules: host_tech.add('kvm') if not found_virt: virtual_facts['virtualization_type'] = 'kvm' virtual_facts['virtualization_role'] = 'guest' found_virt = True # In older Linux Kernel versions, /sys filesystem is not available # dmidecode is the safest option to parse virtualization related values dmi_bin = self.module.get_bin_path('dmidecode') # We still want to continue even if dmidecode is not available if dmi_bin is not None: (rc, out, err) = self.module.run_command('%s -s system-product-name' % dmi_bin) if rc == 0: # Strip out commented lines (specific dmidecode output) vendor_name = ''.join([line.strip() for line in out.splitlines() if not line.startswith('#')]) if vendor_name.startswith('VMware'): guest_tech.add('VMware') if not found_virt: virtual_facts['virtualization_type'] = 'VMware' virtual_facts['virtualization_role'] = 'guest' found_virt = True if 'BHYVE' in out: guest_tech.add('bhyve') if not found_virt: virtual_facts['virtualization_type'] = 'bhyve' virtual_facts['virtualization_role'] = 'guest' found_virt = True if os.path.exists('/dev/kvm'): host_tech.add('kvm') if not found_virt: virtual_facts['virtualization_type'] = 'kvm' virtual_facts['virtualization_role'] = 'host' found_virt = True # If none of the above matches, return 'NA' for virtualization_type # and virtualization_role. This allows for proper grouping. if not found_virt: virtual_facts['virtualization_type'] = 'NA' virtual_facts['virtualization_role'] = 'NA' found_virt = True virtual_facts['virtualization_tech_guest'] = guest_tech virtual_facts['virtualization_tech_host'] = host_tech return virtual_facts class LinuxVirtualCollector(VirtualCollector): _fact_class = LinuxVirtual _platform = 'Linux'
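# Illustrative standalone sketch (appended; not part of the Ansible module):
# a reduced version of the DMI-based guest detection from get_virtual_facts
# above. It reuses get_file_content (imported at the top of this file) and
# keeps only a few of the product_name/sys_vendor checks, so the mapping is
# intentionally incomplete.
def guess_dmi_virtualization():
    product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
    sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
    if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV'):
        return 'kvm'
    if product_name in ('VMware Virtual Platform', 'VMware7,1'):
        return 'VMware'
    if sys_vendor in ('QEMU', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway', 'Nutanix'):
        return 'kvm'
    return None  # nothing recognized; the full module checks many more sources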
gpl-3.0
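The LinuxVirtual collector above accumulates every piece of evidence into the virtualization_tech_guest/host sets, while the found_virt flag ensures that only the first hit decides the reported virtualization_type and virtualization_role. A stripped-down, standalone sketch of that "first match wins" pattern (the two regexes are taken from the module above; the input and helper names are illustrative):

import re

def detect_guest(cpuinfo_lines):
    facts = {}
    guest_tech = set()
    found_virt = False
    checks = [
        (r'^model name.*QEMU Virtual CPU', 'kvm'),
        (r'^vendor_id.*User Mode Linux', 'uml'),
    ]
    for line in cpuinfo_lines:
        for pattern, tech in checks:
            if re.match(pattern, line):
                guest_tech.add(tech)   # every match is recorded
                if not found_virt:     # but only the first one decides
                    facts['virtualization_type'] = tech
                    facts['virtualization_role'] = 'guest'
                    found_virt = True
    facts['virtualization_tech_guest'] = guest_tech
    return facts

print(detect_guest(['model name\t: QEMU Virtual CPU version 2.5+']))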
cgalleguillosm/accasim
accasim/utils/plot_factory.py
1
51706
""" MIT License Copyright (c) 2017 cgalleguillosm, AlessioNetti Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.backends.backend_pdf import PdfPages from math import floor from accasim.utils.reader_class import DefaultReader from accasim.utils.misc import load_config, from_isodatetime_2_timestamp as timestamp_func, str_resources from accasim.utils.file import path_leaf, load_jsonfile from accasim.base.resource_manager_class import Resources from accasim.experimentation.schedule_parser import define_result_parser from accasim.utils.misc import DEFAULT_SIMULATION from copy import deepcopy from os.path import splitext, join from scipy.signal import savgol_filter from os.path import isfile import numpy as np from matplotlib.pyplot import boxplot class PlotFactory: """ A class for plot production and schedule files pre-processing. In this class, some basic algorithms are implemented for pre-processing the schedule files produced through simulation, and for producing some common evaluation plots. """ SCHEDULE_CLASS = 'schedule' BENCHMARK_CLASS = 'benchmark' SLOWDOWN_PLOT = 'slowdown' QUEUE_SIZE_PLOT = 'queue_size' LOAD_RATIO_PLOT = 'load_ratio' EFFICIENCY_PLOT = 'efficiency' SCALABILITY_PLOT = 'scalability' SIMULATION_TIME_PLOT = 'sim_time' SIMULAION_MEMORY_PLOT = 'sim_memory' PLOT_TYPES = { SCHEDULE_CLASS: [SLOWDOWN_PLOT, QUEUE_SIZE_PLOT, LOAD_RATIO_PLOT, EFFICIENCY_PLOT], BENCHMARK_CLASS: [SCALABILITY_PLOT, SIMULATION_TIME_PLOT, SIMULAION_MEMORY_PLOT] } def __init__(self, plot_class, sim_params_fname=None, config=None, resource=None, workload_parser=None, debug=False): """ The constructor for the class. :param plot_class: the plot_class of files to be analyzed. Can be either 'schedule', if schedule files are going to be analyzed, or 'benchmark' if resource usage log files will be analyzed; :params sim_params_fname: :param config: The path to a system configuration file. Needed for the schedule meta-simulation; :param resource: a resource type in the system to be considered. If specified, all resource-related statistics will be computed in regards to this resource alone; :param workload_parser: :param debug: Debug flag. """ self._debug = debug if not (plot_class in self.PLOT_TYPES.keys()): if self._debug: print('Wrong Plot plot_class chosen. 
Selecting schedule plot_class by default...') plot_class = self.SCHEDULE_CLASS self._plot_class = plot_class self._sim_params_fname = sim_params_fname # if sim_params_fname is not None and isfile(sim_params_fname) else None self._config = config self._resource = resource self._workload_parser = workload_parser self._preprocessed = False self._filepaths = [] self._labels = [] self._slowdowns = [] self._queuesizes = [] self._loadratiosX = [] self._loadratiosY = [] self._efficiencies = [] self._simdata = [] self._schedtimes = [] self._mantimes = [] self._simmemory = [] self._scalabilitydataX = [] self._scalabilitydataY = [] self._resource_order = None if self._sim_params_fname is None: self._resource_order = DEFAULT_SIMULATION['RESOURCE_ORDER'] # Base resource availability per-node (never changes) self._base_res = {} # Current resource availability per-node self._sys_res = {} # Aggregated used resources for all nodes self._used_res_sum = {} # Aggregate base resource availability for used nodes only self._avl_res_sum = {} # Aggregated base resource availability for all nodes self._base_res_sum = {} # Amount of currently used nodes self._used_nodes = 0 # Number of total nodes in the system self._total_nodes = 0 def set_files(self, paths, labels): """ Set the paths and labels of the files to be analyzed. :param paths: A list of filepaths related to the files to be analyzed; :param labels: the labels associated to each single file, used in the plots; must have the same length as paths; """ self._preprocessed = False if not isinstance(paths, (list, tuple)): self._filepaths = [paths] self._labels = [labels] else: self._filepaths = paths self._labels = labels if len(self._filepaths) != len(self._labels): if self._debug: print("Filepaths and Labels lists must have the same lengths.") self._labels = [] self._filepaths = [] def pre_process(self, trimSlowdown=True, trimQueueSize=False): """ Performs pre-processing on all specified files, according to their type. If the files are of the schedule type, a meta-simulation is run for each of them, computing data like slowdown, queue size, load ratios and such. If the data is of the benchmark type, the files are simply parsed and their information stored. :param: trimSlowdown: boolean flag. If True, slowdown values equal to 1 will be discarded. Default is True :param: trimQueueSize: boolean flag. If True, queue size values equal to 0 will be discarded. 
Default is False """ if not self._preprocessed: # Perform pre-processing for schedule files if self._plot_class == self.SCHEDULE_CLASS: self._slowdowns = [] self._queuesizes = [] self._loadratiosX = [] self._loadratiosY = [] self._efficiencies = [] self._preprocessed = True for f in self._filepaths: # If an error is encountered on one of the files, the process is aborted if not self._getScheduleData(f, self._config, self._resource, trimSlowdown, trimQueueSize): self._preprocessed = False break # Perform pre-processing for benchmark files elif self._plot_class == self.BENCHMARK_CLASS: self._simdata = [] self._schedtimes = [] self._mantimes = [] self._simmemory = [] self._scalabilitydataX = [] self._scalabilitydataY = [] self._preprocessed = True for f in self._filepaths: if not self._getBenchmarkData(f): self._preprocessed = False break if not self._preprocessed: print("Could not process files, please ensure they are in the correct path and format.") return self._preprocessed def produce_plot(self, type, title='', scale='linear', xlim=(None, None), ylim=(None, None), legend=True, figsize=(7, 5), meansonly=False, alpha=0.005, smooth=30, output='Output.pdf', groups=1, **kwargs): """ Produces a single plot on the pre-processed files. The user can produce plots among the available types. These are: - slowdown: a box-plot distribution plot for slowdown values across test instances - queue_size: a box-plot for queue size in the simulation across test instances - load_ratio: a distribution scatter plot for the load ratio in function of the number of used nodes, for test instances separately; - efficiency: a box-plot for resource allocation efficiency across test instances - scalability: a scalability plot for dispatching methods across test instances - sim_time: a bar plot for the simulation timings across test instances - sim_memory: a bar plot for memory usage across test instances :param type: the type of the plot, must be one of the above; :param title: the title of the plot; :param scale: the scale of the plot (see matplotlib documentation); :param xlim: the left-right bounds for axis scaling, is a tuple; :param ylim: the bottom-top bounds for axis scaling, is a tuple; :param legend: activates the legend, is a boolean; :param figsize: the size of the figure, is a tuple; :param meansonly: triggers the plot of mean values alone in box-plots, is a boolean; :param alpha: the alpha of certain features in plots, in particular for distribution scatter plots; :param smooth: smoothing factor used for the Savitzky-Golay filter in the scalabily plot. The lower the number, the higher the smoothing; :param output: path of the output PDF file; """ if not self._preprocessed: self.pre_process() print("Plot_factory: Files were not pre-processed yet. 
Calling the pre_process method.") if type == self.SLOWDOWN_PLOT and self._plot_class == self.SCHEDULE_CLASS: self.box_plot(self._slowdowns, title=title, ylabel='Slowdown', scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs) elif type == self.QUEUE_SIZE_PLOT and self._plot_class == self.SCHEDULE_CLASS: self.box_plot(self._queuesizes, title=title, ylabel='Queue size', scale=scale, xlim=xlim, ylim=(0, None), figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs) elif type == self.LOAD_RATIO_PLOT and self._plot_class == self.SCHEDULE_CLASS: self.distribution_scatter_plot(self._loadratiosX, self._loadratiosY, title=title, scale=scale, xlim=(-0.01, 1.01), ylim=(-0.01, 1.01), figsize=figsize, alpha=alpha, output=output, **kwargs) elif type == self.EFFICIENCY_PLOT and self._plot_class == self.SCHEDULE_CLASS: self.box_plot(self._efficiencies, title=title, ylabel='Resource efficiency', scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs) elif type == self.SCALABILITY_PLOT and self._plot_class == self.BENCHMARK_CLASS: self.scalability_plot(self._scalabilitydataX, self._scalabilitydataY, title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, smooth=smooth, output=output, **kwargs) elif type == self.SIMULATION_TIME_PLOT and self._plot_class == self.BENCHMARK_CLASS: self.box_plot_times(self._mantimes, self._schedtimes, title=title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, output=output, **kwargs) elif type == self.SIMULAION_MEMORY_PLOT and self._plot_class == self.BENCHMARK_CLASS: self.box_plot_memory(self._simmemory, title=title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, output=output, **kwargs) else: raise Exception("Plot type specified is not valid. Review the documentation for valid plot types.") def _getBenchmarkData(self, filepath): """ Pre-processes a resource usage log file. 
:param filepath: the path to the log file; :return: True if successful, False otherwise; """ if self._debug: print("- Pre-processing file " + filepath + "...") # Tries to read from the file, aborts if an error is encountered try: f = open(filepath) mantimes = [] schedtimes = [] mems = [] simtime = 0 disptime = 0 maxqueuesize = 0 for line in f: # Each line is parsed and values are extracted from it attrs = line.split(';') mantimes.append(float(attrs[4])) schedtimes.append((int(attrs[1]), float(attrs[3]))) mems.append(float(attrs[5])) simtime += float(attrs[2]) disptime += float(attrs[3]) if int(attrs[1]) > maxqueuesize: maxqueuesize = int(attrs[1]) f.close() except Exception as e: raise Exception("Error encountered while pre-processing: " + str(e)) # Certain statistics are computed from the data data = {} data['avgman'] = np.average(np.array(mantimes)) data['avgsched'] = np.average(np.array([el[1] for el in schedtimes])) data['simtime'] = simtime / 1000.0 data['schedtime'] = disptime / 1000.0 data['mantime'] = data['simtime'] - data['schedtime'] data['avgmem'] = np.average(np.array(mems)) data['maxmem'] = np.max(np.array(mems)) # The scalability data is computed through binning: we want to obtain an X, Y set, where in X are the distinct # queue sizes, and in Y are the average times in ms to perform dispatching on such queue sizes binningfactor = 1 bins = int(floor(maxqueuesize / binningfactor)) queuevalues = np.linspace(0, maxqueuesize, bins) mappinglist = [] for i in range(bins): mappinglist.append([]) step = (maxqueuesize) / (bins - 1) for qsize, stime in schedtimes: index = int(floor(qsize / step)) mappinglist[index].append(stime) finallist = [] finalqueuevalues = [] for i in range(len(mappinglist)): l = mappinglist[i] if len(l) > 0: finallist.append(sum(l) / len(l)) finalqueuevalues.append(queuevalues[i]) self._mantimes.append(mantimes) self._schedtimes.append([el[1] for el in schedtimes]) self._simmemory.append(mems) self._simdata.append(data) self._scalabilitydataX.append(finalqueuevalues) self._scalabilitydataY.append(finallist) return True def _getScheduleData(self, filepath, config, resource=None, trimSlowdown=True, trimQueueSize=False): """ Performs pre-processing on a schedule file through a meta-simulation process. :param filepath: The path of the file to be analyzed; :param config: The path to the system configuration file; :param resource: A resource to be considered for resource-related metrics; if none is specified, all resource types are used; :param: trimSlowdown: boolean flag. If True, slowdown values equal to 1 will be discarded. Default is True :param: trimQueueSize: boolean flag. If True, queue size values equal to 0 will be discarded. Default is False :return: True if successful, False otherwise; """ if self._debug: print("- Pre-processing file " + filepath + "...") # Generates the dictionary of system resources from the config file resobject, equiv = self._generateSystemConfig(config) self._base_res = resobject.availability() res_types = resobject._system_resource_types # Makes sure the resource type exists in the system if resource is not None and resource not in resobject._system_resource_types: if self._debug: print("Resource type " + resource + "is not valid. 
Using all available resources...") resource = None # Tries to read from the log file, aborts if an error is encountered try: _sim_params_path = None # If the simulator config path points to a file, it is considered as is if self._sim_params_fname is not None and isfile(self._sim_params_fname): _sim_params_path = self._sim_params_fname # If it is a plain string, it is used as a token for config files in the experimentation elif self._sim_params_fname is not None: _path, _filename = path_leaf(filepath) _sim_params_path = join(_path, self._sim_params_fname) # If it is none, the default_result_parser will use the DEFAULT_SIMULATION config if _sim_params_path is not None: _resource_order = load_jsonfile(_sim_params_path)['RESOURCE_ORDER'] else: _resource_order = self._resource_order if self._workload_parser is not None: reader = DefaultReader(filepath, parser=self._workload_parser, equivalence=equiv) else: reader = DefaultReader(filepath, parser=define_result_parser(_sim_params_path), equivalence=equiv) slowdowns = [] timePoints = set() jobs = {} rev_timePoints = {} if self._debug: print("Loading jobs...") while True: # Jobs are read and their slowdown values are stored job = reader._read() if job is not None: job['start_time'] = timestamp_func(job['start_time']) job['end_time'] = timestamp_func(job['end_time']) job['queue_time'] = timestamp_func(job['queue_time']) _start_time = job['start_time'] _end_time = job['end_time'] _queued_time = job['queue_time'] duration = _end_time - _start_time wait = _start_time - _queued_time slowdown = (wait + duration) / duration if duration != 0 else wait if wait != 0 else 1.0 if slowdown > 1.0 or not trimSlowdown: slowdowns.append(slowdown) job_id = job['job_id'] jobs[job_id] = job # Timepoints for use in the simulation are stored timePoints.add(_queued_time) self._addToDictAsList(rev_timePoints, _queued_time, job_id, 'queue') timePoints.add(_start_time) self._addToDictAsList(rev_timePoints, _start_time, job_id, 'start') if duration > 0: timePoints.add(_end_time) self._addToDictAsList(rev_timePoints, _end_time, job_id, 'end') else: break except Exception as e: raise Exception("Error encountered while pre-processing: " + str(e)) # It may happen that the slowdown list is empty if all jobs have a value equal to 1. In this case we add # a fake value, equal to 1 as well if trimSlowdown and len(slowdowns) == 0: slowdowns.append(1) if self._debug: print("Jobs loaded. Sorting...") # We compute the final set of distinct, ordered timepoints timePoints = sorted(timePoints) timePointsIDX = 0 self._sys_res = deepcopy(self._base_res) self._base_res_sum = {k: sum(self._base_res[n][k] for n in self._base_res) for k in res_types} self._used_res_sum = {k: 0 for k in res_types} self._avl_res_sum = {k: 0 for k in res_types} self._used_nodes = 0 self._total_nodes = len(self._base_res.values()) queue = set() running = set() # Pre-allocating the lists to store performance metrics, for efficiency queued = [0] * len(timePoints) # [] resources = [0] * len(timePoints) # [] run = [0] * len(timePoints) # [] efficiency = [0] * len(timePoints) # [] efficiencyperjob = [0] * len(jobs) # [] efficiencyIDX = 0 if self._debug: print("Sorting done. 
Starting simulation...") # Meta-simulation: goes on until there are no more timepoints to consider while timePointsIDX < len(timePoints): point = timePoints[timePointsIDX] timePointsIDX += 1 # Adds to the queue jobs that were submitted in this timepoint jobstoqueue = rev_timePoints[point]['queue'] # queue += len(jobstoqueue) queue.update(jobstoqueue) # Jobs that have terminated release their resources jobstoend = rev_timePoints[point]['end'] if len(jobstoend) > 0: for j_id in jobstoend: j = jobs[j_id] req, assignations = self._getRequestedResources(_resource_order, j['assignations']) self._deallocate_resources(req, assignations, resource) # running -= len(jobstoend) running = running - jobstoend # Jobs that have to start take their resources from the system jobstostart = rev_timePoints[point]['start'] if len(jobstostart) > 0: for j_id in jobstostart: j = jobs[j_id] if j['end_time'] - j['start_time'] > 0: req, assignations = self._getRequestedResources(_resource_order, j['assignations']) self._allocate_resources(req, assignations, resource) # running += 1 running.add(j_id) # queue -= len(jobstostart) queue = queue - jobstostart # Additionally, we store for every started job its resource allocation efficiency for j_id in jobstostart: j = jobs[j_id] if j['end_time'] - j['start_time'] > 0: req, assignations = self._getRequestedResources(_resource_order, j['assignations']) eff = self._getResourceEfficiency(req, assignations, self._sys_res, resource) efficiencyperjob[efficiencyIDX] = eff efficiencyIDX += 1 # System metrics are computed AFTER dispatching queued[timePointsIDX - 1] = len(queue) # queue run[timePointsIDX - 1] = len(running) # running resources[timePointsIDX - 1] = self._getLoadRatio(resource) efficiency[timePointsIDX - 1] = self._getLoadRatioSelective(resource) if self._debug: print("Simulation done!") if trimQueueSize: queued = [q for q in queued if q != 0] run = [r for r in run if r != 0] # The metrics values for this instance are added to the internal variables self._slowdowns.append(slowdowns) self._queuesizes.append(queued) self._efficiencies.append(efficiencyperjob) self._loadratiosX.append([el[0] for el in efficiency]) self._loadratiosY.append([el[1] for el in efficiency]) return True def _addToDictAsList(self, dict, key, el, type): """ Simple method that adds an element to a dictionary and creates sub-entries if needed. 
:param dict: The target dictionary :param key: The key of the element to add :param el: The element to add :param type: The type of the element to add, used in the sub-dictionary for the key entry :return: None """ if key not in dict: dict[key] = {'queue': set(), 'start': set(), 'end': set()} dict[key][type].add(el) def _allocate_resources(self, req, assignations, resource=None): """ Method that allocates the resources for a certain starting job and updates all data structures related to resource usage :param req: The resource request of the job :param assignations: The list of nodes assigned to the job :param resource: A resource type to be considered for performance metrics (optional) :return: None """ for node in assignations: # If the node goes from the unused to the used state, we update the number of used nodes and the amount # of available resources among the used nodes, for the efficiency plots if resource is None and all(self._sys_res[node][k] == self._base_res[node][k] for k in self._base_res[node].keys()): self._used_nodes += 1 for k, v in self._base_res[node].items(): self._avl_res_sum[k] += v # If a specific resource type is considered, the same condition is triggered only if such resource is used elif resource is not None and self._sys_res[node][resource] == self._base_res[node][resource] and req[resource] > 0: self._used_nodes += 1 self._avl_res_sum[resource] += self._base_res[node][resource] # Updating the per-node currently available resources for k, val in req.items(): self._sys_res[node][k] -= val if self._sys_res[node][k] < 0: self._sys_res[node][k] = 0 if self._debug: print("Caution: resource " + k + " is going below zero.") # Updating the dictionary of per-type currently used resources for k, v in req.items(): self._used_res_sum[k] += v * len(assignations) if self._used_res_sum[k] > self._avl_res_sum[k]: self._used_res_sum[k] = self._avl_res_sum[k] def _deallocate_resources(self, req, assignations, resource): """ Method that de-allocates the resources for a certain starting job and updates all data structures related to resource usage :param req: The resource request of the job :param assignations: The list of nodes assigned to the job :param resource: A resource type to be considered for performance metrics (optional) :return: None """ for node in assignations: for k, val in req.items(): self._sys_res[node][k] += val if self._sys_res[node][k] > self._base_res[node][k]: self._sys_res[node][k] = self._base_res[node][k] if self._debug: print("Caution: resource " + k + " is going beyond its base capacity.") # In this case the check for used-unused nodes must be performed after the resources are de-allocated if resource is None and all(self._sys_res[node][k] == self._base_res[node][k] for k in self._base_res[node].keys()): self._used_nodes -= 1 for k, v in self._base_res[node].items(): self._avl_res_sum[k] -= v elif resource is not None and self._sys_res[node][resource] == self._base_res[node][resource] and req[resource] > 0: self._used_nodes -= 1 self._avl_res_sum[resource] -= self._base_res[node][resource] # The method is specular to allocate_resources and works identically for k, v in req.items(): self._used_res_sum[k] -= v * len(assignations) if self._used_res_sum[k] < 0: self._used_res_sum[k] = 0 def _generateSystemConfig(self, config_path): """ Generates a Resources object from a system configuration file. 
:param config_path: the path to the config file; :return: the Resources object and the resource equivalence; """ try: config = load_config(config_path) equiv = config.pop('equivalence', {}) # PEP 448 - Additional Unpacking Generalizations # python 3.5 and newer if not('node_prefix' in config): config['node_prefix'] = '' resources = Resources(**config) return resources, equiv except Exception as e: if config_path != '': print("Could not load system config: " + str(e)) else: print("A system configuration file must be specified.") exit() return None, None def _getRequestedResources(self, _resource_order, assignations_str): """ TO BE IMPLEMENTED: returns the requested resources for the input job. :param job: the dictionary related to the current job; :return: the dictionary of resources needed by each job unit, and the list of node assignations; """ _assignations_list = assignations_str.split(str_resources.SEPARATOR)[0:-1] _nodes_list = [assign.split(';')[0] for assign in _assignations_list] _request = { k:int(v) for k, v in zip(_resource_order, _assignations_list[0].split(';')[1:])} return _request, _nodes_list def _getResourceEfficiency(self, reqres, nodes, sys_res, resource): """ Computes the resource allocation efficiency metric for a certain input job. This method computed the resource allocation efficiency AFTER dispatching is performed, not before. :param reqres: the dictionary of resources requested by each job unit; :param nodes: the list of node assignations; :param sys_res: the dictionary of system resources; :param resource: the resource type to be considered (if present); :return: the resource allocation efficiency; """ # Computing the amount of used resources by the job if resource is None: used = sum(r * len(nodes) for r in reqres.values()) else: used = reqres[resource] * len(nodes) avl = 0 # Computing the amount of available resources in nodes used by the job for node in set(nodes): if resource is None: avl += sum(r for r in sys_res[node].values()) else: avl += sys_res[node][resource] return used / (avl + used) def _getLoadRatio(self, resource): """ Returns the standard load ratio for the system. :param resource: the resource type to be considered (if present); :return: the load ratio; """ loadratio = 0 if resource is None: loadratio = sum(self._used_res_sum.values()) / sum(self._base_res_sum.values()) elif resource in self._base_res_sum: loadratio = self._used_res_sum[resource] / self._base_res_sum[resource] return loadratio def _getLoadRatioSelective(self, resource): """ Returns the per-step resource allocation efficiency. This is defined as a X,Y pair where X expresses the fraction of used nodes, and Y defines the fraction of used resources in such nodes. :param resource: the resource type to be considered (if present); :return: an X,Y pair expressing the per-step resource allocation efficiency; """ loadratio = 0 if self._used_nodes > 0: if resource is None: loadratio = sum(self._used_res_sum.values()) / sum(self._avl_res_sum.values()) elif resource in self._avl_res_sum: loadratio = self._used_res_sum[resource] / self._avl_res_sum[resource] return self._used_nodes / self._total_nodes, loadratio else: return 0, 0 def _getDistributionStats(self, data): """ Returns some useful distribution statistics for the input data. The mean, minimum, maximum, median, and quartiles for the data are computed. 
:param data: The iterable for the input data; :return: a dictionary of statistics for the data distribution; """ stats = {} stats['avg'] = np.average(data) stats['min'] = np.min(data) stats['max'] = np.max(data) stats['median'] = np.median(data) stats['quartiles'] = np.percentile(data, range(0, 100, 25)) return stats def box_plot(self, data, title='', ylabel='', scale='linear', figsize=(7, 5), meansonly=False, output='Output.pdf', groups=1, **kwargs): """ Produces a box-and-whiskers plot for the input data's distributions. :param data: the input data; must be a list, in which each element is again a list containing all of the data regarding a certain test instance; the ordering must be that of the labels; :param title: the title of the plot; :param ylabel: the Y-axis label; :param scale: the scale of the plot; :param figsize: the size of the figure, is a tuple; :param meansonly: if True only the mean values for each distribution are depicted; :param output: the path to the output file; :param **kwargs: - fig_format: { 'format': eps or pdf, 'dpi': Int number } - xlim: the left-right axis boundaries, is a tuple; - ylim: the bottom-top axis boundaries, is a tuple; """ color_cycler = ['b', 'r', 'y', 'g', 'c', 'm', 'k', 'w'] hatch_cycler = ['/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'] ncycle = 2 fontsize = 12 plt.rc('xtick', labelsize=fontsize) plt.rc('ytick', labelsize=fontsize) N = len(data) ylim = kwargs.pop('ylim', None) xlim = kwargs.pop('xlim', None) show_legend = kwargs.pop('show_legend', False) spacing = 0.2 ind = [i * spacing for i in np.arange(N)] width = 0.1 markersize = 250 linecol = 'black' tricol = 'black' vertlinecol = 'gray' fig, ax = plt.subplots(figsize=figsize) c_group = 0 c = groups r_hatch = len(hatch_cycler) color_list = [] hatch_list = [] for i, d in enumerate(data): color_list.append(color_cycler[c_group]) hatch_list.append(hatch_cycler[len(hatch_cycler) - r_hatch] * ncycle) c -= 1 if c == 0: c_group += 1 c = groups r_hatch -= 1 if r_hatch == 0: ncycle += 1 r_hatch = len(hatch_cycler) bp = ax.boxplot(data, labels=self._labels, patch_artist=True, sym="", whis=[0, 100], showmeans=True, showfliers=False) for patch, color, hatch in zip(bp['boxes'], color_list, hatch_list): patch.set_facecolor(color) patch.set_alpha(0.75) patch.set_hatch(hatch) # add some text for labels, title and axes ticks ax.set_ylabel(ylabel, fontsize=fontsize) ax.set_xlabel('Dispatching method', fontsize=fontsize) ax.set_title(title) ax.set_yscale(scale) if show_legend: ax.legend(bp['boxes'], self._labels, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=len(self._labels) // 2, mode="expand", borderaxespad=0.) if ylim: ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) if xlim: ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) plt.tight_layout() plt.grid(linestyle=':', color='gray', zorder=0) plt.show() fig_format = kwargs.pop('fig_format', {}) fig.savefig(output, **fig_format) def box_plot_times(self, dataman, datasched, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, output='Output.pdf'): """ Produces a bar plot for the timings in the simulations, across test instances. The bars will depict the average time required to perform dispatching in each simulation step, and the time required to perform simulation-related tasks in the simulation. :param dataman: the data for the time required in each step to perform simulation-related tasks. 
Is a list, where each element is again a list containing the data for a certain test instance; :param datasched: the data for the time required in each step to perform dispatching. Is a list, where each element is again a list containing the data for a certain test instance; :param title: the title of the plot; :param scale: the scale of the plot; :param xlim: the left-right boundaries for the plot, is a tuple; :param ylim: the bottom-top boundaries for the plot, is a tuple; :param figsize: the size of the figure, is a tuple; :param legend: enables or disables visualization of the legend; :param output: the path to the output file; """ fontsize = 12 plt.rc('xtick', labelsize=fontsize) plt.rc('ytick', labelsize=fontsize) N = len(dataman) spacing = 0.2 ind = [i * spacing for i in np.arange(N)] width = 0.1 markersize = 250 fig, ax = plt.subplots(figsize=figsize) for i in range(N): avgman = np.average(np.array(dataman[i])) avgsched = np.average(np.array(datasched[i])) if i == 0: ax.add_patch(patches.Rectangle((ind[i], 0), width, avgman, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75)) # , label='Simulation')) ax.add_patch(patches.Rectangle((ind[i], avgman), width, avgsched, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75, label='Dispatching decision')) else: ax.add_patch(patches.Rectangle((ind[i], 0), width, avgman, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75)) ax.add_patch(patches.Rectangle((ind[i], avgman), width, avgsched, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75)) ax.scatter(ind[i] + width / 2, avgman + avgsched, marker='_', s=markersize / 4, zorder=0, color='black') # add some text for labels, title and axes ticks ax.set_ylabel('Time [ms]', fontsize=fontsize) ax.set_xlabel('Dispatching method', fontsize=fontsize) ax.set_title(title) ax.set_xticks([i + width / 2 for i in ind]) if legend: ax.legend() ax.set_xticklabels(self._labels) ax.set_yscale(scale) ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) plt.grid(linestyle=':', color='gray', zorder=0) plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize) plt.show() ff = PdfPages(output) ff.savefig(fig) ff.close() def box_plot_memory(self, data, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, output='Output.pdf'): """ Produces a bar plot for the memory usage in the simulations, across test instances. The bars depict average and maximum memory usage in the simulation. :param data: the data for memory usage in each simulation step. Is a list, where each element is again a list containing the data for a certain test instance; :param title: the title of the plot; :param scale: the scale of the plot; :param xlim: the left-right boundaries for the plot, is a tuple; :param ylim: the bottom-top boundaries for the plot, is a tuple; :param figsize: the size of the figure, is a tuple; :param legend: enables or disables visualization of the legend; :param output: the path to the output file; """ fontsize = 12 plt.rc('xtick', labelsize=fontsize) plt.rc('ytick', labelsize=fontsize) N = len(data) spacing = 0.2 ind = [i * spacing for i in np.arange(N)] width = 0.1 markersize = 250 fig, ax = plt.subplots(figsize=figsize) for i in range(N): avgmem = np.average(np.array(data[i])) maxmem = np.max(np.array(data[i])) if i == 0: ax.add_patch(patches.Rectangle((ind[i], 0), width, avgmem, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75, label='Avg. 
Mem')) ax.add_patch(patches.Rectangle((ind[i], avgmem), width, maxmem - avgmem, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75, label='Max. Mem')) else: ax.add_patch(patches.Rectangle((ind[i], 0), width, avgmem, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75)) ax.add_patch(patches.Rectangle((ind[i], avgmem), width, maxmem - avgmem, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75)) ax.scatter(ind[i] + width / 2, maxmem, marker='_', s=markersize / 4, zorder=0, color='black') ax.set_ylabel('Average Memory Usage [MB]', fontsize=fontsize) ax.set_xlabel('Dispatching method', fontsize=fontsize) ax.set_title(title) ax.set_xticks([i + width / 2 for i in ind]) if legend: ax.legend() ax.set_xticklabels(self._labels) ax.set_yscale(scale) ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) plt.grid(linestyle=':', color='gray', zorder=0) plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize) plt.show() ff = PdfPages(output) ff.savefig(fig) ff.close() def scalability_plot(self, xdata, ydata, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, smooth=30, linestyles=None, markers=None, output='Output.pdf'): """ Creates a scalability plot for all test instances, where X represents the queue size, and Y the average time required by each dispatching method in the instances. :param xdata: the X data, containing the queue sizes for each test instance; is a list, where each element contains a list with the data for each test instance; :param ydata: the Y data, containing the average times required to perform dispatching in each test instance; is a list, where each element contains a list with the data for each test instance; :param title: the title of the plot; :param scale: the scale of the plot; :param xlim: the left-right boundaries for the plot, is a tuple; :param ylim: the bottom-top boundaries for the plot, is a tuple; :param figsize: the size of the figure, is a tuple; :param legend: enables or disables visualization of the legend; :param smooth: smoothing factor for the Savitzky-Golay filter. 
The lower the number, the higher the smoothing; :param output: the path of the output file; """ fontsize = 12 plt.rc('xtick', labelsize=fontsize) plt.rc('ytick', labelsize=fontsize) if not linestyles: linestyles = ('-', '-', '--', '--', '-.', '-.', ':', ':') if not markers: markers = (None, 'o', None, '^', None, 's', None, 'p') numstyles = len(linestyles) fig, ax = plt.subplots(figsize=figsize) divideFactor = smooth for i in range(len(xdata)): markeroffset = floor(max(xdata[i]) / 20 + i * 2) if divideFactor > 1 and len(ydata[i]) >= divideFactor: win_len = floor(len(ydata[i]) / divideFactor) win_len += (win_len + 1) % 2 if win_len < 5: win_len = 5 yfiltered = savgol_filter(ydata[i], win_len, 3) else: yfiltered = ydata[i] ax.plot(xdata[i], yfiltered, label=self._labels[i], linestyle=linestyles[i % numstyles], marker=markers[i % numstyles], markevery=markeroffset, zorder=2 if markers[i % numstyles] is None else 0) ax.set_ylabel('Time [ms]', fontsize=fontsize) ax.set_xlabel('Queue size', fontsize=fontsize) ax.set_title(title) if legend: ax.legend() ax.set_yscale(scale) ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) plt.grid(linestyle=':', color='gray', zorder=0) plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize) plt.show() ff = PdfPages(output) ff.savefig(fig) ff.close() def distribution_scatter_plot(self, xdata, ydata, title='', scale='linear', xlim=(0, 1.05), ylim=(0, 1.05), figsize=(7, 5), alpha=0.005, output='Output.pdf'): """ Creates a distribution scatter plot for the system's resource efficiency. The X values represent the amount of used nodes in a certain time step, while the Y values represent the fraction of used resources in such nodes. Darker areas of the plot represent values with higher frequency. The method creates one plot per test instance, automatically. :param xdata: :param ydata: :param alpha: the alpha to be used for each dot in the plot; :param title: the title of the plot; :param scale: the scale of the plot; :param xlim: the left-right boundaries for the plot, is a tuple; :param ylim: the bottom-top boundaries for the plot, is a tuple; :param figsize: the size of the figure, is a tuple; :param output: the path to the output files: the label for each test instance will be automatically added for each file; """ for i in range(len(xdata)): fig, ax = plt.subplots(figsize=figsize) ax.scatter(xdata[i], ydata[i], color='black', alpha=alpha, s=5) ax.set_title(title) ax.set_xlabel('Used Nodes') ax.set_ylabel('Used Resources') ax.set_yscale(scale) ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) ax.grid(True) plt.show() splitoutput = splitext(output) ff = PdfPages(splitoutput[0] + '-' + self._labels[i] + '.pdf') ff.savefig(fig) ff.close() def get_preprocessed_benchmark_data(self): """ Returns all of the pre-processed benchmark-related data. A tuple is returned; each element of the tuple is related to a specific kind of metric that was processed. Also, each element of the tuple is a list, with as many entries as the files that were processed, in the same order. Each element of these lists contains then the data related to a specific metric, for a specific test instance. All data is stored in standard Python lists. :return: a tuple in which every element is a list containing, in each element, a specific kind of data regarding one of the test instances. 
The tuple contains, in this order: - the resource usage statistics' dictionaries; - the lists of dispatching times for each time step; - the lists of management times for each time step; - the lists of memory usage values for each time step; - the X scalability data containing the queue size for each test instance; - the Y scalability data containing the average dispatching times for each test instance; """ if not self._preprocessed or self._plot_class != self.BENCHMARK_CLASS: return None, None, None, None, None, None else: return self._simdata, self._schedtimes, self._mantimes, self._simmemory, self._scalabilitydataX, self._scalabilitydataY def get_preprocessed_schedule_data(self): """ Returns all of the pre-processed schedule-related data. A tuple is returned; each element of the tuple is related to a specific kind of metric that was processed. Also, each element of the tuple is a list, with as many entries as the files that were processed, in the same order. Each element of these lists contains then the data related to a specific metric, for a specific test instance. All data is stored in standard Python lists. :return: a tuple in which every element is a list containing, in each element, the data regarding one of the test instances. The tuple contains, in this order: - the slowdown values for jobs; - the queue sizes for all time steps; - the resource allocation efficiencies for all jobs; - the X data regarding the load ratios (fraction of used nodes) for all time steps; - the Y data regarding the load ratios (fraction of used resources) for all time steps; """ if not self._preprocessed or self._plot_class != self.SCHEDULE_CLASS: return None, None, None, None, None else: return self._slowdowns, self._queuesizes, self._efficiencies, self._loadratiosX, self._loadratiosY if __name__ == '__main__': # This is an example. It should not be executed here, but in a script in the project's root, where also # basic_example.py is, so that all imports can be resolved correctly. resultpath = ['Path/to/benchmark/file', 'Path/to/benchmark/file2'] resultlabel = ['Label', 'Label2'] plots = PlotFactory('benchmark') plots.set_files(resultpath, resultlabel) plots.pre_process() plots.produce_plot(type='scalability', title='My Scalability Plot')
mit
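As a reference for the meta-simulation above, here is a self-contained sketch of the per-job slowdown that _getScheduleData computes, using the same formula and the same handling of zero-duration jobs (timestamps are plain integers for illustration):

def slowdown(queue_time, start_time, end_time):
    duration = end_time - start_time
    wait = start_time - queue_time
    if duration != 0:
        return (wait + duration) / duration
    return wait if wait != 0 else 1.0

assert slowdown(0, 10, 20) == 2.0   # the job waited as long as it ran
assert slowdown(5, 5, 15) == 1.0    # the job started as soon as it was queued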
dmsimard/ansible
test/support/windows-integration/plugins/action/win_template.py
269
1198
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase from ansible.plugins.action.template import ActionModule as TemplateActionModule # Even though TemplateActionModule inherits from ActionBase, we still need to # directly inherit from ActionBase to appease the plugin loader. class ActionModule(TemplateActionModule, ActionBase): DEFAULT_NEWLINE_SEQUENCE = '\r\n'
gpl-3.0
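The action plugin above overrides a single knob of the POSIX template action: the default newline sequence. A hypothetical illustration of what that default means for rendered output (the actual conversion happens inside the inherited template action, not in this snippet):

posix_rendered = u'line1\nline2\n'
windows_rendered = posix_rendered.replace(u'\n', u'\r\n')
assert windows_rendered == u'line1\r\nline2\r\n'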
asterisk/ari-py
ari/client.py
1
11612
# # Copyright (c) 2013, Digium, Inc. # """ARI client library. """ import json import logging import urlparse import swaggerpy.client from ari.model import * log = logging.getLogger(__name__) class Client(object): """ARI Client object. :param base_url: Base URL for accessing Asterisk. :param http_client: HTTP client interface. """ def __init__(self, base_url, http_client): url = urlparse.urljoin(base_url, "ari/api-docs/resources.json") self.swagger = swaggerpy.client.SwaggerClient( url, http_client=http_client) self.repositories = { name: Repository(self, name, api) for (name, api) in self.swagger.resources.items()} # Extract models out of the events resource events = [api['api_declaration'] for api in self.swagger.api_docs['apis'] if api['name'] == 'events'] if events: self.event_models = events[0]['models'] else: self.event_models = {} self.websockets = set() self.event_listeners = {} self.exception_handler = \ lambda ex: log.exception("Event listener threw exception") def __getattr__(self, item): """Exposes repositories as fields of the client. :param item: Field name """ repo = self.get_repo(item) if not repo: raise AttributeError( "'%r' object has no attribute '%s'" % (self, item)) return repo def close(self): """Close this ARI client. This method will close any currently open WebSockets, and close the underlying Swaggerclient. """ for ws in self.websockets: ws.send_close() self.swagger.close() def get_repo(self, name): """Get a specific repo by name. :param name: Name of the repo to get :return: Repository, or None if not found. :rtype: ari.model.Repository """ return self.repositories.get(name) def __run(self, ws): """Drains all messages from a WebSocket, sending them to the client's listeners. :param ws: WebSocket to drain. """ # TypeChecker false positive on iter(callable, sentinel) -> iterator # Fixed in plugin v3.0.1 # noinspection PyTypeChecker for msg_str in iter(lambda: ws.recv(), None): msg_json = json.loads(msg_str) if not isinstance(msg_json, dict) or 'type' not in msg_json: log.error("Invalid event: %s" % msg_str) continue listeners = list(self.event_listeners.get(msg_json['type'], [])) for listener in listeners: # noinspection PyBroadException try: callback, args, kwargs = listener args = args or () kwargs = kwargs or {} callback(msg_json, *args, **kwargs) except Exception as e: self.exception_handler(e) def run(self, apps): """Connect to the WebSocket and begin processing messages. This method will block until all messages have been received from the WebSocket, or until this client has been closed. :param apps: Application (or list of applications) to connect for :type apps: str or list of str """ if isinstance(apps, list): apps = ','.join(apps) ws = self.swagger.events.eventWebsocket(app=apps) self.websockets.add(ws) try: self.__run(ws) finally: ws.close() self.websockets.remove(ws) def on_event(self, event_type, event_cb, *args, **kwargs): """Register callback for events with given type. :param event_type: String name of the event to register for. :param event_cb: Callback function :type event_cb: (dict) -> None :param args: Arguments to pass to event_cb :param kwargs: Keyword arguments to pass to event_cb """ listeners = self.event_listeners.setdefault(event_type, list()) for cb in listeners: if event_cb == cb[0]: listeners.remove(cb) callback_obj = (event_cb, args, kwargs) listeners.append(callback_obj) client = self class EventUnsubscriber(object): """Class to allow events to be unsubscribed. """ def close(self): """Unsubscribe the associated event callback. 
""" if callback_obj in client.event_listeners[event_type]: client.event_listeners[event_type].remove(callback_obj) return EventUnsubscriber() def on_object_event(self, event_type, event_cb, factory_fn, model_id, *args, **kwargs): """Register callback for events with the given type. Event fields of the given model_id type are passed along to event_cb. If multiple fields of the event have the type model_id, a dict is passed mapping the field name to the model object. :param event_type: String name of the event to register for. :param event_cb: Callback function :type event_cb: (Obj, dict) -> None or (dict[str, Obj], dict) -> :param factory_fn: Function for creating Obj from JSON :param model_id: String id for Obj from Swagger models. :param args: Arguments to pass to event_cb :param kwargs: Keyword arguments to pass to event_cb """ # Find the associated model from the Swagger declaration event_model = self.event_models.get(event_type) if not event_model: raise ValueError("Cannot find event model '%s'" % event_type) # Extract the fields that are of the expected type obj_fields = [k for (k, v) in event_model['properties'].items() if v['type'] == model_id] if not obj_fields: raise ValueError("Event model '%s' has no fields of type %s" % (event_type, model_id)) def extract_objects(event, *args, **kwargs): """Extract objects of a given type from an event. :param event: Event :param args: Arguments to pass to the event callback :param kwargs: Keyword arguments to pass to the event callback """ # Extract the fields which are of the expected type obj = {obj_field: factory_fn(self, event[obj_field]) for obj_field in obj_fields if event.get(obj_field)} # If there's only one field in the schema, just pass that along if len(obj_fields) == 1: if obj: obj = obj.values()[0] else: obj = None event_cb(obj, event, *args, **kwargs) return self.on_event(event_type, extract_objects, *args, **kwargs) def on_channel_event(self, event_type, fn, *args, **kwargs): """Register callback for Channel related events :param event_type: String name of the event to register for. :param fn: Callback function :type fn: (Channel, dict) -> None or (list[Channel], dict) -> None :param args: Arguments to pass to fn :param kwargs: Keyword arguments to pass to fn """ return self.on_object_event(event_type, fn, Channel, 'Channel', *args, **kwargs) def on_bridge_event(self, event_type, fn, *args, **kwargs): """Register callback for Bridge related events :param event_type: String name of the event to register for. :param fn: Callback function :type fn: (Bridge, dict) -> None or (list[Bridge], dict) -> None :param args: Arguments to pass to fn :param kwargs: Keyword arguments to pass to fn """ return self.on_object_event(event_type, fn, Bridge, 'Bridge', *args, **kwargs) def on_playback_event(self, event_type, fn, *args, **kwargs): """Register callback for Playback related events :param event_type: String name of the event to register for. :param fn: Callback function :type fn: (Playback, dict) -> None or (list[Playback], dict) -> None :param args: Arguments to pass to fn :param kwargs: Keyword arguments to pass to fn """ return self.on_object_event(event_type, fn, Playback, 'Playback', *args, **kwargs) def on_live_recording_event(self, event_type, fn, *args, **kwargs): """Register callback for LiveRecording related events :param event_type: String name of the event to register for. 
:param fn: Callback function :type fn: (LiveRecording, dict) -> None or (list[LiveRecording], dict) -> None :param args: Arguments to pass to fn :param kwargs: Keyword arguments to pass to fn """ return self.on_object_event(event_type, fn, LiveRecording, 'LiveRecording', *args, **kwargs) def on_stored_recording_event(self, event_type, fn, *args, **kwargs): """Register callback for StoredRecording related events :param event_type: String name of the event to register for. :param fn: Callback function :type fn: (StoredRecording, dict) -> None or (list[StoredRecording], dict) -> None :param args: Arguments to pass to fn :param kwargs: Keyword arguments to pass to fn """ return self.on_object_event(event_type, fn, StoredRecording, 'StoredRecording', *args, **kwargs) def on_endpoint_event(self, event_type, fn, *args, **kwargs): """Register callback for Endpoint related events :param event_type: String name of the event to register for. :param fn: Callback function :type fn: (Endpoint, dict) -> None or (list[Endpoint], dict) -> None :param args: Arguments to pass to fn :param kwargs: Keyword arguments to pass to fn """ return self.on_object_event(event_type, fn, Endpoint, 'Endpoint', *args, **kwargs) def on_device_state_event(self, event_type, fn, *args, **kwargs): """Register callback for DeviceState related events :param event_type: String name of the event to register for. :param fn: Callback function :type fn: (DeviceState, dict) -> None or (list[DeviceState], dict) -> None :param args: Arguments to pass to fn :param kwargs: Keyword arguments to pass to fn """ return self.on_object_event(event_type, fn, DeviceState, 'DeviceState', *args, **kwargs) def on_sound_event(self, event_type, fn, *args, **kwargs): """Register callback for Sound related events :param event_type: String name of the event to register for. :param fn: Sound function :type fn: (Sound, dict) -> None or (list[Sound], dict) -> None :param args: Arguments to pass to fn :param kwargs: Keyword arguments to pass to fn """ return self.on_object_event(event_type, fn, Sound, 'Sound', *args, **kwargs)
bsd-3-clause
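A minimal usage sketch for the client above, assuming swaggerpy's SynchronousHttpClient and a local Asterisk instance with ARI enabled; the credentials and the 'hello-world' Stasis application name are illustrative:

import swaggerpy.http_client
from ari.client import Client

http_client = swaggerpy.http_client.SynchronousHttpClient()
http_client.set_basic_auth('localhost', 'ari_user', 'ari_pass')
client = Client('http://localhost:8088/', http_client)

def on_start(channel, event):
    # Answer and greet any channel that enters the application
    channel.answer()
    channel.play(media='sound:hello-world')

client.on_channel_event('StasisStart', on_start)
client.run(apps='hello-world')  # blocks, draining the events WebSocket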
KISSMonX/micropython
tests/extmod/uctypes_native_le.py
10
2037
# This test is exactly like uctypes_le.py, but uses native structure layout. # Codepaths for packed vs native structures are different. This test only works # on little-endian machine (no matter if 32 or 64 bit). import sys import uctypes if sys.byteorder != "little": print("SKIP") sys.exit() desc = { "s0": uctypes.UINT16 | 0, "sub": (0, { "b0": uctypes.UINT8 | 0, "b1": uctypes.UINT8 | 1, }), "arr": (uctypes.ARRAY | 0, uctypes.UINT8 | 2), "arr2": (uctypes.ARRAY | 0, 2, {"b": uctypes.UINT8 | 0}), "bitf0": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 8 << uctypes.BF_LEN, "bitf1": uctypes.BFUINT16 | 0 | 8 << uctypes.BF_POS | 8 << uctypes.BF_LEN, "bf0": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 4 << uctypes.BF_LEN, "bf1": uctypes.BFUINT16 | 0 | 4 << uctypes.BF_POS | 4 << uctypes.BF_LEN, "bf2": uctypes.BFUINT16 | 0 | 8 << uctypes.BF_POS | 4 << uctypes.BF_LEN, "bf3": uctypes.BFUINT16 | 0 | 12 << uctypes.BF_POS | 4 << uctypes.BF_LEN, "ptr": (uctypes.PTR | 0, uctypes.UINT8), "ptr2": (uctypes.PTR | 0, {"b": uctypes.UINT8 | 0}), } data = bytearray(b"01") S = uctypes.struct(desc, uctypes.addressof(data), uctypes.NATIVE) #print(S) print(hex(S.s0)) assert hex(S.s0) == "0x3130" #print(S.sub.b0) print(S.sub.b0, S.sub.b1) assert (S.sub.b0, S.sub.b1) == (0x30, 0x31) try: S[0] assert False, "Can't index struct" except TypeError: print("TypeError") print("arr:", S.arr[0], S.arr[1]) assert (S.arr[0], S.arr[1]) == (0x30, 0x31) print("arr of struct:", S.arr2[0].b, S.arr2[1].b) assert (S.arr2[0].b, S.arr2[1].b) == (0x30, 0x31) try: S.arr[2] assert False, "Out of bounds index" except IndexError: print("IndexError") print("bf:", S.bitf0, S.bitf1) assert (S.bitf0, S.bitf1) == (0x30, 0x31) print("bf 4bit:", S.bf3, S.bf2, S.bf1, S.bf0) assert (S.bf3, S.bf2, S.bf1, S.bf0) == (3, 1, 3, 0) # Write access S.sub.b0 = ord("2") print(data) assert bytes(data) == b"21" S.bf3 = 5 print(data) assert bytes(data) == b"2Q"
mit
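The bitfield expectations in the test above follow from plain shift-and-mask arithmetic on the underlying 16-bit word, which is 0x3130 for b"01" on a little-endian machine; a pure-Python sketch:

word = 0x3130  # uint16 view of b"01", little-endian

def bits(value, pos, length):
    return (value >> pos) & ((1 << length) - 1)

assert bits(word, 0, 8) == 0x30                                   # bitf0
assert bits(word, 8, 8) == 0x31                                   # bitf1
assert [bits(word, p, 4) for p in (12, 8, 4, 0)] == [3, 1, 3, 0]  # bf3..bf0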
williamsandrew/kubernetes
hack/jenkins/test-history/gen_html_test.py
12
2491
#!/usr/bin/env python # Copyright 2016 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for gen_html.""" import json import os import shutil import tempfile import unittest import gen_html TEST_DATA = { "test1": {"kubernetes-release": [{"build": 3, "failed": False, "time": 3.52}, {"build": 4, "failed": True, "time": 63.21}], "kubernetes-debug": [{"build": 5, "failed": False, "time": 7.56}, {"build": 6, "failed": False, "time": 8.43}], }, "test2": {"kubernetes-debug": [{"build": 6, "failed": True, "time": 3.53}]}, } class GenHtmlTest(unittest.TestCase): def gen_html(self, *args): return gen_html.gen_html(TEST_DATA, *args)[0] def testGenHtml(self): html = self.gen_html('') self.assertIn("test1", html) self.assertIn("test2", html) self.assertIn("release", html) self.assertIn("debug", html) def testGenHtmlFilter(self): html = self.gen_html('release') self.assertIn("release", html) self.assertIn('skipped">\ntest2', html) self.assertNotIn("debug", html) def testGenHtmlFilterExact(self): html = self.gen_html('release', True) self.assertNotIn('debug', html) def testMain(self): temp_dir = tempfile.mkdtemp(prefix='kube-test-hist-') try: tests_json = os.path.join(temp_dir, 'tests.json') with open(tests_json, 'w') as f: json.dump(TEST_DATA, f) gen_html.main(['--suites', '--prefixes', ',rel,deb', '--output-dir', temp_dir, '--input', tests_json]) for page in ('index', 'suite-kubernetes-debug', 'tests', 'tests-rel', 'tests-deb'): self.assertTrue(os.path.exists('%s/%s.html' % (temp_dir, page))) finally: shutil.rmtree(temp_dir) if __name__ == '__main__': unittest.main()
apache-2.0
cjhak/b2share
invenio/ext/session/legacy_session.py
12
3341
# -*- coding: utf-8 -*- # This file is part of Invenio. # Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Implementation of legacy Invenio methods for Flask session.""" from flask import current_app, request from flask.sessions import SessionMixin from flask_login import current_user from werkzeug.datastructures import CallbackDict class Session(CallbackDict, SessionMixin): """Implement compatible legacy Invenio session.""" def __init__(self, initial=None, sid=None): """Initialize session with optional default value.""" self.sid = sid self.logging_in = False self.modified = initial is not None def _on_update(d): d.modified = True CallbackDict.__init__(self, initial, _on_update) def need_https(self): """Check if the user was previously authenticated. If True session identifier need to be sent via HTTPS. """ return request.cookies.get( current_app.session_cookie_name + 'stub', 'NO') == 'HTTPS' def delete(self, clear=True): """Delete the session.""" if clear: self.clear() def invalidate(self): """Declare the session as invalid.""" self._invalid = 1 def set_remember_me(self, remember_me=True): """Set or unset the ``_remember_me`` flag. :param remember_me: True if the session cookie should last one day or until the browser is closed. """ self._remember_me = remember_me self['_permanent'] = remember_me def save_ip(self, request): """Save IP for current scheme.""" remote_ip = request.remote_addr scheme_a = '_http_ip' if request.scheme == 'http' else '_https_ip' scheme_b = '_https_ip' if request.scheme == 'http' else '_http_ip' if scheme_a not in self: self[scheme_a] = remote_ip if scheme_b not in self: self[scheme_b] = None def check_ip(self, request): """Check that session is used from the same IP where it was created.""" remote_ip = request.remote_addr if self.get('_{0}_ip'.format(request.scheme), remote_ip) != remote_ip: return False return True def _get_uid(self): return self.get('user_id', -1) def _set_uid(self, uid): if self.get('user_id') != uid: self.logging_in = True self['user_id'] = self['_uid'] = self['uid'] = uid def _get_user_info(self): return current_user uid = property(_get_uid, _set_uid) user_info = property(_get_user_info) del _get_uid, _set_uid, _get_user_info
gpl-2.0
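A minimal sketch of how the IP-pinning pair save_ip()/check_ip() above behaves, assuming the Session class is importable; FakeRequest is a hypothetical stand-in modeling only the two request attributes the methods read.

# Hypothetical stand-in for a Flask request; only 'scheme' and
# 'remote_addr' are consulted by save_ip()/check_ip().
class FakeRequest(object):
    def __init__(self, scheme, remote_addr):
        self.scheme = scheme
        self.remote_addr = remote_addr

session = Session()
session.save_ip(FakeRequest('http', '10.0.0.1'))              # records '_http_ip'
assert session.check_ip(FakeRequest('http', '10.0.0.1'))      # same IP: accepted
assert not session.check_ip(FakeRequest('http', '10.0.0.2'))  # new IP: rejected

Note that save_ip() also pre-seeds the other scheme's slot with None, so after the call above an HTTPS request fails check_ip() until a real '_https_ip' is stored.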
motion2015/a3
openedx/core/djangoapps/course_groups/migrations/0003_auto__add_coursecohort__add_coursecohortssettings.py
100
7464
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'CourseCohort' db.create_table('course_groups_coursecohort', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('course_user_group', self.gf('django.db.models.fields.related.OneToOneField')(related_name='cohort', unique=True, to=orm['course_groups.CourseUserGroup'])), ('assignment_type', self.gf('django.db.models.fields.CharField')(default='manual', max_length=20)), )) db.send_create_signal('course_groups', ['CourseCohort']) # Adding model 'CourseCohortsSettings' db.create_table('course_groups_coursecohortssettings', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('is_cohorted', self.gf('django.db.models.fields.BooleanField')(default=False)), ('course_id', self.gf('xmodule_django.models.CourseKeyField')(unique=True, max_length=255, db_index=True)), ('cohorted_discussions', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('always_cohort_inline_discussions', self.gf('django.db.models.fields.BooleanField')(default=True)), )) db.send_create_signal('course_groups', ['CourseCohortsSettings']) def backwards(self, orm): # Deleting model 'CourseCohort' db.delete_table('course_groups_coursecohort') # Deleting model 'CourseCohortsSettings' db.delete_table('course_groups_coursecohortssettings') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'course_groups.coursecohort': { 'Meta': {'object_name': 'CourseCohort'}, 'assignment_type': ('django.db.models.fields.CharField', [], {'default': "'manual'", 'max_length': '20'}), 'course_user_group': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'cohort'", 'unique': 'True', 'to': "orm['course_groups.CourseUserGroup']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'course_groups.coursecohortssettings': { 'Meta': {'object_name': 'CourseCohortsSettings'}, 'always_cohort_inline_discussions': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'cohorted_discussions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_cohorted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'course_groups.courseusergroup': { 'Meta': {'unique_together': "(('name', 'course_id'),)", 'object_name': 'CourseUserGroup'}, 'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}), 'group_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'course_groups'", 'symmetrical': 'False', 'to': "orm['auth.User']"}) }, 'course_groups.courseusergrouppartitiongroup': { 'Meta': {'object_name': 'CourseUserGroupPartitionGroup'}, 'course_user_group': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['course_groups.CourseUserGroup']", 'unique': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'group_id': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'partition_id': ('django.db.models.fields.IntegerField', [], {}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) } } complete_apps = ['course_groups']
agpl-3.0
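For readers who find South's frozen-ORM syntax hard to scan, this is roughly the models.py declaration that the forwards() migration above materializes. A reconstruction sketch only, derived from the frozen field definitions; the real edx-platform models also carry choices, docstrings, and helper methods not shown here.

from django.db import models
from xmodule_django.models import CourseKeyField


class CourseCohort(models.Model):
    # One cohort record per CourseUserGroup, reachable as group.cohort
    course_user_group = models.OneToOneField('CourseUserGroup',
                                             related_name='cohort')
    assignment_type = models.CharField(max_length=20, default='manual')


class CourseCohortsSettings(models.Model):
    # Course-wide cohorting switches, keyed by course id
    is_cohorted = models.BooleanField(default=False)
    course_id = CourseKeyField(max_length=255, unique=True, db_index=True)
    cohorted_discussions = models.TextField(null=True, blank=True)
    always_cohort_inline_discussions = models.BooleanField(default=True)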
JamesLiAndroid/django-blog-practice
blogproject/blog/migrations/0001_initial.py
1
1880
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-07-14 06:16 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=70)), ('body', models.TextField()), ('create_time', models.DateTimeField()), ('modified_time', models.DateTimeField()), ('excerpt', models.CharField(blank=True, max_length=200)), ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')), ], ), migrations.CreateModel( name='Tag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ], ), migrations.AddField( model_name='post', name='tags', field=models.ManyToManyField(blank=True, to='blog.Tag'), ), ]
gpl-3.0
drufat/sympy
sympy/geometry/tests/test_point.py
17
8537
from __future__ import division from sympy import I, Rational, Symbol, pi, sqrt from sympy.geometry import Line, Point, Point2D, Point3D, Line3D from sympy.geometry.entity import rotate, scale, translate from sympy.matrices import Matrix from sympy.utilities.pytest import raises def test_point(): x = Symbol('x', real=True) y = Symbol('y', real=True) x1 = Symbol('x1', real=True) x2 = Symbol('x2', real=True) y1 = Symbol('y1', real=True) y2 = Symbol('y2', real=True) half = Rational(1, 2) p1 = Point(x1, x2) p2 = Point(y1, y2) p3 = Point(0, 0) p4 = Point(1, 1) p5 = Point(0, 1) assert p1 in p1 assert p1 not in p2 assert p2.y == y2 assert (p3 + p4) == p4 assert (p2 - p1) == Point(y1 - x1, y2 - x2) assert p4*5 == Point(5, 5) assert -p2 == Point(-y1, -y2) raises(ValueError, lambda: Point(3, I)) raises(ValueError, lambda: Point(2*I, I)) raises(ValueError, lambda: Point(3 + I, I)) assert Point(34.05, sqrt(3)) == Point(Rational(681, 20), sqrt(3)) assert Point.midpoint(p3, p4) == Point(half, half) assert Point.midpoint(p1, p4) == Point(half + half*x1, half + half*x2) assert Point.midpoint(p2, p2) == p2 assert p2.midpoint(p2) == p2 assert Point.distance(p3, p4) == sqrt(2) assert Point.distance(p1, p1) == 0 assert Point.distance(p3, p2) == sqrt(p2.x**2 + p2.y**2) assert Point.taxicab_distance(p4, p3) == 2 p1_1 = Point(x1, x1) p1_2 = Point(y2, y2) p1_3 = Point(x1 + 1, x1) assert Point.is_collinear(p3) assert Point.is_collinear(p3, p4) assert Point.is_collinear(p3, p4, p1_1, p1_2) assert Point.is_collinear(p3, p4, p1_1, p1_3) is False assert Point.is_collinear(p3, p3, p4, p5) is False line = Line(Point(1,0), slope = 1) raises(TypeError, lambda: Point.is_collinear(line)) raises(TypeError, lambda: p1_1.is_collinear(line)) assert p3.intersection(Point(0, 0)) == [p3] assert p3.intersection(p4) == [] x_pos = Symbol('x', real=True, positive=True) p2_1 = Point(x_pos, 0) p2_2 = Point(0, x_pos) p2_3 = Point(-x_pos, 0) p2_4 = Point(0, -x_pos) p2_5 = Point(x_pos, 5) assert Point.is_concyclic(p2_1) assert Point.is_concyclic(p2_1, p2_2) assert Point.is_concyclic(p2_1, p2_2, p2_3, p2_4) assert Point.is_concyclic(p2_1, p2_2, p2_3, p2_5) is False assert Point.is_concyclic(p4, p4 * 2, p4 * 3) is False assert p4.scale(2, 3) == Point(2, 3) assert p3.scale(2, 3) == p3 assert p4.rotate(pi, Point(0.5, 0.5)) == p3 assert p1.__radd__(p2) == p1.midpoint(p2).scale(2, 2) assert (-p3).__rsub__(p4) == p3.midpoint(p4).scale(2, 2) assert p4 * 5 == Point(5, 5) assert p4 / 5 == Point(0.2, 0.2) raises(ValueError, lambda: Point(0, 0) + 10) # Point differences should be simplified assert Point(x*(x - 1), y) - Point(x**2 - x, y + 1) == Point(0, -1) a, b = Rational(1, 2), Rational(1, 3) assert Point(a, b).evalf(2) == \ Point(a.n(2), b.n(2)) raises(ValueError, lambda: Point(1, 2) + 1) # test transformations p = Point(1, 0) assert p.rotate(pi/2) == Point(0, 1) assert p.rotate(pi/2, p) == p p = Point(1, 1) assert p.scale(2, 3) == Point(2, 3) assert p.translate(1, 2) == Point(2, 3) assert p.translate(1) == Point(2, 1) assert p.translate(y=1) == Point(1, 2) assert p.translate(*p.args) == Point(2, 2) # Check invalid input for transform raises(ValueError, lambda: p3.transform(p3)) raises(ValueError, lambda: p.transform(Matrix([[1, 0], [0, 1]]))) def test_point3D(): x = Symbol('x', real=True) y = Symbol('y', real=True) x1 = Symbol('x1', real=True) x2 = Symbol('x2', real=True) x3 = Symbol('x3', real=True) y1 = Symbol('y1', real=True) y2 = Symbol('y2', real=True) y3 = Symbol('y3', real=True) half = Rational(1, 2) p1 = Point3D(x1, x2, x3) p2 = 
Point3D(y1, y2, y3) p3 = Point3D(0, 0, 0) p4 = Point3D(1, 1, 1) p5 = Point3D(0, 1, 2) assert p1 in p1 assert p1 not in p2 assert p2.y == y2 assert (p3 + p4) == p4 assert (p2 - p1) == Point3D(y1 - x1, y2 - x2, y3 - x3) assert p4*5 == Point3D(5, 5, 5) assert -p2 == Point3D(-y1, -y2, -y3) assert Point(34.05, sqrt(3)) == Point(Rational(681, 20), sqrt(3)) assert Point3D.midpoint(p3, p4) == Point3D(half, half, half) assert Point3D.midpoint(p1, p4) == Point3D(half + half*x1, half + half*x2, half + half*x3) assert Point3D.midpoint(p2, p2) == p2 assert p2.midpoint(p2) == p2 assert Point3D.distance(p3, p4) == sqrt(3) assert Point3D.distance(p1, p1) == 0 assert Point3D.distance(p3, p2) == sqrt(p2.x**2 + p2.y**2 + p2.z**2) p1_1 = Point3D(x1, x1, x1) p1_2 = Point3D(y2, y2, y2) p1_3 = Point3D(x1 + 1, x1, x1) # according to the description in the docs, points are collinear # if they lie on a single line. Thus a single point should always # be collinear assert Point3D.are_collinear(p3) assert Point3D.are_collinear(p3, p4) assert Point3D.are_collinear(p3, p4, p1_1, p1_2) assert Point3D.are_collinear(p3, p4, p1_1, p1_3) is False assert Point3D.are_collinear(p3, p3, p4, p5) is False assert p3.intersection(Point3D(0, 0, 0)) == [p3] assert p3.intersection(p4) == [] assert p4 * 5 == Point3D(5, 5, 5) assert p4 / 5 == Point3D(0.2, 0.2, 0.2) raises(ValueError, lambda: Point3D(0, 0, 0) + 10) # Point differences should be simplified assert Point3D(x*(x - 1), y, 2) - Point3D(x**2 - x, y + 1, 1) == \ Point3D(0, -1, 1) a, b = Rational(1, 2), Rational(1, 3) assert Point(a, b).evalf(2) == \ Point(a.n(2), b.n(2)) raises(ValueError, lambda: Point(1, 2) + 1) # test transformations p = Point3D(1, 1, 1) assert p.scale(2, 3) == Point3D(2, 3, 1) assert p.translate(1, 2) == Point3D(2, 3, 1) assert p.translate(1) == Point3D(2, 1, 1) assert p.translate(z=1) == Point3D(1, 1, 2) assert p.translate(*p.args) == Point3D(2, 2, 2) # Test __new__ assert Point3D(Point3D(1, 2, 3), 4, 5, evaluate=False) == Point3D(1, 2, 3) # Test length property returns correctly assert p.length == 0 assert p1_1.length == 0 assert p1_2.length == 0 # Test are_collinear type error raises(TypeError, lambda: Point3D.are_collinear(p, x)) # Test are_coplanar planar2 = Point3D(1, -1, 1) planar3 = Point3D(-1, 1, 1) assert Point3D.are_coplanar(p, planar2, planar3) == True assert Point3D.are_coplanar(p, planar2, planar3, p3) == False raises(ValueError, lambda: Point3D.are_coplanar(p, planar2)) planar2 = Point3D(1, 1, 2) planar3 = Point3D(1, 1, 3) raises(ValueError, lambda: Point3D.are_coplanar(p, planar2, planar3)) # Test Intersection assert planar2.intersection(Line3D(p, planar3)) == [Point3D(1, 1, 2)] # Test Scale assert planar2.scale(1, 1, 1) == planar2 assert planar2.scale(2, 2, 2, planar3) == Point3D(1, 1, 1) assert planar2.scale(1, 1, 1, p3) == planar2 # Test Transform identity = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) assert p.transform(identity) == p trans = Matrix([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 0, 1]]) assert p.transform(trans) == Point3D(2, 2, 2) raises(ValueError, lambda: p.transform(p)) raises(ValueError, lambda: p.transform(Matrix([[1, 0], [0, 1]]))) # Test Equals assert p.equals(x1) == False # Test __sub__ p_2d = Point(0, 0) raises(ValueError, lambda: (p - p_2d)) def test_Point2D(): # Test Distance p1 = Point2D(1, 5) p2 = Point2D(4, 2.5) p3 = (6, 3) assert p1.distance(p2) == sqrt(61)/2 assert p2.distance(p3) == sqrt(17)/2 def test_issue_9214(): p1 = Point3D(4, -2, 6) p2 = Point3D(1, 2, 3) p3 = Point3D(7, 2, 3) 
assert Point3D.are_collinear(p1, p2, p3) is False def test_transform(): p = Point(1, 1) assert p.transform(rotate(pi/2)) == Point(-1, 1) assert p.transform(scale(3, 2)) == Point(3, 2) assert p.transform(translate(1, 2)) == Point(2, 3) assert Point(1, 1).scale(2, 3, (4, 5)) == \ Point(-2, -7) assert Point(1, 1).translate(4, 5) == \ Point(5, 6) def test_concyclic_doctest_bug(): p1, p2 = Point(-1, 0), Point(1, 0) p3, p4 = Point(0, 1), Point(-1, 2) assert Point.is_concyclic(p1, p2, p3) assert not Point.is_concyclic(p1, p2, p3, p4)
bsd-3-clause
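A short worked example of the collinearity and midpoint behavior the tests above assert, runnable in a sympy session; the specific points are chosen here for illustration.

from sympy import Rational
from sympy.geometry import Point

a, b, c = Point(0, 0), Point(1, 1), Point(2, 2)
assert Point.is_collinear(a, b, c)               # all three lie on y = x
assert not Point.is_collinear(a, b, Point(1, 0))

# midpoint() averages coordinates exactly, staying in the rationals
assert a.midpoint(b) == Point(Rational(1, 2), Rational(1, 2))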
sfinktah/iTerm2
tools/ply/ply-3.4/test/yacc_error4.py
174
1562
# ----------------------------------------------------------------------------- # yacc_error4.py # # Attempt to define a rule named 'error' # ----------------------------------------------------------------------------- import sys if ".." not in sys.path: sys.path.insert(0,"..") import ply.yacc as yacc from calclex import tokens # Parsing rules precedence = ( ('left','PLUS','MINUS'), ('left','TIMES','DIVIDE'), ('right','UMINUS'), ) # dictionary of names names = { } def p_statement_assign(t): 'statement : NAME EQUALS expression' names[t[1]] = t[3] def p_statement_expr(t): 'statement : expression' print(t[1]) def p_expression_binop(t): '''expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression''' if t[2] == '+' : t[0] = t[1] + t[3] elif t[2] == '-': t[0] = t[1] - t[3] elif t[2] == '*': t[0] = t[1] * t[3] elif t[2] == '/': t[0] = t[1] / t[3] def p_expression_uminus(t): 'expression : MINUS expression %prec UMINUS' t[0] = -t[2] def p_expression_group(t): 'expression : LPAREN expression RPAREN' t[0] = t[2] def p_expression_number(t): 'expression : NUMBER' t[0] = t[1] def p_expression_name(t): 'expression : NAME' try: t[0] = names[t[1]] except LookupError: print("Undefined name '%s'" % t[1]) t[0] = 0 def p_error_handler(t): 'error : NAME' pass def p_error(t): pass yacc.yacc()
gpl-2.0
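yacc_error4.py is a negative test: in yacc grammars the symbol 'error' is reserved for error recovery, so giving it an ordinary production (p_error_handler above) should be rejected by ply.yacc. For contrast, a sketch of the legitimate use of the reserved token, written against the same calclex tokens; the rule name is illustrative.

def p_expression_error(t):
    'expression : LPAREN error RPAREN'
    # 'error' soaks up the bad tokens between the parentheses; the
    # parser resynchronizes at the closing RPAREN and keeps going.
    print("Syntax error inside parentheses")
    t[0] = 0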
grocsvs/grocsvs
src/grocsvs/stages/refine_grid_search_breakpoints.py
1
13463
# FOR GRID SEARCH CANDIDATES import itertools import numpy import os import pandas import scipy.stats from grocsvs import step from grocsvs import structuralvariants from grocsvs.stages import sv_candidates class CombineRefinedBreakpointsStep(step.StepChunk): @staticmethod def get_steps(options): yield CombineRefinedBreakpointsStep(options) def __init__(self, options): self.options = options def __str__(self): return ".".join([self.__class__.__name__]) def outpaths(self, final): directory = self.results_dir if final \ else self.working_dir paths = { "refined_pairs": os.path.join(directory, "refined_pairs.tsv") } return paths def run(self): inputs = [] chroms = self.options.reference.chroms for chromx, chromy in itertools.product(chroms, chroms): if self.options.reference.compare_chroms(chromx, chromy) < 0: continue input_step = RefineGridSearchBreakpointsStep(self.options, chromx, chromy) inpath = input_step.outpaths(final=True)["refined_pairs"] try: inputs.append(pandas.read_table(inpath)) except pandas.io.common.EmptyDataError: pass if len(inputs) == 0: raise Exception("No candidate SVs discovered.") combined = pandas.concat(inputs) combined["chromx"] = combined["chromx"].astype("string") combined["chromy"] = combined["chromy"].astype("string") combined.to_csv(self.outpaths(final=False)["refined_pairs"], sep="\t", index=False) class RefineGridSearchBreakpointsStep(step.StepChunk): """ Takes the rough grid search (aka barcode overlaps) candidates, then performs breakpoint refinement to find the best potential breakpoint in the expected orientation """ # TODO: refactor so that this and the final refine breakpoints steps share # most of the refinement code @staticmethod def get_steps(options): chroms = options.reference.chroms for chromx, chromy in itertools.product(chroms, chroms): if options.reference.compare_chroms(chromx, chromy) < 0: continue yield RefineGridSearchBreakpointsStep(options, chromx, chromy) def __init__(self, options, chromx, chromy): self.options = options self.chromx = chromx self.chromy = chromy def __str__(self): return ".".join([self.__class__.__name__, self.chromx, self.chromy]) def outpaths(self, final): directory = self.results_dir if final \ else self.working_dir paths = { "refined_pairs": os.path.join(directory, "refined_pairs.{}.{}.tsv".format(self.chromx, self.chromy)) } return paths def run(self): outpath = self.outpaths(final=False)["refined_pairs"] events = self.load_events() if len(events) > 0: refined = refine_events(events, self.options, self.logger) refined.to_csv(outpath, sep="\t", index=False) else: open(outpath, "w") def load_events(self): significant_events = [] cur_events = [] for sample, dataset in self.options.iter_10xdatasets(): if self.options.reference.compare_chroms(self.chromx, self.chromy) < 0: continue input_step = sv_candidates.SVCandidatesStep(self.options, sample, dataset, self.chromx, self.chromy) inpath = input_step.outpaths(final=True)["svs"] try: sample_events = pandas.read_table(inpath) if len(sample_events) > 0: cur_events.append(sample_events) except pandas.io.common.EmptyDataError: pass if len(cur_events) > 0: significant_events = combine_nearby_events(pandas.concat(cur_events)) significant_events = significant_events[["chromx", "x", "chromy", "y", "orientation"]] significant_events["chromx"] = significant_events["chromx"].astype("string") significant_events["chromy"] = significant_events["chromy"].astype("string") return significant_events else: return [] def combine_nearby_events(table, max_distance=5000): """ 2d-clustering of 
breakpoints (ie pairs of breakENDs) """ if len(table) == 0: return table combined_tables = [] table = table.reset_index(drop=True) for orientation, cur_table in table.groupby("orientation"): # it's already a copy, but this will suppress a warning cur_table = cur_table.copy() points = [(row.x, row.y, row.Index) for row in cur_table.itertuples()] clusters = structuralvariants.do_free_clustering(points, max_dist=max_distance) cur_table["cluster"] = 0 for i, cluster in enumerate(clusters): for point in cluster: cur_table.loc[point[2], "cluster"] = i cur_combined = cur_table.groupby("cluster").aggregate( {"chromx": lambda x:x.iloc[0], "chromy": lambda x:x.iloc[0], "x": numpy.mean, "y": numpy.mean, "orientation": lambda x:x.iloc[0], }) combined_tables.append(cur_combined) combined_table = pandas.concat(combined_tables, ignore_index=True) combined_table["x"] = combined_table["x"].astype(int) combined_table["y"] = combined_table["y"].astype(int) return combined_table def refine_events(events, options, logger): # TODO: gah refinement_dist1 = -20000 refinement_dist2 = 20000 refinement_extend = 20000 quantification_dist1 = -500 quantification_dist2 = 5000 good_bc_counts_by_dataset, barcode_frequencies_by_dataset = get_barcode_info(options) results = [] count = 0 for i, event in events.iterrows(): logger.log("{}:{}::{}:{}{}".format(event["chromx"], event["x"], event["chromy"], event["y"], event["orientation"])) if count % 10 == 0: logger.log("{} of {} ({:.0%})".format(count, len(events), count/float(len(events)))) count += 1 # First get better breakpoints refined = refine_breakpoint( event["chromx"], event["x"], event["chromy"], event["y"], event["orientation"], options, refinement_dist1, refinement_dist2, refinement_extend) if refined is None: continue newx, newy = refined # Next quantify the event based on the better breakpoint loci quantification = quantify_breakpoint( event["chromx"], newx, event["chromy"], newy, event["orientation"], options, good_bc_counts_by_dataset, barcode_frequencies_by_dataset, quantification_dist1, quantification_dist2) quantification["original_x"] = event["x"] quantification["original_y"] = event["y"] results.append(quantification) return pandas.DataFrame(results) def get_shared_frags(options, sample, dataset, chromx, x, chromy, y, orientation, dist1, dist2): fragsx, fragsy, merged = structuralvariants.get_supporting_fragments_new( options, sample, dataset, chromx, x, chromy, y, orientation, dist1, dist2) bcx = set(fragsx["bc"]) bcy = set(fragsy["bc"]) common_barcodes = bcx.intersection(bcy) shared_fragsx = fragsx.loc[fragsx["bc"].isin(common_barcodes)] shared_fragsy = fragsy.loc[fragsy["bc"].isin(common_barcodes)] return shared_fragsx, shared_fragsy def refine_breakpoint(chromx, x, chromy, y, orientation, options, dist1, dist2, extend): shared_fragsx = [] shared_fragsy = [] # because all we're concerned with for refinement is the fragments # with common barcodes across the breakpoint, we'll do refinement # with all datasets without worrying if a given dataset supports # the event for sample, dataset in options.iter_10xdatasets(): cur_fragsx, cur_fragsy = get_shared_frags( options, sample, dataset, chromx, x, chromy, y, orientation, dist1, dist2) shared_fragsx.append(cur_fragsx) shared_fragsy.append(cur_fragsy) shared_fragsx = pandas.concat(shared_fragsx) shared_fragsy = pandas.concat(shared_fragsy) if len(shared_fragsx) < 1: return None breakpointx = get_breakpoint(shared_fragsx, x, orientation[0], extend) breakpointy = get_breakpoint(shared_fragsy, 
y, orientation[1], extend) return breakpointx, breakpointy def get_breakpoint(frags, pos, orientation, extend=20000): density = numpy.zeros(extend*2) for i, frag in frags.iterrows(): curstart = max(frag["start_pos"]-(pos-extend), 0) curend = min(frag["end_pos"]-(pos-extend), len(density)) density[int(curstart):int(curend)] += 1 peaks = numpy.where(density>(0.9*density.max()))[0] if orientation == "+": peak = peaks[0] elif orientation == "-": peak = peaks[-1] else: raise Exception("unknown orientation: {}".format(orientation)) diff = density[peak] - density dist = numpy.sqrt(numpy.abs(numpy.arange(len(density))-peak)) score = numpy.ma.masked_array(diff / dist.astype(float), mask=False) score.mask[numpy.isnan(score)] = True if orientation == "+": score.mask[numpy.arange(0, peak)] = True elif orientation == "-": score.mask[numpy.arange(peak, len(score))] = True else: raise Exception("unknown orientation: {}".format(orientation)) breakpoint = numpy.where(score==score.max())[0][0] breakpoint += pos - extend return breakpoint def get_barcode_info(options): good_bc_counts_by_dataset = {} barcode_frequencies_by_dataset = {} for sample, dataset in options.iter_10xdatasets(): sample_info = options.sample_info(sample.name) dataset_info = sample_info[dataset.id] good_bc_counts_by_dataset[dataset.id] = dataset_info["good_bc_count"] sample_info = options.sample_info(sample.name) dataset_info = sample_info[dataset.id] barcode_frequencies = dataset_info["barcode_read_totals"] barcode_frequencies /= numpy.array(barcode_frequencies.sum()).astype(float) barcode_frequencies = barcode_frequencies.values barcode_frequencies_by_dataset[dataset.id] = barcode_frequencies return good_bc_counts_by_dataset, barcode_frequencies_by_dataset def quantify_breakpoint(chromx, x, chromy, y, orientation, options, good_bc_counts_by_dataset, barcode_frequencies_by_dataset, dist1, dist2, with_phasing=False): cur_result = {} cur_result["chromx"] = chromx cur_result["new_x"] = x cur_result["chromy"] = chromy cur_result["new_y"] = y cur_result["orientation"] = orientation cur_result["shared"] = 0 cur_result["total"] = 0 for sample, dataset in options.iter_10xdatasets(): barcode_frequencies = barcode_frequencies_by_dataset[dataset.id] fragsx, fragsy, merged = structuralvariants.get_supporting_fragments_new( options, sample, dataset, chromx, x, chromy, y, orientation, dist1, dist2, with_phasing=with_phasing) bcx = set(fragsx["bc"]) bcy = set(fragsy["bc"]) common_barcodes = bcx.intersection(bcy) total_barcodes = bcx.union(bcy) cur_result["{}_total".format(sample.name)] = len(total_barcodes) if len(common_barcodes) < 1: continue good_bc_count = good_bc_counts_by_dataset[dataset.id] contingency_table = numpy.array([[len(common_barcodes), len(bcx-bcy)], [len(bcy-bcx), good_bc_count-len(total_barcodes)]]) p_fisher = scipy.stats.fisher_exact(contingency_table, alternative="greater")[1] p_resampling = structuralvariants.score_event( len(bcx), len(bcy), len(common_barcodes), barcode_frequencies, resamples=100) cur_result["{}_shared".format(sample.name)] = len(common_barcodes) cur_result["{}_p_fisher".format(sample.name)] = p_fisher cur_result["{}_p_resampling".format(sample.name)] = p_resampling if with_phasing: cur_result["{}_x_hap0".format(sample.name)] = (merged["hap_x"].isin([0,2])).sum() cur_result["{}_x_hap1".format(sample.name)] = (merged["hap_x"] == 1).sum() cur_result["{}_y_hap0".format(sample.name)] = (merged["hap_y"].isin([0,2])).sum() cur_result["{}_y_hap1".format(sample.name)] = (merged["hap_y"] == 1).sum() # TODO: constants 
should be constant across steps! if (p_resampling < 1e-4) and (len(common_barcodes)/float(len(total_barcodes)) > 0.10): cur_result["shared"] += len(common_barcodes) cur_result["total"] += len(total_barcodes) cur_result["p_resampling"] = min(cur_result.get("{}_p_resampling".format(sample_name), 1.0) for sample_name in options.samples) return pandas.Series(cur_result)
mit
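The core of get_breakpoint() above is a coverage-density argument: stack the barcode-sharing fragments over a window around the candidate position, locate the coverage plateau, and take the plateau edge implied by the breakend orientation. A stripped-down numpy sketch of that first stage with toy fragment intervals (the real function goes on to score the coverage drop-off against distance from the peak):

import numpy

# Toy fragments as (start, end) offsets within a 0..200 window; in the
# pipeline these come from fragments sharing barcodes across the event.
fragments = [(10, 120), (30, 150), (40, 160), (90, 200)]

density = numpy.zeros(200)
for start, end in fragments:
    density[start:end] += 1

# Candidate breakpoints: positions within 90% of the maximum coverage.
peaks = numpy.where(density > 0.9 * density.max())[0]
# '+' orientation takes the left edge of the plateau, '-' the right edge.
print(peaks[0], peaks[-1])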
henn/hil
haas/cli.py
2
22801
# Copyright 2013-2014 Massachusetts Open Cloud Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS # IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language # governing permissions and limitations under the License. """This module implements the HaaS command line tool.""" from haas import config, server from haas.config import cfg import inspect import json import os import requests import sys import urllib import schema import abc from functools import wraps command_dict = {} usage_dict = {} MIN_PORT_NUMBER = 1 MAX_PORT_NUMBER = 2**16 - 1 class HTTPClient(object): """An HTTP client. Makes HTTP requests on behalf of the HaaS CLI. Responsible for adding authentication information to the request. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def request(method, url, data=None): """Make an HTTP request Makes an HTTP request on URL `url` with method `method` and request body `data` (if supplied). May add authentication or other backend-specific information to the request. Parameters ---------- method : str The HTTP method to use, e.g. 'GET', 'PUT', 'POST'... url : str The URL to act on data : str, optional The body of the request Returns ------- requests.Response The HTTP response """ class RequestsHTTPClient(requests.Session, HTTPClient): """An HTTPClient which uses the requests library. Note that this doesn't do anything over `requests.Session`; that class already implements the required interface. We declare it only for clarity. """ class KeystoneHTTPClient(HTTPClient): """An HTTPClient which authenticates with Keystone. This uses an instance of python-keystoneclient's Session class to do its work. """ def __init__(self, session): """Create a KeystoneHTTPClient Parameters ---------- session : keystoneauth1.Session A keystone session to make the requests with """ self.session = session def request(self, method, url, data=None): """Make an HTTP request using keystone for authentication. Smooths over the differences between python-keystoneclient's request method and the one specified by HTTPClient """ # We have to import this here, since we can't assume the library # is available from global scope. from keystoneauth1.exceptions.http import HttpError try: # The order of these parameters is different than what # we expect, but the names are the same: return self.session.request(method=method, url=url, data=data) except HttpError as e: return e.response # An instance of HTTPClient, which will be used to make the request. http_client = None def cmd(f): """A decorator for CLI commands. This decorator firstly adds the function to a dictionary of valid CLI commands, secondly adds exception handling for when the user passes the wrong number of arguments, and thirdly generates a 'usage' description and puts it in the usage dictionary. """ @wraps(f) def wrapped(*args, **kwargs): try: f(*args, **kwargs) except TypeError: # TODO TypeError is probably too broad here. sys.stderr.write('Invalid arguments. 
Usage:\n') help(f.__name__) command_dict[f.__name__] = wrapped def get_usage(f): args, varargs, _, _ = inspect.getargspec(f) showee = [f.__name__] + ['<%s>' % name for name in args] args = ' '.join(['<%s>' % name for name in args]) if varargs: showee += ['<%s...>' % varargs] return ' '.join(showee) usage_dict[f.__name__] = get_usage(f) return wrapped def setup_http_client(): """Set `http_client` to a valid instance of `HTTPClient` Sets http_client to an object which makes HTTP requests with authentication. It chooses an authentication backend as follows: 1. If the environment variables HAAS_USERNAME and HAAS_PASSWORD are defined, it will use HTTP basic auth, with the corresponding user name and password. 2. If the `python-keystoneclient` library is installed, and the environment variables: * OS_AUTH_URL * OS_USERNAME * OS_PASSWORD * OS_PROJECT_NAME are defined, Keystone is used. 3. Otherwise, do not supply authentication information. This may be extended with other backends in the future. """ global http_client # First try basic auth: basic_username = os.getenv('HAAS_USERNAME') basic_password = os.getenv('HAAS_PASSWORD') if basic_username is not None and basic_password is not None: http_client = RequestsHTTPClient() http_client.auth = (basic_username, basic_password) return # Next try keystone: try: from keystoneauth1.identity import v3 from keystoneauth1 import session os_auth_url = os.getenv('OS_AUTH_URL') os_password = os.getenv('OS_PASSWORD') os_username = os.getenv('OS_USERNAME') os_user_domain_id = os.getenv('OS_USER_DOMAIN_ID') or 'default' os_project_name = os.getenv('OS_PROJECT_NAME') os_project_domain_id = os.getenv('OS_PROJECT_DOMAIN_ID') or 'default' if None in (os_auth_url, os_username, os_password, os_project_name): raise KeyError("Required openstack environment variable not set.") auth = v3.Password(auth_url=os_auth_url, username=os_username, password=os_password, project_name=os_project_name, user_domain_id=os_user_domain_id, project_domain_id=os_project_domain_id) sess = session.Session(auth=auth) http_client = KeystoneHTTPClient(sess) return except (ImportError, KeyError): pass # Finally, fall back to no authentication: http_client = requests.Session() def check_status_code(response): if response.status_code < 200 or response.status_code >= 300: sys.stderr.write('Unexpected status code: %d\n' % response.status_code) sys.stderr.write('Response text:\n') sys.stderr.write(response.text + "\n") else: sys.stdout.write(response.text + "\n") # TODO: This function's name is no longer very accurate. As soon as it is # safe, we should change it to something more generic. def object_url(*args): # Prefer an environment variable for getting the endpoint if available. url = os.environ.get('HAAS_ENDPOINT') if url is None: url = cfg.get('client', 'endpoint') for arg in args: url += '/' + urllib.quote(arg,'') return url def do_request(method, url, data={}): """Helper function for making HTTP requests against the API. Uses the global variable `http_client` to make the request. Arguments: `method` - the http method, as a string: 'GET', 'PUT', 'POST'... `url` - The url to make the request to `data` - the body of the request. 
""" return check_status_code(http_client.request(method, url, data=data)) def do_put(url, data={}): return do_request('PUT', url, data=json.dumps(data)) def do_post(url, data={}): return do_request('POST', url, data=json.dumps(data)) def do_get(url): return do_request('GET', url) def do_delete(url): return do_request('DELETE', url) @cmd def serve(port): try: port = schema.And(schema.Use(int), lambda n: MIN_PORT_NUMBER <= n <= MAX_PORT_NUMBER).validate(port) except schema.SchemaError: sys.exit('Error: Invaid port. Must be in the range 1-65535.') except Exception as e: sys.exit('Unxpected Error!!! \n %s' % e) """Start the HaaS API server""" if cfg.has_option('devel', 'debug'): debug = cfg.getboolean('devel', 'debug') else: debug = False # We need to import api here so that the functions within it get registered # (via `rest_call`), though we don't use it directly: from haas import model, api, rest server.init(stop_consoles=True) rest.serve(port, debug=debug) @cmd def serve_networks(): """Start the HaaS networking server""" from haas import model, deferred from time import sleep server.init() server.register_drivers() server.validate_state() model.init_db() while True: # Empty the journal until it's empty; then delay so we don't tight # loop. while deferred.apply_networking(): pass sleep(2) @cmd def user_create(username, password, is_admin): """Create a user <username> with password <password>. <is_admin> may be either "admin" or "regular", and determines whether the user has administrative priveledges. """ url = object_url('/auth/basic/user', username) if is_admin not in ('admin', 'regular'): raise TypeError("is_admin must be either 'admin' or 'regular'") do_put(url, data={ 'password': password, 'is_admin': is_admin == 'admin', }) @cmd def network_create(network, creator, access, net_id): """Create a link-layer <network>. See docs/networks.md for details""" url = object_url('network', network) do_put(url, data={'creator': creator, 'access': access, 'net_id': net_id}) @cmd def network_create_simple(network, project): """Create <network> owned by project. 
Specific case of network_create""" url = object_url('network', network) do_put(url, data={'creator': project, 'access': project, 'net_id': ""}) @cmd def network_delete(network): """Delete a <network>""" url = object_url('network', network) do_delete(url) @cmd def user_delete(username): """Delete the user <username>""" url = object_url('/auth/basic/user', username) do_delete(url) @cmd def list_projects(): """List all projects""" url = object_url('projects') do_get(url) @cmd def user_add_project(user, project): """Add <user> to <project>""" url = object_url('/auth/basic/user', user, 'add_project') do_post(url, data={'project': project}) @cmd def user_remove_project(user, project): """Remove <user> from <project>""" url = object_url('/auth/basic/user', user, 'remove_project') do_post(url, data={'project': project}) @cmd def project_create(project): """Create a <project>""" url = object_url('project', project) do_put(url) @cmd def project_delete(project): """Delete <project>""" url = object_url('project', project) do_delete(url) @cmd def headnode_create(headnode, project, base_img): """Create a <headnode> in a <project> with <base_img>""" url = object_url('headnode', headnode) do_put(url, data={'project': project, 'base_img': base_img}) @cmd def headnode_delete(headnode): """Delete <headnode>""" url = object_url('headnode', headnode) do_delete(url) @cmd def project_connect_node(project, node): """Connect <node> to <project>""" url = object_url('project', project, 'connect_node') do_post(url, data={'node': node}) @cmd def project_detach_node(project, node): """Detach <node> from <project>""" url = object_url('project', project, 'detach_node') do_post(url, data={'node': node}) @cmd def headnode_start(headnode): """Start <headnode>""" url = object_url('headnode', headnode, 'start') do_post(url) @cmd def headnode_stop(headnode): """Stop <headnode>""" url = object_url('headnode', headnode, 'stop') do_post(url) @cmd def node_register(node, subtype, *args): """Register a node named <node>, along with its OBM info. If the OBM is of type ipmi, provide the arguments: "ipmi", <hostname>, <ipmi-username>, <ipmi-password> """ obm_api = "http://schema.massopencloud.org/haas/v0/obm/" obm_types = [ "ipmi", "mock" ] # Currently the classes are hardcoded. # In principle this should come from api.py. # In the future, an API call to list which plugins are active will be added. 
if subtype in obm_types: if len(args) == 3: obminfo = {"type": obm_api+subtype, "host": args[0], "user": args[1], "password": args[2] } else: sys.stderr.write('ERROR: subtype '+subtype+' requires exactly 3 arguments\n') sys.stderr.write('<hostname> <ipmi-username> <ipmi-password>\n') return else: sys.stderr.write('ERROR: Wrong OBM subtype supplied\n') sys.stderr.write('Supported OBM sub-types: ipmi, mock\n') return url = object_url('node', node) do_put(url, data={"obm": obminfo}) @cmd def node_delete(node): """Delete <node>""" url = object_url('node', node) do_delete(url) @cmd def node_power_cycle(node): """Power cycle <node>""" url = object_url('node', node, 'power_cycle') do_post(url) @cmd def node_power_off(node): """Power off <node>""" url = object_url('node', node, 'power_off') do_post(url) @cmd def node_register_nic(node, nic, macaddr): """Register existence of a <nic> with the given <macaddr> on the given <node>""" url = object_url('node', node, 'nic', nic) do_put(url, data={'macaddr':macaddr}) @cmd def node_delete_nic(node, nic): """Delete a <nic> on a <node>""" url = object_url('node', node, 'nic', nic) do_delete(url) @cmd def headnode_create_hnic(headnode, nic): """Create a <nic> on the given <headnode>""" url = object_url('headnode', headnode, 'hnic', nic) do_put(url) @cmd def headnode_delete_hnic(headnode, nic): """Delete a <nic> on a <headnode>""" url = object_url('headnode', headnode, 'hnic', nic) do_delete(url) @cmd def node_connect_network(node, nic, network, channel): """Connect <node> to <network> on given <nic> and <channel>""" url = object_url('node', node, 'nic', nic, 'connect_network') do_post(url, data={'network': network, 'channel': channel}) @cmd def node_detach_network(node, nic, network): """Detach <node> from the given <network> on the given <nic>""" url = object_url('node', node, 'nic', nic, 'detach_network') do_post(url, data={'network': network}) @cmd def headnode_connect_network(headnode, nic, network): """Connect <headnode> to <network> on given <nic>""" url = object_url('headnode', headnode, 'hnic', nic, 'connect_network') do_post(url, data={'network':network}) @cmd def headnode_detach_network(headnode, hnic): """Detach <headnode> from the network on given <nic>""" url = object_url('headnode', headnode, 'hnic', hnic, 'detach_network') do_post(url) @cmd def switch_register(switch, subtype, *args): """Register a switch with name <switch> and <subtype>, <hostname>, <username>, <password> eg. haas switch_register mock03 mock mockhost01 mockuser01 mockpass01 FIXME: current design needs to change. CLI should not know about every backend. ideally, this should be taken care of in the driver itself or client library (work-in-progress) should manage it.
""" switch_api = "http://schema.massopencloud.org/haas/v0/switches/" if subtype == "nexus": if len(args) == 4: switchinfo = { "type": switch_api+subtype, "hostname": args[0], "username": args[1], "password": args[2], "dummy_vlan": args[3] } else: sys.stderr.write('ERROR: subtype '+subtype+' requires exactly 4 arguments\n') sys.stderr.write('<hostname> <username> <password> <dummy_vlan_no>\n') return elif subtype == "mock": if len(args) == 3: switchinfo = { "type": switch_api+subtype, "hostname": args[0], "username": args[1], "password": args[2] } else: sys.stderr.write('ERROR: subtype '+subtype+' requires exactly 3 arguments\n') sys.stderr.write('<hostname> <username> <password>\n') return elif subtype == "powerconnect55xx": if len(args) == 3: switchinfo = { "type": switch_api+subtype, "hostname": args[0], "username": args[1], "password": args[2] } else: sys.stderr.write('ERROR: subtype '+subtype+' requires exactly 3 arguments\n') sys.stderr.write('<hostname> <username> <password>\n') return elif subtype == "brocade": if len(args) == 4: switchinfo = { "type": switch_api+subtype, "hostname": args[0], "username": args[1], "password": args[2], "interface_type": args[3] } else: sys.stderr.write('ERROR: subtype '+ subtype+' requires exactly 4 arguments\n') sys.stderr.write('<hostname> <username> <password> <interface_type>\n') sys.stderr.write('NOTE: interface_type refers to the speed of the switchports\n') sys.stderr.write('ex. TenGigabitEthernet, FortyGigabitEthernet, etc.\n') return else: sys.stderr.write('ERROR: Invalid subtype supplied\n') return url = object_url('switch', switch) do_put(url, data=switchinfo) @cmd def switch_delete(switch): """Delete a <switch> """ url = object_url('switch', switch) do_delete(url) @cmd def list_switches(): """List all switches""" url = object_url('switches') do_get(url) @cmd def port_register(switch, port): """Register a <port> with <switch> """ url = object_url('switch', switch, 'port', port) do_put(url) @cmd def port_delete(switch, port): """Delete a <port> from a <switch>""" url = object_url('switch', switch, 'port', port) do_delete(url) @cmd def port_connect_nic(switch, port, node, nic): """Connect a <port> on a <switch> to a <nic> on a <node>""" url = object_url('switch', switch, 'port', port, 'connect_nic') do_post(url, data={'node': node, 'nic': nic}) @cmd def port_detach_nic(switch, port): """Detach a <port> on a <switch> from whatever's connected to it""" url = object_url('switch', switch, 'port', port, 'detach_nic') do_post(url) @cmd def list_nodes(is_free): """List all nodes or all free nodes <is_free> may be either "all" or "free", and determines whether to list all nodes or all free nodes. 
""" if is_free not in ('all', 'free'): raise TypeError("is_free must be either 'all' or 'free'") url = object_url('node', is_free) do_get(url) @cmd def list_project_nodes(project): """List all nodes attached to a <project>""" url = object_url('project', project, 'nodes') do_get(url) @cmd def list_project_networks(project): """List all networks attached to a <project>""" url = object_url('project', project, 'networks') do_get(url) @cmd def show_network(network): """Display information about <network>""" url = object_url('network', network) do_get(url) @cmd def show_node(node): """Display information about a <node>""" url = object_url('node', node) do_get(url) @cmd def list_project_headnodes(project): """List all headnodes attached to a <project>""" url = object_url('project', project, 'headnodes') do_get(url) @cmd def show_headnode(headnode): """Display information about a <headnode>""" url = object_url('headnode', headnode) do_get(url) @cmd def list_headnode_images(): """Display registered headnode images""" url = object_url('headnode_images') do_get(url) @cmd def show_console(node): """Display console log for <node>""" url = object_url('node', node, 'console') do_get(url) @cmd def start_console(node): """Start logging console output from <node>""" url = object_url('node', node, 'console') do_put(url) @cmd def stop_console(node): """Stop logging console output from <node> and delete the log""" url = object_url('node', node, 'console') do_delete(url) @cmd def create_admin_user(username, password): """Create an admin user. Only valid for the database auth backend. This must be run on the HaaS API server, with access to haas.cfg and the database. It will create an user named <username> with password <password>, who will have administrator priviledges. This command should only be used for bootstrapping the system; once you have an initial admin, you can (and should) create additional users via the API. """ if not config.cfg.has_option('extensions', 'haas.ext.auth.database'): sys.exit("'make_inital_admin' is only valid with the database auth backend.") from haas import model from haas.model import db from haas.ext.auth.database import User model.init_db() db.session.add(User(label=username, password=password, is_admin=True)) db.session.commit() @cmd def help(*commands): """Display usage of all following <commands>, or of all commands if none are given""" if not commands: sys.stdout.write('Usage: %s <command> <arguments...> \n' % sys.argv[0]) sys.stdout.write('Where <command> is one of:\n') commands = sorted(command_dict.keys()) for name in commands: # For each command, print out a summary including the name, arguments, # and the docstring (as a #comment). sys.stdout.write(' %s\n' % usage_dict[name]) sys.stdout.write(' %s\n' % command_dict[name].__doc__) def main(): """Entry point to the CLI. There is a script located at ${source_tree}/scripts/haas, which invokes this function. """ config.setup() if len(sys.argv) < 2 or sys.argv[1] not in command_dict: # Display usage for all commands help() sys.exit(1) else: setup_http_client() command_dict[sys.argv[1]](*sys.argv[2:])
apache-2.0
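How the auth fallback in setup_http_client() is meant to be driven: export HAAS_USERNAME/HAAS_PASSWORD for basic auth, or the OS_* variables for Keystone, before invoking the CLI. A sketch of using the module programmatically under the basic-auth branch; the credentials and endpoint are illustrative, and config.setup() assumes a readable haas.cfg just as main() does.

import os

from haas import cli, config

os.environ['HAAS_USERNAME'] = 'admin'    # illustrative credentials;
os.environ['HAAS_PASSWORD'] = 'secret'   # both set -> basic-auth branch
os.environ['HAAS_ENDPOINT'] = 'http://127.0.0.1:5000'  # illustrative endpoint

config.setup()             # reads haas.cfg, as in main()
cli.setup_http_client()
cli.list_projects()        # GET <endpoint>/projects, printed by check_status_code()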
brayan15/Events
events/users/tests/test_admin.py
117
1391
from test_plus.test import TestCase from ..admin import MyUserCreationForm class TestMyUserCreationForm(TestCase): def setUp(self): self.user = self.make_user('notalamode', 'notalamodespassword') def test_clean_username_success(self): # Instantiate the form with a new username form = MyUserCreationForm({ 'username': 'alamode', 'password1': '7jefB#f@Cc7YJB]2v', 'password2': '7jefB#f@Cc7YJB]2v', }) # Run is_valid() to trigger the validation valid = form.is_valid() self.assertTrue(valid) # Run the actual clean_username method username = form.clean_username() self.assertEqual('alamode', username) def test_clean_username_false(self): # Instantiate the form with the same username as self.user form = MyUserCreationForm({ 'username': self.user.username, 'password1': 'notalamodespassword', 'password2': 'notalamodespassword', }) # Run is_valid() to trigger the validation, which is going to fail # because the username is already taken valid = form.is_valid() self.assertFalse(valid) # The form.errors dict should contain a single error called 'username' self.assertTrue(len(form.errors) == 1) self.assertTrue('username' in form.errors)
mit
zarlant/ansible_core
cloud/rackspace/rax_cbs.py
37
7072
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_cbs short_description: Manipulate Rackspace Cloud Block Storage Volumes description: - Manipulate Rackspace Cloud Block Storage Volumes version_added: 1.6 options: description: description: - Description to give the volume being created default: null image: description: - image to use for bootable volumes. Can be an C(id), C(human_id) or C(name). This option requires C(pyrax>=1.9.3) default: null version_added: 1.9 meta: description: - A hash of metadata to associate with the volume default: null name: description: - Name to give the volume being created default: null required: true size: description: - Size of the volume to create in Gigabytes default: 100 required: true snapshot_id: description: - The id of the snapshot to create the volume from default: null state: description: - Indicate desired state of the resource choices: - present - absent default: present required: true volume_type: description: - Type of the volume being created choices: - SATA - SSD default: SATA required: true wait: description: - wait for the volume to be in state 'available' before returning default: "no" choices: - "yes" - "no" wait_timeout: description: - how long before wait gives up, in seconds default: 300 author: Christopher H. Laco, Matt Martz extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' - name: Build a Block Storage Volume gather_facts: False hosts: local connection: local tasks: - name: Storage volume create request local_action: module: rax_cbs credentials: ~/.raxpub name: my-volume description: My Volume volume_type: SSD size: 150 region: DFW wait: yes state: present meta: app: my-cool-app register: my_volume ''' from distutils.version import LooseVersion try: import pyrax HAS_PYRAX = True except ImportError: HAS_PYRAX = False def cloud_block_storage(module, state, name, description, meta, size, snapshot_id, volume_type, wait, wait_timeout, image): changed = False volume = None instance = {} cbs = pyrax.cloud_blockstorage if cbs is None: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') if image: # pyrax<1.9.3 did not have support for specifying an image when # creating a volume which is required for bootable volumes if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): module.fail_json(msg='Creating a bootable volume requires ' 'pyrax>=1.9.3') image = rax_find_image(module, pyrax, image) volume = rax_find_volume(module, pyrax, name) if state == 'present': if not volume: kwargs = dict() if image: kwargs['image'] = image try: volume = cbs.create(name, size=size, volume_type=volume_type, description=description, metadata=meta, snapshot_id=snapshot_id, **kwargs) changed = True except Exception, e: module.fail_json(msg='%s' % e.message) else: if wait: attempts = wait_timeout / 5 pyrax.utils.wait_for_build(volume, interval=5, attempts=attempts) volume.get() instance = rax_to_dict(volume) result = dict(changed=changed, volume=instance) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id elif wait and volume.status not in VOLUME_STATUS: result['msg'] = 'Timeout waiting on %s' % volume.id if 'msg' in result: module.fail_json(**result) else: module.exit_json(**result) elif state == 'absent': if volume: instance = rax_to_dict(volume) try: volume.delete() changed = True except Exception, e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, volume=instance) def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( description=dict(type='str'), image=dict(type='str'), meta=dict(type='dict', default={}), name=dict(required=True), size=dict(type='int', default=100), snapshot_id=dict(), state=dict(default='present', choices=['present', 'absent']), volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300) ) ) module = AnsibleModule( argument_spec=argument_spec, required_together=rax_required_together() ) if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') description = module.params.get('description') image = module.params.get('image') meta = module.params.get('meta') name = module.params.get('name') size = module.params.get('size') snapshot_id = module.params.get('snapshot_id') state = module.params.get('state') volume_type = module.params.get('volume_type') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') setup_rax_module(module, pyrax) cloud_block_storage(module, state, name, description, meta, size, snapshot_id, volume_type, wait, wait_timeout, image) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.rax import * # invoke the module main()
gpl-3.0
Xeralux/tensorflow
tensorflow/contrib/receptive_field/python/util/receptive_field.py
34
17297
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions to compute receptive field of a fully-convolutional network. Please refer to the following g3doc for detailed explanation on how this computation is performed, and why it is important: g3doc/photos/vision/features/delf/g3doc/rf_computation.md """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.receptive_field.python.util import graph_compute_order from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters from tensorflow.python.framework import ops as framework_ops from tensorflow.python.platform import tf_logging as logging def _get_rf_size_node_input(stride, kernel_size, rf_size_output): """Computes RF size at the input of a given layer. Args: stride: Stride of given layer (integer). kernel_size: Kernel size of given layer (integer). rf_size_output: RF size at output of given layer (integer). Returns: rf_size_input: RF size at input of given layer (integer). """ return stride * rf_size_output + kernel_size - stride def _get_effective_stride_node_input(stride, effective_stride_output): """Computes effective stride at the input of a given layer. Args: stride: Stride of given layer (integer). effective_stride_output: Effective stride at output of given layer (integer). Returns: effective_stride_input: Effective stride at input of given layer (integer). """ return stride * effective_stride_output def _get_effective_padding_node_input(stride, padding, effective_padding_output): """Computes effective padding at the input of a given layer. Args: stride: Stride of given layer (integer). padding: Padding of given layer (integer). effective_padding_output: Effective padding at output of given layer (integer). Returns: effective_padding_input: Effective padding at input of given layer (integer). """ return stride * effective_padding_output + padding class ReceptiveField(object): """Receptive field of a convolutional neural network. Args: size: Receptive field size. stride: Effective stride. padding: Effective padding. """ def __init__(self, size, stride, padding): self.size = np.asarray(size) self.stride = np.asarray(stride) self.padding = np.asarray(padding) def compute_input_center_coordinates(self, y, axis=None): """Computes the center of the receptive field that generated a feature. Args: y: An array of feature coordinates with shape `(..., d)`, where `d` is the number of dimensions of the coordinates. axis: The dimensions for which to compute the input center coordinates. If `None` (the default), compute the input center coordinates for all dimensions. Returns: x: Center of the receptive field that generated the features, at the input of the network. Raises: ValueError: If the number of dimensions of the feature coordinates does not match the number of elements in `axis`. 
""" # Use all dimensions. if axis is None: axis = range(self.size.size) # Ensure axis is a list because tuples have different indexing behavior. axis = list(axis) y = np.asarray(y) if y.shape[-1] != len(axis): raise ValueError("Dimensionality of the feature coordinates `y` (%d) " "does not match dimensionality of `axis` (%d)" % (y.shape[-1], len(axis))) return -self.padding[axis] + y * self.stride[axis] + ( self.size[axis] - 1) / 2 def compute_feature_coordinates(self, x, axis=None): """Computes the position of a feature given the center of a receptive field. Args: x: An array of input center coordinates with shape `(..., d)`, where `d` is the number of dimensions of the coordinates. axis: The dimensions for which to compute the feature coordinates. If `None` (the default), compute the feature coordinates for all dimensions. Returns: y: Coordinates of the features. Raises: ValueError: If the number of dimensions of the input center coordinates does not match the number of elements in `axis`. """ # Use all dimensions. if axis is None: axis = range(self.size.size) # Ensure axis is a list because tuples have different indexing behavior. axis = list(axis) x = np.asarray(x) if x.shape[-1] != len(axis): raise ValueError("Dimensionality of the input center coordinates `x` " "(%d) does not match dimensionality of `axis` (%d)" % (x.shape[-1], len(axis))) return (x + self.padding[axis] + (1 - self.size[axis]) / 2) / self.stride[axis] def __iter__(self): return iter(np.concatenate([self.size, self.stride, self.padding])) def compute_receptive_field_from_graph_def(graph_def, input_node, output_node, stop_propagation=None, input_resolution=None): """Computes receptive field (RF) parameters from a Graph or GraphDef object. The algorithm stops the calculation of the receptive field whenever it encounters an operation in the list `stop_propagation`. Stopping the calculation early can be useful to calculate the receptive field of a subgraph such as a single branch of the [inception network](https://arxiv.org/abs/1512.00567). Args: graph_def: Graph or GraphDef object. input_node: Name of the input node or Tensor object from graph. output_node: Name of the output node or Tensor object from graph. stop_propagation: List of operations or scope names for which to stop the propagation of the receptive field. input_resolution: 2D list. If the input resolution to the model is fixed and known, this may be set. This is helpful for cases where the RF parameters vary depending on the input resolution (this happens since SAME padding in tensorflow depends on input resolution in general). If this is None, it is assumed that the input resolution is unknown, so some RF parameters may be unknown (depending on the model architecture). Returns: rf_size_x: Receptive field size of network in the horizontal direction, with respect to specified input and output. rf_size_y: Receptive field size of network in the vertical direction, with respect to specified input and output. effective_stride_x: Effective stride of network in the horizontal direction, with respect to specified input and output. effective_stride_y: Effective stride of network in the vertical direction, with respect to specified input and output. effective_padding_x: Effective padding of network in the horizontal direction, with respect to specified input and output. effective_padding_y: Effective padding of network in the vertical direction, with respect to specified input and output. 
Raises: ValueError: If network is not aligned or if either input or output nodes cannot be found. For network criterion alignment, see photos/vision/features/delf/g3doc/rf_computation.md """ # Convert a graph to graph_def if necessary. if isinstance(graph_def, framework_ops.Graph): graph_def = graph_def.as_graph_def() # Convert tensors to names. if isinstance(input_node, framework_ops.Tensor): input_node = input_node.op.name if isinstance(output_node, framework_ops.Tensor): output_node = output_node.op.name stop_propagation = stop_propagation or [] # Computes order of computation for a given graph. node_info, name_to_node = graph_compute_order.get_compute_order( graph_def=graph_def, input_node_name=input_node, input_node_size=input_resolution) # Sort in reverse topological order. ordered_node_info = sorted(node_info.items(), key=lambda x: -x[1].order) # Dictionaries to keep track of receptive field, effective stride and # effective padding of different nodes. rf_sizes_x = {} rf_sizes_y = {} effective_strides_x = {} effective_strides_y = {} effective_paddings_x = {} effective_paddings_y = {} # Initialize dicts for output_node. rf_sizes_x[output_node] = 1 rf_sizes_y[output_node] = 1 effective_strides_x[output_node] = 1 effective_strides_y[output_node] = 1 effective_paddings_x[output_node] = 0 effective_paddings_y[output_node] = 0 # Flag to denote if we found output node yet. If we have not, we skip nodes # until the output node is found. found_output_node = False # Flag to denote if padding is undefined. This happens when SAME padding mode # is used in conjunction with stride and kernel sizes which make it such that # the padding to be applied would depend on the input size. In this case, # alignment checks are skipped, and the effective padding is None. undefined_padding = False for _, (o, node, _, _) in ordered_node_info: if node: logging.vlog(3, "%10d %-100s %-20s" % (o, node.name[:90], node.op)) else: continue # When we find input node, we can stop. if node.name == input_node: break # Loop until we find the output node. All nodes before finding the output # one are irrelevant, so they can be skipped. if not found_output_node: if node.name == output_node: found_output_node = True if found_output_node: if node.name not in rf_sizes_x: assert node.name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but " "not in rf_sizes_x" % node.name) # In this case, node is not relevant since it's not part of the # computation we're interested in. logging.vlog(3, "Irrelevant node %s, skipping it...", node.name) continue # Get params for this layer. (kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y, _, _) = parse_layer_parameters.get_layer_params( node, name_to_node, node_info[node.name].input_size) logging.vlog(3, "kernel_size_x = %s, kernel_size_y = %s, " "stride_x = %s, stride_y = %s, " "padding_x = %s, padding_y = %s, input size = %s" % (kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y, node_info[node.name].input_size)) if padding_x is None or padding_y is None: undefined_padding = True # Get parameters at input of this layer which may or may not be propagated # to the input layers. 
rf_size_input_x = _get_rf_size_node_input(stride_x, kernel_size_x, rf_sizes_x[node.name]) rf_size_input_y = _get_rf_size_node_input(stride_y, kernel_size_y, rf_sizes_y[node.name]) effective_stride_input_x = _get_effective_stride_node_input( stride_x, effective_strides_x[node.name]) effective_stride_input_y = _get_effective_stride_node_input( stride_y, effective_strides_y[node.name]) if not undefined_padding: effective_padding_input_x = _get_effective_padding_node_input( stride_x, padding_x, effective_paddings_x[node.name]) effective_padding_input_y = _get_effective_padding_node_input( stride_y, padding_y, effective_paddings_y[node.name]) else: effective_padding_input_x = None effective_padding_input_y = None logging.vlog( 4, "rf_size_input_x = %s, rf_size_input_y = %s, " "effective_stride_input_x = %s, effective_stride_input_y = %s, " "effective_padding_input_x = %s, effective_padding_input_y = %s" % (rf_size_input_x, rf_size_input_y, effective_stride_input_x, effective_stride_input_y, effective_padding_input_x, effective_padding_input_y)) # Loop over this node's inputs and potentially propagate information down. for inp_name in node.input: # Stop the propagation of the receptive field. if any(inp_name.startswith(stop) for stop in stop_propagation): logging.vlog(3, "Skipping explicitly ignored node %s.", inp_name) continue logging.vlog(4, "inp_name = %s", inp_name) if inp_name.startswith("^"): # The character "^" denotes a control dependency, so this input node # can be safely ignored. continue inp_node = name_to_node[inp_name] logging.vlog(4, "inp_node = \n%s", inp_node) if inp_name in rf_sizes_x: assert inp_name in rf_sizes_y, ("Node %s is in rf_sizes_x, but " "not in rf_sizes_y" % inp_name) logging.vlog( 4, "rf_sizes_x[inp_name] = %s," " rf_sizes_y[inp_name] = %s, " "effective_strides_x[inp_name] = %s," " effective_strides_y[inp_name] = %s, " "effective_paddings_x[inp_name] = %s," " effective_paddings_y[inp_name] = %s" % (rf_sizes_x[inp_name], rf_sizes_y[inp_name], effective_strides_x[inp_name], effective_strides_y[inp_name], effective_paddings_x[inp_name], effective_paddings_y[inp_name])) # This node was already discovered through a previous path, so we need # to make sure that graph is aligned. This alignment check is skipped # if the padding is not defined, since in this case alignment cannot # be checked. if not undefined_padding: if effective_strides_x[inp_name] != effective_stride_input_x: raise ValueError( "Graph is not aligned since effective stride from different " "paths is different in horizontal direction") if effective_strides_y[inp_name] != effective_stride_input_y: raise ValueError( "Graph is not aligned since effective stride from different " "paths is different in vertical direction") if (rf_sizes_x[inp_name] - 1 ) / 2 - effective_paddings_x[inp_name] != ( rf_size_input_x - 1) / 2 - effective_padding_input_x: raise ValueError( "Graph is not aligned since center shift from different " "paths is different in horizontal direction") if (rf_sizes_y[inp_name] - 1 ) / 2 - effective_paddings_y[inp_name] != ( rf_size_input_y - 1) / 2 - effective_padding_input_y: raise ValueError( "Graph is not aligned since center shift from different " "paths is different in vertical direction") # Keep track of path with largest RF, for both directions. 
if rf_sizes_x[inp_name] < rf_size_input_x: rf_sizes_x[inp_name] = rf_size_input_x effective_strides_x[inp_name] = effective_stride_input_x effective_paddings_x[inp_name] = effective_padding_input_x if rf_sizes_y[inp_name] < rf_size_input_y: rf_sizes_y[inp_name] = rf_size_input_y effective_strides_y[inp_name] = effective_stride_input_y effective_paddings_y[inp_name] = effective_padding_input_y else: assert inp_name not in rf_sizes_y, ("Node %s is in rf_sizes_y, but " "not in rf_sizes_x" % inp_name) # In this case, it is the first time we encounter this node. So we # propagate the RF parameters. rf_sizes_x[inp_name] = rf_size_input_x rf_sizes_y[inp_name] = rf_size_input_y effective_strides_x[inp_name] = effective_stride_input_x effective_strides_y[inp_name] = effective_stride_input_y effective_paddings_x[inp_name] = effective_padding_input_x effective_paddings_y[inp_name] = effective_padding_input_y if not found_output_node: raise ValueError("Output node was not found") if input_node not in rf_sizes_x: raise ValueError("Input node was not found") return ReceptiveField( (rf_sizes_x[input_node], rf_sizes_y[input_node]), (effective_strides_x[input_node], effective_strides_y[input_node]), (effective_paddings_x[input_node], effective_paddings_y[input_node]))
apache-2.0
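The three helper functions above implement a backward recurrence over layers: rf_in = s * rf_out + k - s, stride_in = s * stride_out, pad_in = s * pad_out + p. A minimal standalone sketch of that recurrence applied to a plain list of (kernel_size, stride, padding) layer specs; the three-layer stack here is a made-up example, not taken from the file:

def receptive_field(layers):
    # Walk layers from output back to input, accumulating RF parameters,
    # starting from the output initialization (size 1, stride 1, padding 0).
    rf_size, eff_stride, eff_padding = 1, 1, 0
    for kernel_size, stride, padding in reversed(layers):
        rf_size = stride * rf_size + kernel_size - stride
        eff_stride = stride * eff_stride
        eff_padding = stride * eff_padding + padding
    return rf_size, eff_stride, eff_padding

# Two 3x3/stride-1 convs followed by a 2x2/stride-2 pool.
print(receptive_field([(3, 1, 1), (3, 1, 1), (2, 2, 0)]))  # (6, 2, 2)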
melipharo/stru-python19
generator/contact.py
1
3075
import os
import jsonpickle
import argparse

from model import Contact
from model.utils import random_phone, random_email, random_string


class ContactGenerator:
    def __init__(self, name_max_len=10, tel_max_len=10, email_max_len=15, data_max_len=15):
        self.name_max_len = name_max_len
        self.tel_max_len = tel_max_len
        self.email_max_len = email_max_len
        self.data_max_len = data_max_len

    def get_contacts_count(self, count):
        return [
            Contact(
                firstname=random_string("", self.name_max_len),
                lastname=random_string("", self.name_max_len),
                company=random_string("", self.data_max_len),
                home_tel=random_phone("", self.tel_max_len),
                mobile_tel=random_phone("", self.tel_max_len),
                work_tel=random_phone("", self.tel_max_len),
                sec_tel=random_phone("", self.tel_max_len),
                email=random_email("", self.email_max_len),
                email2=random_email("", self.email_max_len),
                email3=random_email("", self.email_max_len),
                note=random_string("", self.data_max_len),
                address=random_string("", self.data_max_len),
                homepage=random_string("", self.data_max_len),
            ) for _ in range(count)
        ]

    def get_contact(self):
        return self.get_contacts_count(1)[0]

    def get_test_selection(self):
        # Enumerates every empty/non-empty combination of firstname,
        # lastname and address, so this always yields 2**3 = 8 contacts.
        return [
            Contact(
                firstname=firstname,
                lastname=lastname,
                company=random_string("", self.data_max_len),
                home_tel=random_phone("", self.tel_max_len),
                mobile_tel=random_phone("", self.tel_max_len),
                work_tel=random_phone("", self.tel_max_len),
                sec_tel=random_phone("", self.tel_max_len),
                email=random_email("", self.email_max_len),
                email2=random_email("", self.email_max_len),
                email3=random_email("", self.email_max_len),
                note=random_string("", self.data_max_len),
                address=address,
                homepage=random_string("", self.data_max_len),
            )
            for firstname in ["", random_string("", self.name_max_len)]
            for lastname in ["", random_string("", self.name_max_len)]
            for address in ["", random_string("", self.data_max_len)]
        ]


if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("-n", help="number of contacts", default=5, type=int)
    ap.add_argument("-f", help="output filename", default="data/contacts.json")
    args = ap.parse_args()

    # NOTE: args.n is parsed but currently unused; the script always writes
    # the full test selection produced by get_test_selection().
    datafile = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", args.f)
    with open(datafile, "w") as f:
        jsonpickle.set_encoder_options("json", indent=2)
        f.write(jsonpickle.encode(ContactGenerator().get_test_selection()))
bsd-2-clause
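get_test_selection above relies on nested comprehension clauses to build the cartesian product of "empty vs. random" values for three fields. The same pattern in isolation, with a stand-in random_string since the real one lives in model.utils; only its (prefix, maxlen) call shape is taken from the file:

import random
import string

def random_string(prefix, maxlen):
    # Stand-in for model.utils.random_string, for illustration only.
    return prefix + "".join(random.choice(string.ascii_letters)
                            for _ in range(random.randint(1, maxlen)))

cases = [
    (firstname, lastname, address)
    for firstname in ["", random_string("", 10)]
    for lastname in ["", random_string("", 10)]
    for address in ["", random_string("", 10)]
]
assert len(cases) == 2 ** 3  # every empty/non-empty combination once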
hbrunn/OpenUpgrade
addons/account/tests/test_tax.py
449
1740
from openerp.tests.common import TransactionCase


class TestTax(TransactionCase):
    """Tests for taxes (account.tax)

    We don't really need at this point to link taxes to tax codes
    (account.tax.code) nor to companies (base.company) to check
    computation results.
    """

    def setUp(self):
        super(TestTax, self).setUp()
        self.tax_model = self.registry('account.tax')

    def test_programmatic_tax(self):
        cr, uid = self.cr, self.uid
        tax_id = self.tax_model.create(cr, uid, dict(
            name="Programmatic tax",
            type='code',
            python_compute='result = 12.0',
            python_compute_inv='result = 11.0',
        ))
        tax_records = self.tax_model.browse(cr, uid, [tax_id])
        res = self.tax_model.compute_all(cr, uid, tax_records, 50.0, 2)
        tax_detail = res['taxes'][0]
        self.assertEquals(tax_detail['amount'], 24.0)
        self.assertEquals(res['total_included'], 124.0)

    def test_percent_tax(self):
        """Test computations done by a 10 percent tax."""
        cr, uid = self.cr, self.uid
        tax_id = self.tax_model.create(cr, uid, dict(
            name="Percent tax",
            type='percent',
            amount='0.1',
        ))
        tax_records = self.tax_model.browse(cr, uid, [tax_id])
        res = self.tax_model.compute_all(cr, uid, tax_records, 50.0, 2)
        tax_detail = res['taxes'][0]
        self.assertEquals(tax_detail['amount'], 10.0)
        self.assertEquals(res['total_included'], 110.0)

        # now the inverse computation
        res = self.tax_model.compute_inv(cr, uid, tax_records, 55.0, 2)
        self.assertEquals(res[0]['amount'], 10.0)
agpl-3.0
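The percent-tax expectations above can be checked by hand: a 10% tax on two units at 50.0 adds 10.0 to a 100.0 base, and the inverse computation strips that same tax from a tax-included unit price of 55.0. A plain-arithmetic sketch, no ORM involved:

price_unit, quantity, rate = 50.0, 2, 0.10

base = price_unit * quantity       # 100.0
tax = base * rate                  # 10.0  -> tax_detail['amount']
assert base + tax == 110.0         # res['total_included']

# Inverse: 55.0 per unit is tax-included, so divide the total by 1 + rate.
taxless_total = 55.0 * quantity / (1 + rate)          # 100.0
assert round(55.0 * quantity - taxless_total, 2) == 10.0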
hugobowne/scikit-learn
sklearn/metrics/tests/test_regression.py
272
6066
from __future__ import division, print_function import numpy as np from itertools import product from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.metrics import explained_variance_score from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error from sklearn.metrics import median_absolute_error from sklearn.metrics import r2_score from sklearn.metrics.regression import _check_reg_targets def test_regression_metrics(n_samples=50): y_true = np.arange(n_samples) y_pred = y_true + 1 assert_almost_equal(mean_squared_error(y_true, y_pred), 1.) assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.) assert_almost_equal(median_absolute_error(y_true, y_pred), 1.) assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2) assert_almost_equal(explained_variance_score(y_true, y_pred), 1.) def test_multioutput_regression(): y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]) error = mean_squared_error(y_true, y_pred) assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.) # mean_absolute_error and mean_squared_error are equal because # it is a binary problem. error = mean_absolute_error(y_true, y_pred) assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.) error = r2_score(y_true, y_pred, multioutput='variance_weighted') assert_almost_equal(error, 1. - 5. / 2) error = r2_score(y_true, y_pred, multioutput='uniform_average') assert_almost_equal(error, -.875) def test_regression_metrics_at_limits(): assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2) assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2) assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2) assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2) assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2) def test__check_reg_targets(): # All of length 3 EXAMPLES = [ ("continuous", [1, 2, 3], 1), ("continuous", [[1], [2], [3]], 1), ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2), ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2), ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3), ] for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES, repeat=2): if type1 == type2 and n_out1 == n_out2: y_type, y_check1, y_check2, multioutput = _check_reg_targets( y1, y2, None) assert_equal(type1, y_type) if type1 == 'continuous': assert_array_equal(y_check1, np.reshape(y1, (-1, 1))) assert_array_equal(y_check2, np.reshape(y2, (-1, 1))) else: assert_array_equal(y_check1, y1) assert_array_equal(y_check2, y2) else: assert_raises(ValueError, _check_reg_targets, y1, y2, None) def test_regression_multioutput_array(): y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]] y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]] mse = mean_squared_error(y_true, y_pred, multioutput='raw_values') mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values') r = r2_score(y_true, y_pred, multioutput='raw_values') evs = explained_variance_score(y_true, y_pred, multioutput='raw_values') assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2) assert_array_almost_equal(mae, [0.25, 0.625], decimal=2) assert_array_almost_equal(r, [0.95, 0.93], decimal=2) assert_array_almost_equal(evs, [0.95, 0.93], decimal=2) # mean_absolute_error and mean_squared_error are equal because # it 
is a binary problem. y_true = [[0, 0]]*4 y_pred = [[1, 1]]*4 mse = mean_squared_error(y_true, y_pred, multioutput='raw_values') mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values') r = r2_score(y_true, y_pred, multioutput='raw_values') assert_array_almost_equal(mse, [1., 1.], decimal=2) assert_array_almost_equal(mae, [1., 1.], decimal=2) assert_array_almost_equal(r, [0., 0.], decimal=2) r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values') assert_array_almost_equal(r, [0, -3.5], decimal=2) assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='uniform_average')) evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values') assert_array_almost_equal(evs, [0, -1.25], decimal=2) # Checking for the condition in which both numerator and denominator is # zero. y_true = [[1, 3], [-1, 2]] y_pred = [[1, 4], [-1, 1]] r2 = r2_score(y_true, y_pred, multioutput='raw_values') assert_array_almost_equal(r2, [1., -3.], decimal=2) assert_equal(np.mean(r2), r2_score(y_true, y_pred, multioutput='uniform_average')) evs = explained_variance_score(y_true, y_pred, multioutput='raw_values') assert_array_almost_equal(evs, [1., -3.], decimal=2) assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred)) def test_regression_custom_weights(): y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]] y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]] msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6]) maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6]) rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6]) evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6]) assert_almost_equal(msew, 0.39, decimal=2) assert_almost_equal(maew, 0.475, decimal=3) assert_almost_equal(rw, 0.94, decimal=2) assert_almost_equal(evsw, 0.94, decimal=2)
bsd-3-clause
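The multioutput='raw_values' and custom-weight cases above relate directly: the weighted score is the dot product of the per-output raw scores with the weights. Recomputing the MSE case by hand, with the per-output values the test itself asserts:

import numpy as np

raw_mse = np.array([0.125, 0.5625])  # per-output MSE for the y_true/y_pred above
weights = np.array([0.4, 0.6])
assert np.isclose(np.dot(raw_mse, weights), 0.3875)  # rounds to the asserted 0.39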
ROMFactory/android_external_chromium_org
net/tools/testserver/echo_message.py
187
13195
# Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Provides utility functions for TCP/UDP echo servers and clients. This program has classes and functions to encode, decode, calculate checksum and verify the "echo request" and "echo response" messages. "echo request" message is an echo message sent from the client to the server. "echo response" message is a response from the server to the "echo request" message from the client. The format of "echo request" message is <version><checksum><payload_size><payload>. <version> is the version number of the "echo request" protocol. <checksum> is the checksum of the <payload>. <payload_size> is the size of the <payload>. <payload> is the echo message. The format of "echo response" message is <version><checksum><payload_size><key><encoded_payload>.<version>, <checksum> and <payload_size> are same as what is in the "echo request" message. <encoded_payload> is encoded version of the <payload>. <key> is a randomly generated key that is used to encode/decode the <payload>. """ __author__ = '[email protected] (Raman Tenneti)' from itertools import cycle from itertools import izip import random class EchoHeader(object): """Class to keep header info of the EchoRequest and EchoResponse messages. This class knows how to parse the checksum, payload_size from the "echo request" and "echo response" messages. It holds the checksum, payload_size of the "echo request" and "echo response" messages. """ # This specifies the version. VERSION_STRING = '01' # This specifies the starting position of the checksum and length of the # checksum. Maximum value for the checksum is less than (2 ** 31 - 1). CHECKSUM_START = 2 CHECKSUM_LENGTH = 10 CHECKSUM_FORMAT = '%010d' CHECKSUM_END = CHECKSUM_START + CHECKSUM_LENGTH # This specifies the starting position of the <payload_size> and length of the # <payload_size>. Maximum number of bytes that can be sent in the <payload> is # 9,999,999. PAYLOAD_SIZE_START = CHECKSUM_END PAYLOAD_SIZE_LENGTH = 7 PAYLOAD_SIZE_FORMAT = '%07d' PAYLOAD_SIZE_END = PAYLOAD_SIZE_START + PAYLOAD_SIZE_LENGTH def __init__(self, checksum=0, payload_size=0): """Initializes the checksum and payload_size of self (EchoHeader). Args: checksum: (int) The checksum of the payload. payload_size: (int) The size of the payload. """ self.checksum = checksum self.payload_size = payload_size def ParseAndInitialize(self, echo_message): """Parses the echo_message and initializes self with the parsed data. This method extracts checksum, and payload_size from the echo_message (echo_message could be either echo_request or echo_response messages) and initializes self (EchoHeader) with checksum and payload_size. Args: echo_message: (string) The string representation of EchoRequest or EchoResponse objects. Raises: ValueError: Invalid data """ if not echo_message or len(echo_message) < EchoHeader.PAYLOAD_SIZE_END: raise ValueError('Invalid data:%s' % echo_message) self.checksum = int(echo_message[ EchoHeader.CHECKSUM_START:EchoHeader.CHECKSUM_END]) self.payload_size = int(echo_message[ EchoHeader.PAYLOAD_SIZE_START:EchoHeader.PAYLOAD_SIZE_END]) def InitializeFromPayload(self, payload): """Initializes the EchoHeader object with the payload. It calculates checksum for the payload and initializes self (EchoHeader) with the calculated checksum and size of the payload. This method is used by the client code during testing. 
Args: payload: (string) The payload is the echo string (like 'hello'). Raises: ValueError: Invalid data """ if not payload: raise ValueError('Invalid data:%s' % payload) self.payload_size = len(payload) self.checksum = Checksum(payload, self.payload_size) def __str__(self): """String representation of the self (EchoHeader). Returns: A string representation of self (EchoHeader). """ checksum_string = EchoHeader.CHECKSUM_FORMAT % self.checksum payload_size_string = EchoHeader.PAYLOAD_SIZE_FORMAT % self.payload_size return EchoHeader.VERSION_STRING + checksum_string + payload_size_string class EchoRequest(EchoHeader): """Class holds data specific to the "echo request" message. This class holds the payload extracted from the "echo request" message. """ # This specifies the starting position of the <payload>. PAYLOAD_START = EchoHeader.PAYLOAD_SIZE_END def __init__(self): """Initializes EchoRequest object.""" EchoHeader.__init__(self) self.payload = '' def ParseAndInitialize(self, echo_request_data): """Parses and Initializes the EchoRequest object from the echo_request_data. This method extracts the header information (checksum and payload_size) and payload from echo_request_data. Args: echo_request_data: (string) The string representation of EchoRequest object. Raises: ValueError: Invalid data """ EchoHeader.ParseAndInitialize(self, echo_request_data) if len(echo_request_data) <= EchoRequest.PAYLOAD_START: raise ValueError('Invalid data:%s' % echo_request_data) self.payload = echo_request_data[EchoRequest.PAYLOAD_START:] def InitializeFromPayload(self, payload): """Initializes the EchoRequest object with payload. It calculates checksum for the payload and initializes self (EchoRequest) object. Args: payload: (string) The payload string for which "echo request" needs to be constructed. """ EchoHeader.InitializeFromPayload(self, payload) self.payload = payload def __str__(self): """String representation of the self (EchoRequest). Returns: A string representation of self (EchoRequest). """ return EchoHeader.__str__(self) + self.payload class EchoResponse(EchoHeader): """Class holds data specific to the "echo response" message. This class knows how to parse the "echo response" message. This class holds key, encoded_payload and decoded_payload of the "echo response" message. """ # This specifies the starting position of the |key_| and length of the |key_|. # Minimum and maximum values for the |key_| are 100,000 and 999,999. KEY_START = EchoHeader.PAYLOAD_SIZE_END KEY_LENGTH = 6 KEY_FORMAT = '%06d' KEY_END = KEY_START + KEY_LENGTH KEY_MIN_VALUE = 0 KEY_MAX_VALUE = 999999 # This specifies the starting position of the <encoded_payload> and length # of the <encoded_payload>. ENCODED_PAYLOAD_START = KEY_END def __init__(self, key='', encoded_payload='', decoded_payload=''): """Initializes the EchoResponse object.""" EchoHeader.__init__(self) self.key = key self.encoded_payload = encoded_payload self.decoded_payload = decoded_payload def ParseAndInitialize(self, echo_response_data=None): """Parses and Initializes the EchoResponse object from echo_response_data. This method calls EchoHeader to extract header information from the echo_response_data and it then extracts key and encoded_payload from the echo_response_data. It holds the decoded payload of the encoded_payload. Args: echo_response_data: (string) The string representation of EchoResponse object. 
Raises: ValueError: Invalid echo_request_data """ EchoHeader.ParseAndInitialize(self, echo_response_data) if len(echo_response_data) <= EchoResponse.ENCODED_PAYLOAD_START: raise ValueError('Invalid echo_response_data:%s' % echo_response_data) self.key = echo_response_data[EchoResponse.KEY_START:EchoResponse.KEY_END] self.encoded_payload = echo_response_data[ EchoResponse.ENCODED_PAYLOAD_START:] self.decoded_payload = Crypt(self.encoded_payload, self.key) def InitializeFromEchoRequest(self, echo_request): """Initializes EchoResponse with the data from the echo_request object. It gets the checksum, payload_size and payload from the echo_request object and then encodes the payload with a random key. It also saves the payload as decoded_payload. Args: echo_request: (EchoRequest) The EchoRequest object which has "echo request" message. """ self.checksum = echo_request.checksum self.payload_size = echo_request.payload_size self.key = (EchoResponse.KEY_FORMAT % random.randrange(EchoResponse.KEY_MIN_VALUE, EchoResponse.KEY_MAX_VALUE)) self.encoded_payload = Crypt(echo_request.payload, self.key) self.decoded_payload = echo_request.payload def __str__(self): """String representation of the self (EchoResponse). Returns: A string representation of self (EchoResponse). """ return EchoHeader.__str__(self) + self.key + self.encoded_payload def Crypt(payload, key): """Encodes/decodes the payload with the key and returns encoded payload. This method loops through the payload and XORs each byte with the key. Args: payload: (string) The string to be encoded/decoded. key: (string) The key used to encode/decode the payload. Returns: An encoded/decoded string. """ return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(payload, cycle(key))) def Checksum(payload, payload_size): """Calculates the checksum of the payload. Args: payload: (string) The payload string for which checksum needs to be calculated. payload_size: (int) The number of bytes in the payload. Returns: The checksum of the payload. """ checksum = 0 length = min(payload_size, len(payload)) for i in range (0, length): checksum += ord(payload[i]) return checksum def GetEchoRequestData(payload): """Constructs an "echo request" message from the payload. It builds an EchoRequest object from the payload and then returns a string representation of the EchoRequest object. This is used by the TCP/UDP echo clients to build the "echo request" message. Args: payload: (string) The payload string for which "echo request" needs to be constructed. Returns: A string representation of the EchoRequest object. Raises: ValueError: Invalid payload """ try: echo_request = EchoRequest() echo_request.InitializeFromPayload(payload) return str(echo_request) except (IndexError, ValueError): raise ValueError('Invalid payload:%s' % payload) def GetEchoResponseData(echo_request_data): """Verifies the echo_request_data and returns "echo response" message. It builds the EchoRequest object from the echo_request_data and then verifies the checksum of the EchoRequest is same as the calculated checksum of the payload. If the checksums don't match then it returns None. It checksums match, it builds the echo_response object from echo_request object and returns string representation of the EchoResponse object. This is used by the TCP/UDP echo servers. Args: echo_request_data: (string) The string that echo servers send to the clients. Returns: A string representation of the EchoResponse object. It returns None if the echo_request_data is not valid. 
Raises: ValueError: Invalid echo_request_data """ try: if not echo_request_data: raise ValueError('Invalid payload:%s' % echo_request_data) echo_request = EchoRequest() echo_request.ParseAndInitialize(echo_request_data) if Checksum(echo_request.payload, echo_request.payload_size) != echo_request.checksum: return None echo_response = EchoResponse() echo_response.InitializeFromEchoRequest(echo_request) return str(echo_response) except (IndexError, ValueError): raise ValueError('Invalid payload:%s' % echo_request_data) def DecodeAndVerify(echo_request_data, echo_response_data): """Decodes and verifies the echo_response_data. It builds EchoRequest and EchoResponse objects from the echo_request_data and echo_response_data. It returns True if the EchoResponse's payload and checksum match EchoRequest's. This is used by the TCP/UDP echo clients for testing purposes. Args: echo_request_data: (string) The request clients sent to echo servers. echo_response_data: (string) The response clients received from the echo servers. Returns: True if echo_request_data and echo_response_data match. Raises: ValueError: Invalid echo_request_data or Invalid echo_response """ try: echo_request = EchoRequest() echo_request.ParseAndInitialize(echo_request_data) except (IndexError, ValueError): raise ValueError('Invalid echo_request:%s' % echo_request_data) try: echo_response = EchoResponse() echo_response.ParseAndInitialize(echo_response_data) except (IndexError, ValueError): raise ValueError('Invalid echo_response:%s' % echo_response_data) return (echo_request.checksum == echo_response.checksum and echo_request.payload == echo_response.decoded_payload)
bsd-3-clause
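The Crypt and Checksum primitives above are easy to sanity-check in isolation: XOR against a cycled key is its own inverse, and the checksum is just a byte sum. A small sketch, written with zip rather than the file's izip so it also runs on Python 3:

from itertools import cycle

def crypt(payload, key):
    # XOR each payload byte with the repeating key, as Crypt() does.
    return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(payload, cycle(key)))

msg = 'hello'
assert crypt(crypt(msg, '123456'), '123456') == msg  # encode then decode
assert sum(ord(c) for c in msg) == 532               # Checksum(msg, len(msg))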
drbobrinkman/LIFX-Stock-Orb
stockOrb.py
1
3206
import requests
import sys
import colorsys

######################
# Configuration constants
DAILY_STD_DEV = 1.120  # In %, what is the standard deviation of stock price change?
CALL_FREQUENCY = 5     # How often this script will be called, in minutes
STOCK_TO_TRACK = '.dji'
GOOD_HUE = 120  # Default is green
BAD_HUE = 0     # Default is red
SELECTOR = "group:StockOrb"  # Which light to use
######################

verbose = False
if len(sys.argv[1:]) > 0 and sys.argv[1] == "-v":
    verbose = True
    print "Executing in verbose mode"

getresponse = requests.get("https://www.google.com/finance?q=" + STOCK_TO_TRACK)
if getresponse.status_code != 200:
    print "Loading stock data failed"
    sys.exit(0)

start_of_price_change = getresponse.text.find("id-price-change")
trimmed = getresponse.text[start_of_price_change:start_of_price_change+200]
if verbose:
    print trimmed

start_of_pct = trimmed.find("(")
end_of_pct = trimmed.find("%")
pct_str = trimmed[start_of_pct+1:end_of_pct]
if verbose:
    print pct_str

pct_change = float(pct_str)
if verbose:
    print "pct_change: " + str(pct_change)

######################
# Set up the auth header
f = open('token.txt', 'r')
token = f.readline()
headers = {
    "Authorization": "Bearer %s" % token.rstrip(),
}

############################
# Calculate color from pct_change
# Very good gives lerp_amt of 1, very bad gives 0
lerp_amt = min(max((2*DAILY_STD_DEV + pct_change)/(4*DAILY_STD_DEV), 0), 1)
lerped_hue = int(lerp_amt*GOOD_HUE + (1-lerp_amt)*BAD_HUE)
lerped_color = [int(255*x + 0.5) for x in colorsys.hsv_to_rgb(lerped_hue/360.0, 1.0, 1.0)]
highlight_color = [int((255+i)/2) for i in lerped_color]

# Main color
my_color = "#" + ''.join([hex(i)[2:].zfill(2) for i in lerped_color])
# Color for breathe effect ... brighter version of main color
emph_color = "#" + ''.join([hex(i)[2:].zfill(2) for i in highlight_color])
if verbose:
    print "my_color: " + my_color
    print "emph_color: " + emph_color

###########################
# First, set the light color
# Build the payload
payload = {
    'power': 'on',
    'color': my_color,
    'duration': 5,
}
# Send it, and check results
response = requests.put("https://api.lifx.com/v1/lights/" + SELECTOR + "/state",
                        params=payload, headers=headers)
response.raise_for_status()

###########################
# If it is a big change, do the breathe effect
if abs(pct_change) > 2*DAILY_STD_DEV:
    # The bigger the change, the faster the breathe effect. But keep it
    # between 1 and 8 cycles per second
    breathe_rate = min(max(abs(2/(pct_change/DAILY_STD_DEV)), 1), 8)

    # Build the payload
    payload = {
        'power_on': 'true',  # Light should already be on, but just in case.
        'from_color': my_color,
        'color': emph_color,
        'period': breathe_rate,
        'cycles': (1/breathe_rate)*CALL_FREQUENCY*60,  # Repeat until the next script call
        'persist': 'true'
    }
    # Send message and check response
    response = requests.post("https://api.lifx.com/v1/lights/" + SELECTOR + "/effects/breathe",
                             params=payload, headers=headers)
    response.raise_for_status()
mit
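The lerp above maps a daily change of -2 standard deviations or worse to BAD_HUE, +2 or better to GOOD_HUE, and a flat day to the exact midpoint. The mapping extracted into a testable function:

DAILY_STD_DEV, GOOD_HUE, BAD_HUE = 1.120, 120, 0

def hue_for(pct_change):
    # Clamp the normalized change into [0, 1], then interpolate hues.
    lerp_amt = min(max((2*DAILY_STD_DEV + pct_change) / (4*DAILY_STD_DEV), 0), 1)
    return int(lerp_amt*GOOD_HUE + (1 - lerp_amt)*BAD_HUE)

assert hue_for(0) == 60                    # flat day -> yellow midpoint
assert hue_for(2 * DAILY_STD_DEV) == 120   # strongly up -> full green
assert hue_for(-5 * DAILY_STD_DEV) == 0    # clamped -> full red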
joshsmith2/twitter-image-cloud
tests/python/frontend_tests.py
1
3611
from base import *
import re


class DatabaseTests(GeneralTest):
    def setUp(self):
        super().setUp()
        self.cloud = main.ImageCloud(self.test_csv_in, self.database, 'media_urls')
        self.cloud.write_csv_file_to_database()

    def test_images_load_ok_from_database(self):
        expected = [{'url': 'http://pbs.twimg.com/media/AoxQ3CECIAAsArF.jpg',
                     'count': 1,
                     'share_text': "share"},
                    {'url': 'http://pbs.twimg.com/media/B2PlfAkCUAE0886.png',
                     'count': 1,
                     'share_text': "share"},
                    {'url': 'http://pbs.twimg.com/media/B4boCpyCEAExIYo.jpg',
                     'count': 5,
                     'share_text': "shares"}]
        result = self.cloud.get_images_from_database()
        for e in expected:
            match = [r for r in result if r['url'] == e['url']][0]
            self.assertEqual(e['count'], match['count'])

    def test_images_sorted_by_count_from_database(self):
        result = self.cloud.get_images_from_database()
        previous_count = 100000  # Initialise this very high indeed
        for r in result:
            current_count = r['count']
            self.assertLessEqual(current_count, previous_count)
            previous_count = current_count

    def test_can_limit_results_from_db(self):
        self.cloud.image_limit = 2
        result = self.cloud.get_images_from_database()
        self.assertEqual(len(result), 2)


class JinjaTests(DatabaseTests):
    def test_can_render_test_template(self):
        self.cloud.html_template = 'test.html'
        test_template = self.cloud.load_template()
        rendered = test_template.render(worked_goes_here='worked')
        self.assertIn("<h1>This has worked</h1>", rendered)

    def test_can_render_loops(self):
        self.cloud.html_template = 'loop_test.html'
        loop_template = self.cloud.load_template()
        felonies = ['pig', 'hog', 'brine']
        rendered = loop_template.render(book=felonies)
        for crime in felonies:
            self.assertIn('<h1>I got %s felonies</h1>' % crime, rendered)


class ImageTests(DatabaseTests):
    def test_images_end_up_on_the_page(self):
        images = self.cloud.get_images_from_database()
        index_template = self.cloud.load_template()
        rendered = index_template.render(twitter_images=images)
        tag = '<img src="http://pbs.twimg.com/media/B51oL3fIcAA9j9L.png">'
        self.assertIn(tag, rendered)

    def test_counts_get_there_too(self):
        images = self.cloud.get_images_from_database()
        index_template = self.cloud.load_template()
        rendered = index_template.render(twitter_images=images)
        match_string = '<img src="http://pbs\.twimg\.com/media/B4boCpyCEAExIYo\.jpg">' \
                       '(?:(?!img).)*' \
                       '<span class="count">5</span> shares'
        rendered_stripped = re.sub(r' +', ' ', rendered)
        assert(re.search(match_string, rendered_stripped, re.S))

    def test_images_idd_by_rank(self):
        images = self.cloud.get_images_from_database()
        index_template = self.cloud.load_template()
        rendered = index_template.render(twitter_images=images)
        rendered_stripped = re.sub(r' +', ' ', rendered)
        tag = '<div id="item1" class="packery-item">\n ' \
              '<img src="http://pbs.twimg.com/media/B4boCpyCEAExIYo.jpg">\n '
        self.assertIn(tag, rendered_stripped)


if __name__ == '__main__':
    unittest.main()
gpl-3.0
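The loop-rendering test above boils down to ordinary Jinja2 template iteration. What it exercises, reduced to a plain Template object; the template string here is illustrative, not the project's actual loop_test.html:

from jinja2 import Template

t = Template("{% for crime in book %}<h1>I got {{ crime }} felonies</h1>{% endfor %}")
html = t.render(book=['pig', 'hog', 'brine'])
assert '<h1>I got brine felonies</h1>' in html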
ehashman/oh-mainline
vendor/packages/twisted/twisted/spread/util.py
60
6340
# -*- test-case-name: twisted.test.test_pb -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Utility classes for spread. """ from twisted.internet import defer from twisted.python.failure import Failure from twisted.spread import pb from twisted.protocols import basic from twisted.internet import interfaces from zope.interface import implements class LocalMethod: def __init__(self, local, name): self.local = local self.name = name def __call__(self, *args, **kw): return self.local.callRemote(self.name, *args, **kw) class LocalAsRemote: """ A class useful for emulating the effects of remote behavior locally. """ reportAllTracebacks = 1 def callRemote(self, name, *args, **kw): """ Call a specially-designated local method. self.callRemote('x') will first try to invoke a method named sync_x and return its result (which should probably be a Deferred). Second, it will look for a method called async_x, which will be called and then have its result (or Failure) automatically wrapped in a Deferred. """ if hasattr(self, 'sync_'+name): return getattr(self, 'sync_'+name)(*args, **kw) try: method = getattr(self, "async_" + name) return defer.succeed(method(*args, **kw)) except: f = Failure() if self.reportAllTracebacks: f.printTraceback() return defer.fail(f) def remoteMethod(self, name): return LocalMethod(self, name) class LocalAsyncForwarder: """ A class useful for forwarding a locally-defined interface. """ def __init__(self, forwarded, interfaceClass, failWhenNotImplemented=0): assert interfaceClass.providedBy(forwarded) self.forwarded = forwarded self.interfaceClass = interfaceClass self.failWhenNotImplemented = failWhenNotImplemented def _callMethod(self, method, *args, **kw): return getattr(self.forwarded, method)(*args, **kw) def callRemote(self, method, *args, **kw): if self.interfaceClass.queryDescriptionFor(method): result = defer.maybeDeferred(self._callMethod, method, *args, **kw) return result elif self.failWhenNotImplemented: return defer.fail( Failure(NotImplementedError, "No Such Method in Interface: %s" % method)) else: return defer.succeed(None) class Pager: """ I am an object which pages out information. """ def __init__(self, collector, callback=None, *args, **kw): """ Create a pager with a Reference to a remote collector and an optional callable to invoke upon completion. """ if callable(callback): self.callback = callback self.callbackArgs = args self.callbackKeyword = kw else: self.callback = None self._stillPaging = 1 self.collector = collector collector.broker.registerPageProducer(self) def stillPaging(self): """ (internal) Method called by Broker. """ if not self._stillPaging: self.collector.callRemote("endedPaging") if self.callback is not None: self.callback(*self.callbackArgs, **self.callbackKeyword) return self._stillPaging def sendNextPage(self): """ (internal) Method called by Broker. """ self.collector.callRemote("gotPage", self.nextPage()) def nextPage(self): """ Override this to return an object to be sent to my collector. """ raise NotImplementedError() def stopPaging(self): """ Call this when you're done paging. """ self._stillPaging = 0 class StringPager(Pager): """ A simple pager that splits a string into chunks. 
""" def __init__(self, collector, st, chunkSize=8192, callback=None, *args, **kw): self.string = st self.pointer = 0 self.chunkSize = chunkSize Pager.__init__(self, collector, callback, *args, **kw) def nextPage(self): val = self.string[self.pointer:self.pointer+self.chunkSize] self.pointer += self.chunkSize if self.pointer >= len(self.string): self.stopPaging() return val class FilePager(Pager): """ Reads a file in chunks and sends the chunks as they come. """ implements(interfaces.IConsumer) def __init__(self, collector, fd, callback=None, *args, **kw): self.chunks = [] Pager.__init__(self, collector, callback, *args, **kw) self.startProducing(fd) def startProducing(self, fd): self.deferred = basic.FileSender().beginFileTransfer(fd, self) self.deferred.addBoth(lambda x : self.stopPaging()) def registerProducer(self, producer, streaming): self.producer = producer if not streaming: self.producer.resumeProducing() def unregisterProducer(self): self.producer = None def write(self, chunk): self.chunks.append(chunk) def sendNextPage(self): """ Get the first chunk read and send it to collector. """ if not self.chunks: return val = self.chunks.pop(0) self.producer.resumeProducing() self.collector.callRemote("gotPage", val) # Utility paging stuff. class CallbackPageCollector(pb.Referenceable): """ I receive pages from the peer. You may instantiate a Pager with a remote reference to me. I will call the callback with a list of pages once they are all received. """ def __init__(self, callback): self.pages = [] self.callback = callback def remote_gotPage(self, page): self.pages.append(page) def remote_endedPaging(self): self.callback(self.pages) def getAllPages(referenceable, methodName, *args, **kw): """ A utility method that will call a remote method which expects a PageCollector as the first argument. """ d = defer.Deferred() referenceable.callRemote(methodName, CallbackPageCollector(d.callback), *args, **kw) return d
agpl-3.0
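LocalAsRemote.callRemote above dispatches on a naming convention: a sync_<name> method's return value is passed through untouched (so it should itself be a Deferred), while an async_<name> result is wrapped via defer.succeed. A small usage sketch for Python 2, where Echoer is a made-up subclass; since both Deferreds fire synchronously, the callbacks run immediately:

from twisted.internet import defer
from twisted.spread.util import LocalAsRemote

class Echoer(LocalAsRemote):
    def sync_echo(self, x):
        return defer.succeed(x)  # sync_* results are returned as-is
    def async_shout(self, x):
        return x.upper()         # async_* results get wrapped in a Deferred

results = []
e = Echoer()
e.callRemote('echo', 'hi').addCallback(results.append)
e.callRemote('shout', 'hi').addCallback(results.append)
assert results == ['hi', 'HI']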
shawnadelic/shuup
shuup_tests/front/test_basket.py
1
7407
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. import pytest from django.conf import settings from django.db.models import Sum from django.test.utils import override_settings from shuup.core.models import ShippingMode from shuup.front.basket import get_basket from shuup.front.models import StoredBasket from shuup.testing.factories import ( create_product, get_default_shop, get_default_payment_method, get_default_supplier ) from shuup.testing.utils import apply_request_middleware from shuup_tests.utils import printable_gibberish from .utils import get_unstocked_package_product_and_stocked_child @pytest.mark.django_db @pytest.mark.parametrize("storage", [ "shuup.front.basket.storage:DirectSessionBasketStorage", "shuup.front.basket.storage:DatabaseBasketStorage", ]) def test_basket(rf, storage): StoredBasket.objects.all().delete() quantities = [3, 12, 44, 23, 65] shop = get_default_shop() get_default_payment_method() # Can't create baskets without payment methods supplier = get_default_supplier() products_and_quantities = [] for quantity in quantities: product = create_product(printable_gibberish(), shop=shop, supplier=supplier, default_price=50) products_and_quantities.append((product, quantity)) is_database = (storage == "shuup.front.basket.storage:DatabaseBasketStorage") with override_settings(SHUUP_BASKET_STORAGE_CLASS_SPEC=storage): for product, q in products_and_quantities: request = rf.get("/") request.session = {} request.shop = shop apply_request_middleware(request) basket = get_basket(request) assert basket == request.basket assert basket.product_count == 0 line = basket.add_product(supplier=supplier, shop=shop, product=product, quantity=q) assert line.quantity == q assert basket.get_lines() assert basket.get_product_ids_and_quantities().get(product.pk) == q assert basket.product_count == q basket.save() delattr(request, "basket") basket = get_basket(request) assert basket.get_product_ids_and_quantities().get(product.pk) == q if is_database: product_ids = set(StoredBasket.objects.last().products.values_list("id", flat=True)) assert product_ids == set([product.pk]) if is_database: stats = StoredBasket.objects.all().aggregate( n=Sum("product_count"), tfs=Sum("taxful_total_price_value"), tls=Sum("taxless_total_price_value"), ) assert stats["n"] == sum(quantities) if shop.prices_include_tax: assert stats["tfs"] == sum(quantities) * 50 else: assert stats["tls"] == sum(quantities) * 50 basket.finalize() @pytest.mark.django_db def test_basket_dirtying_with_fnl(rf): shop = get_default_shop() supplier = get_default_supplier() product = create_product(printable_gibberish(), shop=shop, supplier=supplier, default_price=50) request = rf.get("/") request.session = {} request.shop = shop apply_request_middleware(request) basket = get_basket(request) line = basket.add_product( supplier=supplier, shop=shop, product=product, quantity=1, force_new_line=True, extra={"foo": "foo"} ) assert basket.dirty # The change should have dirtied the basket @pytest.mark.django_db def test_basket_shipping_error(rf): StoredBasket.objects.all().delete() shop = get_default_shop() supplier = get_default_supplier() shipped_product = create_product( printable_gibberish(), shop=shop, supplier=supplier, default_price=50, shipping_mode=ShippingMode.SHIPPED ) unshipped_product = create_product( printable_gibberish(), shop=shop, 
supplier=supplier, default_price=50, shipping_mode=ShippingMode.NOT_SHIPPED ) request = rf.get("/") request.session = {} request.shop = shop apply_request_middleware(request) basket = get_basket(request) # With a shipped product but no shipping methods, we oughta get an error basket.add_product(supplier=supplier, shop=shop, product=shipped_product, quantity=1) assert any(ve.code == "no_common_shipping" for ve in basket.get_validation_errors()) basket.clear_all() # But with an unshipped product, we should not basket.add_product(supplier=supplier, shop=shop, product=unshipped_product, quantity=1) assert not any(ve.code == "no_common_shipping" for ve in basket.get_validation_errors()) @pytest.mark.django_db def test_basket_orderability_change(rf): StoredBasket.objects.all().delete() shop = get_default_shop() supplier = get_default_supplier() product = create_product(printable_gibberish(), shop=shop, supplier=supplier, default_price=50) request = rf.get("/") request.session = {} request.shop = shop apply_request_middleware(request) basket = get_basket(request) line = basket.add_product( supplier=supplier, shop=shop, product=product, quantity=1, force_new_line=True, extra={"foo": "foo"} ) assert len(basket.get_lines()) == 1 assert len(basket.get_unorderable_lines()) == 0 product.soft_delete() assert basket.dirty assert len(basket.get_lines()) == 0 assert len(basket.get_unorderable_lines()) == 1 @pytest.mark.django_db def test_basket_package_product_orderability_change(rf): if "shuup.simple_supplier" not in settings.INSTALLED_APPS: pytest.skip("Need shuup.simple_supplier in INSTALLED_APPS") from shuup_tests.simple_supplier.utils import get_simple_supplier StoredBasket.objects.all().delete() shop = get_default_shop() supplier = get_simple_supplier() product, child = get_unstocked_package_product_and_stocked_child(shop, supplier, child_logical_quantity=2) request = rf.get("/") request.session = {} request.shop = shop apply_request_middleware(request) basket = get_basket(request) # Add the package parent basket.add_product( supplier=supplier, shop=shop, product=product, quantity=1, force_new_line=True, extra={"foo": "foo"} ) # Also add the child product separately basket.add_product( supplier=supplier, shop=shop, product=child, quantity=1, force_new_line=True, extra={"foo": "foo"} ) # Should be stock for both assert len(basket.get_lines()) == 2 assert len(basket.get_unorderable_lines()) == 0 supplier.adjust_stock(child.id, -1) assert basket.dirty # After reducing stock to 1, should only be stock for one assert len(basket.get_lines()) == 1 assert len(basket.get_unorderable_lines()) == 1 supplier.adjust_stock(child.id, -1) assert basket.dirty # After reducing stock to 0, should be stock for neither assert len(basket.get_lines()) == 0 assert len(basket.get_unorderable_lines()) == 2
agpl-3.0
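The storage parametrization in test_basket above works because the basket backend is resolved through the SHUUP_BASKET_STORAGE_CLASS_SPEC setting, which override_settings swaps per run. The pattern in miniature; this assumes a configured Django settings module, as the real suite has:

import pytest
from django.conf import settings
from django.test.utils import override_settings

@pytest.mark.parametrize("storage", [
    "shuup.front.basket.storage:DirectSessionBasketStorage",
    "shuup.front.basket.storage:DatabaseBasketStorage",
])
def test_storage_spec_is_applied(storage):
    with override_settings(SHUUP_BASKET_STORAGE_CLASS_SPEC=storage):
        assert settings.SHUUP_BASKET_STORAGE_CLASS_SPEC == storage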
Hasimir/cryptography
src/cryptography/hazmat/primitives/padding.py
13
3635
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import abc

import six

from cryptography import utils
from cryptography.exceptions import AlreadyFinalized
from cryptography.hazmat.bindings._padding import lib


@six.add_metaclass(abc.ABCMeta)
class PaddingContext(object):
    @abc.abstractmethod
    def update(self, data):
        """
        Pads the provided bytes and returns any available data as bytes.
        """

    @abc.abstractmethod
    def finalize(self):
        """
        Finalize the padding, returns bytes.
        """


class PKCS7(object):
    def __init__(self, block_size):
        if not (0 <= block_size < 256):
            raise ValueError("block_size must be in range(0, 256).")

        if block_size % 8 != 0:
            raise ValueError("block_size must be a multiple of 8.")

        self.block_size = block_size

    def padder(self):
        return _PKCS7PaddingContext(self.block_size)

    def unpadder(self):
        return _PKCS7UnpaddingContext(self.block_size)


@utils.register_interface(PaddingContext)
class _PKCS7PaddingContext(object):
    def __init__(self, block_size):
        self.block_size = block_size
        # TODO: more copies than necessary, we should use zero-buffer (#193)
        self._buffer = b""

    def update(self, data):
        if self._buffer is None:
            raise AlreadyFinalized("Context was already finalized.")

        if not isinstance(data, bytes):
            raise TypeError("data must be bytes.")

        self._buffer += data

        finished_blocks = len(self._buffer) // (self.block_size // 8)

        result = self._buffer[:finished_blocks * (self.block_size // 8)]
        self._buffer = self._buffer[finished_blocks * (self.block_size // 8):]

        return result

    def finalize(self):
        if self._buffer is None:
            raise AlreadyFinalized("Context was already finalized.")

        pad_size = self.block_size // 8 - len(self._buffer)
        result = self._buffer + six.int2byte(pad_size) * pad_size
        self._buffer = None
        return result


@utils.register_interface(PaddingContext)
class _PKCS7UnpaddingContext(object):
    def __init__(self, block_size):
        self.block_size = block_size
        # TODO: more copies than necessary, we should use zero-buffer (#193)
        self._buffer = b""

    def update(self, data):
        if self._buffer is None:
            raise AlreadyFinalized("Context was already finalized.")

        if not isinstance(data, bytes):
            raise TypeError("data must be bytes.")

        self._buffer += data

        finished_blocks = max(
            len(self._buffer) // (self.block_size // 8) - 1,
            0
        )

        result = self._buffer[:finished_blocks * (self.block_size // 8)]
        self._buffer = self._buffer[finished_blocks * (self.block_size // 8):]

        return result

    def finalize(self):
        if self._buffer is None:
            raise AlreadyFinalized("Context was already finalized.")

        if len(self._buffer) != self.block_size // 8:
            raise ValueError("Invalid padding bytes.")

        valid = lib.Cryptography_check_pkcs7_padding(
            self._buffer, self.block_size // 8
        )

        if not valid:
            raise ValueError("Invalid padding bytes.")

        pad_size = six.indexbytes(self._buffer, -1)
        res = self._buffer[:-pad_size]
        self._buffer = None
        return res
bsd-3-clause
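End to end, the padder/unpadder pair above round-trips like this. PKCS7 fills the final block with N bytes each of value N, so a 5-byte message in a 16-byte (128-bit) block gets 11 bytes of 0x0b:

from cryptography.hazmat.primitives import padding

padder = padding.PKCS7(128).padder()
padded = padder.update(b"hello") + padder.finalize()
assert padded == b"hello" + b"\x0b" * 11   # 11 padding bytes of value 11

unpadder = padding.PKCS7(128).unpadder()
assert unpadder.update(padded) + unpadder.finalize() == b"hello"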
lanfker/tdma_imac
src/aodv/bindings/modulegen__gcc_ILP32.py
29
433522
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers

import pybindgen.settings
import warnings

class ErrorHandler(pybindgen.settings.ErrorHandler):
    def handle_error(self, wrapper, exception, traceback_):
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
pybindgen.settings.error_handler = ErrorHandler()

import sys

def module_init():
    root_module = Module('ns.aodv', cpp_namespace='::ns3')
    return root_module

def register_types(module):
    root_module = module.get_root()

    ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacType [enumeration]
    module.add_enum('WifiMacType', ['WIFI_MAC_CTL_RTS', 'WIFI_MAC_CTL_CTS', 'WIFI_MAC_CTL_ACK', 'WIFI_MAC_CTL_BACKREQ', 'WIFI_MAC_CTL_BACKRESP', 'WIFI_MAC_MGT_BEACON', 'WIFI_MAC_MGT_ASSOCIATION_REQUEST', 'WIFI_MAC_MGT_ASSOCIATION_RESPONSE', 'WIFI_MAC_MGT_DISASSOCIATION', 'WIFI_MAC_MGT_REASSOCIATION_REQUEST', 'WIFI_MAC_MGT_REASSOCIATION_RESPONSE', 'WIFI_MAC_MGT_PROBE_REQUEST', 'WIFI_MAC_MGT_PROBE_RESPONSE', 'WIFI_MAC_MGT_AUTHENTICATION', 'WIFI_MAC_MGT_DEAUTHENTICATION', 'WIFI_MAC_MGT_ACTION', 'WIFI_MAC_MGT_ACTION_NO_ACK', 'WIFI_MAC_MGT_MULTIHOP_ACTION', 'WIFI_MAC_DATA', 'WIFI_MAC_DATA_CFACK', 'WIFI_MAC_DATA_CFPOLL', 'WIFI_MAC_DATA_CFACK_CFPOLL', 'WIFI_MAC_DATA_NULL', 'WIFI_MAC_DATA_NULL_CFACK', 'WIFI_MAC_DATA_NULL_CFPOLL', 'WIFI_MAC_DATA_NULL_CFACK_CFPOLL', 'WIFI_MAC_QOSDATA', 'WIFI_MAC_QOSDATA_CFACK', 'WIFI_MAC_QOSDATA_CFPOLL', 'WIFI_MAC_QOSDATA_CFACK_CFPOLL', 'WIFI_MAC_QOSDATA_NULL', 'WIFI_MAC_QOSDATA_NULL_CFPOLL', 'WIFI_MAC_QOSDATA_NULL_CFACK_CFPOLL'], import_from_module='ns.wifi')
    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## packet.h (module 'network'): ns3::ByteTagIterator [class]
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
    module.add_class('ByteTagList', import_from_module='ns.network')
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<0> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0'])
    ## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<1> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1'])
    ## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<2> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2'])
    ## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<3> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3'])
    ## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<4> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4'])
    ## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<5> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5'])
    ## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core')
    ## int-to-type.h (module 'core'): ns3::IntToType<6> [struct]
    module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6'])
    ## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration]
    module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    module.add_class('Ipv4Address', import_from_module='ns.network')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
    module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
    module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class]
    module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    module.add_class('Ipv6Address', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    module.add_class('Mac48Address', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## node-container.h (module 'network'): ns3::NodeContainer [class]
    module.add_class('NodeContainer', import_from_module='ns.network')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
    module.add_class('PacketMetadata', import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
    module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet.h (module 'network'): ns3::PacketTagIterator [class]
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
    module.add_class('PacketTagList', import_from_module='ns.network')
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## timer.h (module 'core'): ns3::Timer [class]
    module.add_class('Timer', import_from_module='ns.core')
    ## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration]
    module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
    ## timer.h (module 'core'): ns3::Timer::State [enumeration]
    module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
    ## timer-impl.h (module 'core'): ns3::TimerImpl [class]
    module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## aodv-helper.h (module 'aodv'): ns3::AodvHelper [class]
    module.add_class('AodvHelper', parent=root_module['ns3::Ipv4RoutingHelper'])
    ## chunk.h (module 'network'): ns3::Chunk [class]
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## header.h (module 'network'): ns3::Header [class]
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class]
    module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration]
    module.add_enum('DscpType', ['DscpDefault', 'CS1', 'AF11', 'AF12', 'AF13', 'CS2', 'AF21', 'AF22', 'AF23', 'CS3', 'AF31', 'AF32', 'AF33', 'CS4', 'AF41', 'AF42', 'AF43', 'CS5', 'EF', 'CS6', 'CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration]
    module.add_enum('EcnType', ['NotECT', 'ECT1', 'ECT0', 'CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## socket.h (module 'network'): ns3::Socket [class]
    module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration]
    module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration]
    module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::SocketAddressTag [class]
    module.add_class('SocketAddressTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketIpTtlTag [class]
    module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class]
    module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## trailer.h (module 'network'): ns3::Trailer [class]
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader [class]
    module.add_class('WifiMacHeader', import_from_module='ns.wifi', parent=root_module['ns3::Header'])
    ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader::QosAckPolicy [enumeration]
    module.add_enum('QosAckPolicy', ['NORMAL_ACK', 'NO_ACK', 'NO_EXPLICIT_ACK', 'BLOCK_ACK'], outer_class=root_module['ns3::WifiMacHeader'], import_from_module='ns.wifi')
    ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader::AddressType [enumeration]
    module.add_enum('AddressType', ['ADDR1', 'ADDR2', 'ADDR3', 'ADDR4'], outer_class=root_module['ns3::WifiMacHeader'], import_from_module='ns.wifi')
    ## arp-cache.h (module 'internet'): ns3::ArpCache [class]
    module.add_class('ArpCache', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## arp-cache.h (module 'internet'): ns3::ArpCache::Entry [class]
    module.add_class('Entry', import_from_module='ns.internet', outer_class=root_module['ns3::ArpCache'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## enum.h (module 'core'): ns3::EnumChecker [class]
    module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## enum.h (module 'core'): ns3::EnumValue [class]
    module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## ipv4.h (module 'internet'): ns3::Ipv4 [class]
    module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface [class]
    module.add_class('Ipv4Interface', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol [class]
    module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4'])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::DropReason [enumeration]
    module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol [class]
    module.add_class('Ipv4L4Protocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::RxStatus [enumeration]
    module.add_enum('RxStatus', ['RX_OK', 'RX_CSUM_FAILED', 'RX_ENDPOINT_CLOSED', 'RX_ENDPOINT_UNREACH'], outer_class=root_module['ns3::Ipv4L4Protocol'], import_from_module='ns.internet')
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class]
    module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class]
    module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class]
    module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
    module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## nstime.h (module 'core'): ns3::TimeChecker [class]
    module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_container('std::list< ns3::Ptr< ns3::Packet > >', 'ns3::Ptr< ns3::Packet >', container_type='list')
    module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type='map')

    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)

    ## Register a nested module for the namespace aodv
    nested_module = module.add_cpp_namespace('aodv')
    register_types_ns3_aodv(nested_module)

def register_types_ns3_FatalImpl(module):
    root_module = module.get_root()

def register_types_ns3_aodv(module):
    root_module = module.get_root()

    ## aodv-packet.h (module 'aodv'): ns3::aodv::MessageType [enumeration]
    module.add_enum('MessageType', ['AODVTYPE_RREQ', 'AODVTYPE_RREP', 'AODVTYPE_RERR', 'AODVTYPE_RREP_ACK'])
    ## aodv-rtable.h (module 'aodv'): ns3::aodv::RouteFlags [enumeration]
    module.add_enum('RouteFlags', ['VALID', 'INVALID', 'IN_SEARCH'])
    ## aodv-dpd.h (module 'aodv'): ns3::aodv::DuplicatePacketDetection [class]
    module.add_class('DuplicatePacketDetection')
    ## aodv-id-cache.h (module 'aodv'): ns3::aodv::IdCache [class]
    module.add_class('IdCache')
    ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors [class]
    module.add_class('Neighbors')
    ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbor [struct]
    module.add_class('Neighbor', outer_class=root_module['ns3::aodv::Neighbors'])
    ## aodv-rqueue.h (module 'aodv'): ns3::aodv::QueueEntry [class]
    module.add_class('QueueEntry')
    ## aodv-rqueue.h (module 'aodv'): ns3::aodv::RequestQueue [class]
    module.add_class('RequestQueue')
    ## aodv-packet.h (module 'aodv'): ns3::aodv::RerrHeader [class]
    module.add_class('RerrHeader', parent=root_module['ns3::Header'])
    ## aodv-routing-protocol.h (module 'aodv'): ns3::aodv::RoutingProtocol [class]
    module.add_class('RoutingProtocol', parent=root_module['ns3::Ipv4RoutingProtocol'])
    ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTable [class]
    module.add_class('RoutingTable')
    ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTableEntry [class]
    module.add_class('RoutingTableEntry')
    ## aodv-packet.h (module 'aodv'): ns3::aodv::RrepAckHeader [class]
    module.add_class('RrepAckHeader', parent=root_module['ns3::Header'])
    ## aodv-packet.h (module 'aodv'): ns3::aodv::RrepHeader [class]
    module.add_class('RrepHeader', parent=root_module['ns3::Header'])
    ## aodv-packet.h (module 'aodv'): ns3::aodv::RreqHeader [class]
    module.add_class('RreqHeader', parent=root_module['ns3::Header'])
    ## aodv-packet.h (module 'aodv'): ns3::aodv::TypeHeader [class]
    module.add_class('TypeHeader', parent=root_module['ns3::Header'])
    module.add_container('std::map< ns3::Ipv4Address, unsigned int >', ('ns3::Ipv4Address', 'unsigned int'), container_type='map')
    module.add_container('std::vector< ns3::Ipv4Address >', 'ns3::Ipv4Address', container_type='vector')

def register_methods(root_module):
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
    register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
    register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
    register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
    register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
    register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
    register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
    register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3AodvHelper_methods(root_module, root_module['ns3::AodvHelper'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3WifiMacHeader_methods(root_module, root_module['ns3::WifiMacHeader'])
    register_Ns3ArpCache_methods(root_module, root_module['ns3::ArpCache'])
    register_Ns3ArpCacheEntry_methods(root_module, root_module['ns3::ArpCache::Entry'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4Interface_methods(root_module, root_module['ns3::Ipv4Interface'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4L4Protocol_methods(root_module, root_module['ns3::Ipv4L4Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3AodvDuplicatePacketDetection_methods(root_module, root_module['ns3::aodv::DuplicatePacketDetection'])
    register_Ns3AodvIdCache_methods(root_module, root_module['ns3::aodv::IdCache'])
    register_Ns3AodvNeighbors_methods(root_module, root_module['ns3::aodv::Neighbors'])
    register_Ns3AodvNeighborsNeighbor_methods(root_module, root_module['ns3::aodv::Neighbors::Neighbor'])
    register_Ns3AodvQueueEntry_methods(root_module, root_module['ns3::aodv::QueueEntry'])
    register_Ns3AodvRequestQueue_methods(root_module, root_module['ns3::aodv::RequestQueue'])
    register_Ns3AodvRerrHeader_methods(root_module, root_module['ns3::aodv::RerrHeader'])
    register_Ns3AodvRoutingProtocol_methods(root_module, root_module['ns3::aodv::RoutingProtocol'])
    register_Ns3AodvRoutingTable_methods(root_module, root_module['ns3::aodv::RoutingTable'])
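    # The register_Ns3Aodv*_methods calls in this stretch wire up the AODV
    # routing classes declared in register_types_ns3_aodv(). Once the
    # generated extension is compiled, they surface in Python roughly like
    # this (illustrative usage only; assumes the standard ns-3 Python
    # bindings layout, not something defined in this file):
    #
    #     import ns.aodv
    #     import ns.core
    #     aodv = ns.aodv.AodvHelper()  # Ipv4RoutingHelper subclass from above
    #     aodv.Set('HelloInterval', ns.core.TimeValue(ns.core.Seconds(1)))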
    register_Ns3AodvRoutingTableEntry_methods(root_module, root_module['ns3::aodv::RoutingTableEntry'])
    register_Ns3AodvRrepAckHeader_methods(root_module, root_module['ns3::aodv::RrepAckHeader'])
    register_Ns3AodvRrepHeader_methods(root_module, root_module['ns3::aodv::RrepHeader'])
    register_Ns3AodvRreqHeader_methods(root_module, root_module['ns3::aodv::RreqHeader'])
    register_Ns3AodvTypeHeader_methods(root_module, root_module['ns3::aodv::TypeHeader'])
    return

def register_Ns3Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3Buffer_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return

def register_Ns3ByteTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
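    # Each cls.add_method() entry above mirrors one C++ member function: the
    # positional arguments give the Python-visible name, the return type and
    # the parameter list, while keyword flags such as is_const/is_static
    # describe the C++ signature pybindgen should emit. The entry just above,
    # for example, is what lets Python code call item.GetTypeId() on a
    # wrapped ns3::ByteTagIterator::Item and get back an ns3::TypeId.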
return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start 
[variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3EventId_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventId const &', 'arg0')]) ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor] cls.add_constructor([]) ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')]) ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function] cls.add_method('GetContext', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3IntToType__0_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')]) return def register_Ns3IntToType__1_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [copy 
constructor] cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')]) return def register_Ns3IntToType__2_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')]) return def register_Ns3IntToType__3_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')]) return def register_Ns3IntToType__4_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')]) return def register_Ns3IntToType__5_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')]) return def register_Ns3IntToType__6_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')]) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('<') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): 
static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor] cls.add_constructor([]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [copy 
constructor] cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function] cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function] cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function] cls.add_method('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function] cls.add_method('IsSecondary', 'bool', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) [member function] cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function] cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function] cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function] cls.add_method('SetPrimary', 'void', []) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SetScope', 'void', [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function] cls.add_method('SetSecondary', 'void', []) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 
'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv4RoutingHelper_methods(root_module, cls): ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper() [constructor] cls.add_constructor([]) ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper(ns3::Ipv4RoutingHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4RoutingHelper const &', 'arg0')]) ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper * ns3::Ipv4RoutingHelper::Copy() const [member function] cls.add_method('Copy', 'ns3::Ipv4RoutingHelper *', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-helper.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4RoutingHelper::Create(ns3::Ptr<ns3::Node> node) const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAllAt(ns3::Time printTime, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTableAllAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True) ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAllEvery(ns3::Time printInterval, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTableAllEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True) ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAt(ns3::Time printTime, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTableAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True) ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableEvery(ns3::Time printInterval, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] 
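    # --- annotation (not pybindgen output) ----------------------------------
    # Copy() and Create() are registered as pure virtual above, so
    # Ipv4RoutingHelper is abstract on the Python side as well; concrete
    # helpers (static routing, list routing, ...) implement them. The Print*
    # methods schedule routing-table dumps into an OutputStreamWrapper.
    # Hedged sketch ('helper' is a concrete routing helper and 'stream' an
    # ns3::OutputStreamWrapper, both assumed created elsewhere):
    #
    #     helper.PrintRoutingTableAllAt(ns.core.Seconds(5.0), stream)
    # -------------------------------------------------------------------------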
    cls.add_method('PrintRoutingTableEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True)
    return

def register_Ns3Ipv6Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
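    # --- annotation (not pybindgen output) ----------------------------------
    # Besides the Is*/Get* predicates and statics in this block, the static
    # MakeAutoconfiguredLinkLocalAddress() registered further down derives an
    # fe80::/64 address from a MAC using the EUI-64 scheme. Hedged sketch
    # (module names assumed):
    #
    #     mac = ns.network.Mac48Address("00:11:22:33:44:55")
    #     lla = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
    #     assert lla.IsLinkLocal()
    # -------------------------------------------------------------------------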
cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): 
ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3Mac48Address_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('<') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor] cls.add_constructor([param('char const *', 'str')]) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function] cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function] cls.add_method('CopyFrom', 
'void', [param('uint8_t const *', 'buffer')]) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function] cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function] cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function] cls.add_method('IsGroup', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 
'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], 
is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): 
ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 
'network'): static void ns3::PacketMetadata::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 'bool', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PacketMetadataItemIterator_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor] cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 
'buffer')]) ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketMetadata::Item', []) return def register_Ns3PacketTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketTagIterator::Item', []) return def register_Ns3PacketTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3PacketTagList_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor] cls.add_constructor([param('ns3::PacketTagList const &', 'o')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function] cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function] cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function] cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function] cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3PacketTagListTagData_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 20 ]', 
is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Next() [member function] cls.add_method('Next', 'ns3::Time', [], is_static=True, deprecated=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static 
void ns3::Simulator::RunOneEvent() [member function] cls.add_method('RunOneEvent', 'void', [], is_static=True, deprecated=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t 

def register_Ns3TagBuffer_methods(root_module, cls):
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return

def register_Ns3Timer_methods(root_module, cls):
    ## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Timer const &', 'arg0')])
    ## timer.h (module 'core'): ns3::Timer::Timer() [constructor]
    cls.add_constructor([])
    ## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer::DestroyPolicy destroyPolicy) [constructor]
    cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')])
    ## timer.h (module 'core'): void ns3::Timer::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelay() const [member function]
    cls.add_method('GetDelay', 'ns3::Time', [], is_const=True)
    ## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelayLeft() const [member function]
    cls.add_method('GetDelayLeft', 'ns3::Time', [], is_const=True)
    ## timer.h (module 'core'): ns3::Timer::State ns3::Timer::GetState() const [member function]
    cls.add_method('GetState', 'ns3::Timer::State', [], is_const=True)
    ## timer.h (module 'core'): bool ns3::Timer::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## timer.h (module 'core'): bool ns3::Timer::IsRunning() const [member function]
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    ## timer.h (module 'core'): bool ns3::Timer::IsSuspended() const [member function]
    cls.add_method('IsSuspended', 'bool', [], is_const=True)
    ## timer.h (module 'core'): void ns3::Timer::Remove() [member function]
    cls.add_method('Remove', 'void', [])
    ## timer.h (module 'core'): void ns3::Timer::Resume() [member function]
    cls.add_method('Resume', 'void', [])
    ## timer.h (module 'core'): void ns3::Timer::Schedule() [member function]
    cls.add_method('Schedule', 'void', [])
    ## timer.h (module 'core'): void ns3::Timer::Schedule(ns3::Time delay) [member function]
    cls.add_method('Schedule', 'void', [param('ns3::Time', 'delay')])
    ## timer.h (module 'core'): void ns3::Timer::SetDelay(ns3::Time const & delay) [member function]
    cls.add_method('SetDelay', 'void', [param('ns3::Time const &', 'delay')])
    ## timer.h (module 'core'): void ns3::Timer::Suspend() [member function]
    cls.add_method('Suspend', 'void', [])
    return
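
# Illustrative sketch (editorial addition): the ns3::Timer methods registered
# above cover delay and state management; Timer::SetFunction is a template and
# is wired up by separate custom hooks, so it is deliberately absent here.
def _example_timer_usage():
    # Hypothetical helper; nothing in this generated module calls it.
    import ns.core
    t = ns.core.Timer()
    t.SetDelay(ns.core.Seconds(2.0))   # SetDelay(ns3::Time const &) above
    if not t.IsRunning():              # IsRunning() const above
        t.Schedule()                   # zero-argument Schedule() above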

def register_Ns3TimerImpl_methods(root_module, cls):
    ## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl() [constructor]
    cls.add_constructor([])
    ## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl(ns3::TimerImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimerImpl const &', 'arg0')])
    ## timer-impl.h (module 'core'): void ns3::TimerImpl::Invoke() [member function]
    cls.add_method('Invoke', 'void', [], is_pure_virtual=True, is_virtual=True)
    ## timer-impl.h (module 'core'): ns3::EventId ns3::TimerImpl::Schedule(ns3::Time const & delay) [member function]
    cls.add_method('Schedule', 'ns3::EventId', [param('ns3::Time const &', 'delay')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3TypeId_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info')], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')])
    return

def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return

def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return

def register_Ns3Empty_methods(root_module, cls):
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
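
# Illustrative sketch (editorial addition): the TypeId registrations above
# enable runtime introspection from Python. The TypeId name below is ns-3's
# AODV routing protocol; any registered TypeId name works the same way.
def _example_typeid_lookup():
    # Hypothetical helper; nothing in this generated module calls it.
    import ns.core
    tid = ns.core.TypeId.LookupByName("ns3::aodv::RoutingProtocol")  # static LookupByName above
    for i in range(tid.GetAttributeN()):                             # GetAttributeN above
        print(tid.GetAttributeFullName(i))                           # e.g. ns3::aodv::RoutingProtocol::HelloInterval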

def register_Ns3Int64x64_t_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    return

def register_Ns3AodvHelper_methods(root_module, cls):
    ## aodv-helper.h (module 'aodv'): ns3::AodvHelper::AodvHelper(ns3::AodvHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AodvHelper const &', 'arg0')])
    ## aodv-helper.h (module 'aodv'): ns3::AodvHelper::AodvHelper() [constructor]
    cls.add_constructor([])
    ## aodv-helper.h (module 'aodv'): ns3::AodvHelper * ns3::AodvHelper::Copy() const [member function]
    cls.add_method('Copy', 'ns3::AodvHelper *', [], is_const=True, is_virtual=True)
    ## aodv-helper.h (module 'aodv'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::AodvHelper::Create(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, is_virtual=True)
    ## aodv-helper.h (module 'aodv'): void ns3::AodvHelper::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
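
# Illustrative sketch (editorial addition): register_Ns3AodvHelper_methods above
# is the heart of this module's Python surface. A minimal AODV setup, assuming
# the stock ns.core/ns.network/ns.internet/ns.aodv module split of the ns-3
# Python bindings:
def _example_aodv_helper():
    # Hypothetical helper; nothing in this generated module calls it.
    import ns.core, ns.network, ns.internet, ns.aodv
    nodes = ns.network.NodeContainer()
    nodes.Create(2)
    aodv = ns.aodv.AodvHelper()
    # Set(name, AttributeValue) above; HelloInterval is an aodv::RoutingProtocol attribute.
    aodv.Set("HelloInterval", ns.core.TimeValue(ns.core.Seconds(1.0)))
    stack = ns.internet.InternetStackHelper()
    stack.SetRoutingHelper(aodv)   # AodvHelper::Create is invoked per node during Install
    stack.Install(nodes)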

def register_Ns3Chunk_methods(root_module, cls):
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Header_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Ipv4Header_methods(root_module, cls):
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header(ns3::Ipv4Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header() [constructor]
    cls.add_constructor([])
    ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::DscpTypeToString(ns3::Ipv4Header::DscpType dscp) const [member function]
    cls.add_method('DscpTypeToString', 'std::string', [param('ns3::Ipv4Header::DscpType', 'dscp')], is_const=True)
    ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::EcnTypeToString(ns3::Ipv4Header::EcnType ecn) const [member function]
    cls.add_method('EcnTypeToString', 'std::string', [param('ns3::Ipv4Header::EcnType', 'ecn')], is_const=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::EnableChecksum() [member function]
    cls.add_method('EnableChecksum', 'void', [])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetDestination() const [member function]
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType ns3::Ipv4Header::GetDscp() const [member function]
    cls.add_method('GetDscp', 'ns3::Ipv4Header::DscpType', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType ns3::Ipv4Header::GetEcn() const [member function]
    cls.add_method('GetEcn', 'ns3::Ipv4Header::EcnType', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetFragmentOffset() const [member function]
    cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetIdentification() const [member function]
    cls.add_method('GetIdentification', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::TypeId ns3::Ipv4Header::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetPayloadSize() const [member function]
    cls.add_method('GetPayloadSize', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetProtocol() const [member function]
    cls.add_method('GetProtocol', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetSource() const [member function]
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTos() const [member function]
    cls.add_method('GetTos', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTtl() const [member function]
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): static ns3::TypeId ns3::Ipv4Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsChecksumOk() const [member function]
    cls.add_method('IsChecksumOk', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsDontFragment() const [member function]
    cls.add_method('IsDontFragment', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsLastFragment() const [member function]
    cls.add_method('IsLastFragment', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDestination(ns3::Ipv4Address destination) [member function]
    cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'destination')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDontFragment() [member function]
    cls.add_method('SetDontFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDscp(ns3::Ipv4Header::DscpType dscp) [member function]
    cls.add_method('SetDscp', 'void', [param('ns3::Ipv4Header::DscpType', 'dscp')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetEcn(ns3::Ipv4Header::EcnType ecn) [member function]
    cls.add_method('SetEcn', 'void', [param('ns3::Ipv4Header::EcnType', 'ecn')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetFragmentOffset(uint16_t offsetBytes) [member function]
    cls.add_method('SetFragmentOffset', 'void', [param('uint16_t', 'offsetBytes')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetIdentification(uint16_t identification) [member function]
    cls.add_method('SetIdentification', 'void', [param('uint16_t', 'identification')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetLastFragment() [member function]
    cls.add_method('SetLastFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMayFragment() [member function]
    cls.add_method('SetMayFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMoreFragments() [member function]
    cls.add_method('SetMoreFragments', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetPayloadSize(uint16_t size) [member function]
    cls.add_method('SetPayloadSize', 'void', [param('uint16_t', 'size')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetProtocol(uint8_t num) [member function]
    cls.add_method('SetProtocol', 'void', [param('uint8_t', 'num')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetSource(ns3::Ipv4Address source) [member function]
    cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'source')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTos(uint8_t tos) [member function]
    cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTtl(uint8_t ttl) [member function]
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return
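
# Illustrative sketch (editorial addition): the Ipv4Header accessors registered
# above can be driven directly from Python, e.g. when crafting headers by hand.
def _example_ipv4_header():
    # Hypothetical helper; nothing in this generated module calls it.
    import ns.internet
    h = ns.internet.Ipv4Header()
    h.SetTtl(64)                   # SetTtl(uint8_t) above
    h.SetProtocol(17)              # 17 = UDP
    h.SetPayloadSize(100)          # SetPayloadSize(uint16_t) above
    print(h.GetSerializedSize())   # 20 bytes for a plain IPv4 header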

def register_Ns3Object_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Start() [member function]
    cls.add_method('Start', 'void', [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoStart() [member function]
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
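
# Note (editorial): the register_Ns3SimpleRefCount__* functions above and below
# all follow one generated pattern -- a default constructor, a copy constructor
# and a static Cleanup() -- emitted once per Ptr-managed instantiation of
# ns3::SimpleRefCount. A hypothetical registration for a new type T would read:
#
#   def register_Ns3SimpleRefCount__T_methods(root_module, cls):
#       cls.add_constructor([])
#       cls.add_constructor([param('ns3::SimpleRefCount< T, ... > const &', 'o')])
#       cls.add_method('Cleanup', 'void', [], is_static=True)
#       return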

def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3Socket_methods(root_module, cls):
    ## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Socket const &', 'arg0')])
    ## socket.h (module 'network'): ns3::Socket::Socket() [constructor]
    cls.add_constructor([])
    ## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function]
    cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Bind() [member function]
    cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function]
    cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Close() [member function]
    cls.add_method('Close', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function]
    cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function]
    cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')], is_static=True)
    ## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function]
    cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function]
    cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', [])
    ## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function]
    cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function]
    cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function]
    cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function]
    cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function]
    cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Listen() [member function]
    cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function]
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function]
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [])
    ## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function]
    cls.add_method('Recv', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')])
    ## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
    ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')])
    ## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function]
    cls.add_method('Send', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    ## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
    cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function]
    cls.add_method('SendTo', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
    ## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function]
    cls.add_method('SetAcceptCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
    ## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function]
    cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function]
    cls.add_method('SetCloseCallbacks', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
    ## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function]
    cls.add_method('SetConnectCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
    ## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function]
    cls.add_method('SetDataSentCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
    ## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function]
    cls.add_method('SetRecvCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
    ## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function]
    cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')])
    ## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function]
    cls.add_method('SetSendCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')])
    ## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function]
    cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function]
    cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function]
    cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected')
    ## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function]
    cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function]
    cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function]
    cls.add_method('NotifyDataRecv', 'void', [], visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function]
    cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function]
    cls.add_method('NotifyErrorClose', 'void', [], visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function]
    cls.add_method('NotifyNewConnectionCreated', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')], visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function]
    cls.add_method('NotifyNormalClose', 'void', [], visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function]
    cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected')
    return
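
# Illustrative sketch (editorial addition): the Callback<> parameters in the
# Socket registrations above are wrapped so that plain Python callables can be
# passed. Assuming the stock ns.core/ns.network module layout:
def _example_socket_callbacks():
    # Hypothetical helper; nothing in this generated module calls it.
    import ns.core, ns.network
    def on_recv(socket):
        packet = socket.Recv()   # zero-argument Recv() above
    node = ns.network.Node()
    tid = ns.core.TypeId.LookupByName("ns3::UdpSocketFactory")
    sock = ns.network.Socket.CreateSocket(node, tid)   # static CreateSocket above
    sock.SetRecvCallback(on_recv)                      # SetRecvCallback above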
is_virtual=True) ## socket.h (module 'network'): ns3::Address ns3::SocketAddressTag::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketAddressTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketAddressTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketAddressTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::SetAddress(ns3::Address addr) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')]) return def register_Ns3SocketIpTtlTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag(ns3::SocketIpTtlTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTtlTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpTtlTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpTtlTag::GetTtl() const [member function] cls.add_method('GetTtl', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTtlTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::SetTtl(uint8_t ttl) [member function] cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')]) return def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag(ns3::SocketSetDontFragmentTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')]) ## socket.h (module 'network'): 
ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Disable() [member function] cls.add_method('Disable', 'void', []) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Enable() [member function] cls.add_method('Enable', 'void', []) ## socket.h (module 'network'): ns3::TypeId ns3::SocketSetDontFragmentTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketSetDontFragmentTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketSetDontFragmentTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): bool ns3::SocketSetDontFragmentTag::IsEnabled() const [member function] cls.add_method('IsEnabled', 'bool', [], is_const=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long 
unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'value')]) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], 
is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Trailer_methods(root_module, cls): cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## 
trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3WifiMacHeader_methods(root_module, cls): ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader::WifiMacHeader(ns3::WifiMacHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::WifiMacHeader const &', 'arg0')]) ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader::WifiMacHeader() [constructor] cls.add_constructor([]) ## wifi-mac-header.h (module 'wifi'): uint32_t ns3::WifiMacHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## wifi-mac-header.h (module 'wifi'): ns3::Mac48Address ns3::WifiMacHeader::GetAddr1() const [member function] cls.add_method('GetAddr1', 'ns3::Mac48Address', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): ns3::Mac48Address ns3::WifiMacHeader::GetAddr2() const [member function] cls.add_method('GetAddr2', 'ns3::Mac48Address', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): ns3::Mac48Address ns3::WifiMacHeader::GetAddr3() const [member function] cls.add_method('GetAddr3', 'ns3::Mac48Address', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): ns3::Mac48Address ns3::WifiMacHeader::GetAddr4() const [member function] cls.add_method('GetAddr4', 'ns3::Mac48Address', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): ns3::Time ns3::WifiMacHeader::GetDuration() const [member function] cls.add_method('GetDuration', 'ns3::Time', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): uint16_t ns3::WifiMacHeader::GetFragmentNumber() const [member function] cls.add_method('GetFragmentNumber', 'uint16_t', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): ns3::TypeId ns3::WifiMacHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader::QosAckPolicy ns3::WifiMacHeader::GetQosAckPolicy() const [member function] cls.add_method('GetQosAckPolicy', 'ns3::WifiMacHeader::QosAckPolicy', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): uint8_t ns3::WifiMacHeader::GetQosTid() const [member function] cls.add_method('GetQosTid', 'uint8_t', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): uint8_t ns3::WifiMacHeader::GetQosTxopLimit() const [member function] cls.add_method('GetQosTxopLimit', 'uint8_t', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): uint16_t ns3::WifiMacHeader::GetRawDuration() const [member function] cls.add_method('GetRawDuration', 'uint16_t', [], is_const=True) ## wifi-mac-header.h (module 'wifi'): uint16_t ns3::WifiMacHeader::GetSequenceControl() const [member function] 
    cls.add_method('GetSequenceControl', 'uint16_t', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): uint16_t ns3::WifiMacHeader::GetSequenceNumber() const [member function]
    cls.add_method('GetSequenceNumber', 'uint16_t', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): uint32_t ns3::WifiMacHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## wifi-mac-header.h (module 'wifi'): uint32_t ns3::WifiMacHeader::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacType ns3::WifiMacHeader::GetType() const [member function]
    cls.add_method('GetType', 'ns3::WifiMacType', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): static ns3::TypeId ns3::WifiMacHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## wifi-mac-header.h (module 'wifi'): char const * ns3::WifiMacHeader::GetTypeString() const [member function]
    cls.add_method('GetTypeString', 'char const *', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsAck() const [member function]
    cls.add_method('IsAck', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsAction() const [member function]
    cls.add_method('IsAction', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsAssocReq() const [member function]
    cls.add_method('IsAssocReq', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsAssocResp() const [member function]
    cls.add_method('IsAssocResp', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsAuthentication() const [member function]
    cls.add_method('IsAuthentication', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsBeacon() const [member function]
    cls.add_method('IsBeacon', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsBlockAck() const [member function]
    cls.add_method('IsBlockAck', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsBlockAckReq() const [member function]
    cls.add_method('IsBlockAckReq', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsCfpoll() const [member function]
    cls.add_method('IsCfpoll', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsCtl() const [member function]
    cls.add_method('IsCtl', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsCts() const [member function]
    cls.add_method('IsCts', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsData() const [member function]
    cls.add_method('IsData', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsDeauthentication() const [member function]
    cls.add_method('IsDeauthentication', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsDisassociation() const [member function]
    cls.add_method('IsDisassociation', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsFromDs() const [member function]
    cls.add_method('IsFromDs', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsMgt() const [member function]
    cls.add_method('IsMgt', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsMoreFragments() const [member function]
    cls.add_method('IsMoreFragments', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsMultihopAction() const [member function]
    cls.add_method('IsMultihopAction', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsProbeReq() const [member function]
    cls.add_method('IsProbeReq', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsProbeResp() const [member function]
    cls.add_method('IsProbeResp', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsQosAck() const [member function]
    cls.add_method('IsQosAck', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsQosAmsdu() const [member function]
    cls.add_method('IsQosAmsdu', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsQosBlockAck() const [member function]
    cls.add_method('IsQosBlockAck', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsQosData() const [member function]
    cls.add_method('IsQosData', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsQosEosp() const [member function]
    cls.add_method('IsQosEosp', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsQosNoAck() const [member function]
    cls.add_method('IsQosNoAck', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsReassocReq() const [member function]
    cls.add_method('IsReassocReq', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsReassocResp() const [member function]
    cls.add_method('IsReassocResp', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsRetry() const [member function]
    cls.add_method('IsRetry', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsRts() const [member function]
    cls.add_method('IsRts', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): bool ns3::WifiMacHeader::IsToDs() const [member function]
    cls.add_method('IsToDs', 'bool', [], is_const=True)
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetAction() [member function]
    cls.add_method('SetAction', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetAddr1(ns3::Mac48Address address) [member function]
    cls.add_method('SetAddr1', 'void', [param('ns3::Mac48Address', 'address')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetAddr2(ns3::Mac48Address address) [member function]
    cls.add_method('SetAddr2', 'void', [param('ns3::Mac48Address', 'address')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetAddr3(ns3::Mac48Address address) [member function]
    cls.add_method('SetAddr3', 'void', [param('ns3::Mac48Address', 'address')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetAddr4(ns3::Mac48Address address) [member function]
    cls.add_method('SetAddr4', 'void', [param('ns3::Mac48Address', 'address')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetAssocReq() [member function]
    cls.add_method('SetAssocReq', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetAssocResp() [member function]
    cls.add_method('SetAssocResp', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetBeacon() [member function]
    cls.add_method('SetBeacon', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetBlockAck() [member function]
    cls.add_method('SetBlockAck', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetBlockAckReq() [member function]
    cls.add_method('SetBlockAckReq', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetDsFrom() [member function]
    cls.add_method('SetDsFrom', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetDsNotFrom() [member function]
    cls.add_method('SetDsNotFrom', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetDsNotTo() [member function]
    cls.add_method('SetDsNotTo', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetDsTo() [member function]
    cls.add_method('SetDsTo', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetDuration(ns3::Time duration) [member function]
    cls.add_method('SetDuration', 'void', [param('ns3::Time', 'duration')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetFragmentNumber(uint8_t frag) [member function]
    cls.add_method('SetFragmentNumber', 'void', [param('uint8_t', 'frag')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetId(uint16_t id) [member function]
    cls.add_method('SetId', 'void', [param('uint16_t', 'id')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetMoreFragments() [member function]
    cls.add_method('SetMoreFragments', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetMultihopAction() [member function]
    cls.add_method('SetMultihopAction', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetNoMoreFragments() [member function]
    cls.add_method('SetNoMoreFragments', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetNoRetry() [member function]
    cls.add_method('SetNoRetry', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetProbeReq() [member function]
    cls.add_method('SetProbeReq', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetProbeResp() [member function]
    cls.add_method('SetProbeResp', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosAckPolicy(ns3::WifiMacHeader::QosAckPolicy arg0) [member function]
    cls.add_method('SetQosAckPolicy', 'void', [param('ns3::WifiMacHeader::QosAckPolicy', 'arg0')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosAmsdu() [member function]
    cls.add_method('SetQosAmsdu', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosBlockAck() [member function]
    cls.add_method('SetQosBlockAck', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosEosp() [member function]
    cls.add_method('SetQosEosp', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosNoAck() [member function]
    cls.add_method('SetQosNoAck', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosNoAmsdu() [member function]
    cls.add_method('SetQosNoAmsdu', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosNoEosp() [member function]
    cls.add_method('SetQosNoEosp', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosNormalAck() [member function]
    cls.add_method('SetQosNormalAck', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosTid(uint8_t tid) [member function]
    cls.add_method('SetQosTid', 'void', [param('uint8_t', 'tid')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetQosTxopLimit(uint8_t txop) [member function]
    cls.add_method('SetQosTxopLimit', 'void', [param('uint8_t', 'txop')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetRawDuration(uint16_t duration) [member function]
    cls.add_method('SetRawDuration', 'void', [param('uint16_t', 'duration')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetRetry() [member function]
    cls.add_method('SetRetry', 'void', [])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetSequenceNumber(uint16_t seq) [member function]
    cls.add_method('SetSequenceNumber', 'void', [param('uint16_t', 'seq')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetType(ns3::WifiMacType type) [member function]
    cls.add_method('SetType', 'void', [param('ns3::WifiMacType', 'type')])
    ## wifi-mac-header.h (module 'wifi'): void ns3::WifiMacHeader::SetTypeData() [member function]
    cls.add_method('SetTypeData', 'void', [])
    return

def register_Ns3ArpCache_methods(root_module, cls):
    ## arp-cache.h (module 'internet'): ns3::ArpCache::ArpCache() [constructor]
    cls.add_constructor([])
    ## arp-cache.h (module 'internet'): ns3::ArpCache::Entry * ns3::ArpCache::Add(ns3::Ipv4Address to) [member function]
    cls.add_method('Add', 'ns3::ArpCache::Entry *', [param('ns3::Ipv4Address', 'to')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::Flush() [member function]
    cls.add_method('Flush', 'void', [])
    ## arp-cache.h (module 'internet'): ns3::Time ns3::ArpCache::GetAliveTimeout() const [member function]
    cls.add_method('GetAliveTimeout', 'ns3::Time', [], is_const=True)
    ## arp-cache.h (module 'internet'): ns3::Time ns3::ArpCache::GetDeadTimeout() const [member function]
    cls.add_method('GetDeadTimeout', 'ns3::Time', [], is_const=True)
    ## arp-cache.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::ArpCache::GetDevice() const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    ## arp-cache.h (module 'internet'): ns3::Ptr<ns3::Ipv4Interface> ns3::ArpCache::GetInterface() const [member function]
    cls.add_method('GetInterface', 'ns3::Ptr< ns3::Ipv4Interface >', [], is_const=True)
    ## arp-cache.h (module 'internet'): static ns3::TypeId ns3::ArpCache::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## arp-cache.h (module 'internet'): ns3::Time ns3::ArpCache::GetWaitReplyTimeout() const [member function]
    cls.add_method('GetWaitReplyTimeout', 'ns3::Time', [], is_const=True)
    ## arp-cache.h (module 'internet'): ns3::ArpCache::Entry * ns3::ArpCache::Lookup(ns3::Ipv4Address destination) [member function]
    cls.add_method('Lookup', 'ns3::ArpCache::Entry *', [param('ns3::Ipv4Address', 'destination')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::SetAliveTimeout(ns3::Time aliveTimeout) [member function]
    cls.add_method('SetAliveTimeout', 'void', [param('ns3::Time', 'aliveTimeout')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::SetArpRequestCallback(ns3::Callback<void, ns3::Ptr<ns3::ArpCache const>, ns3::Ipv4Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arpRequestCallback) [member function]
    cls.add_method('SetArpRequestCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::ArpCache const >, ns3::Ipv4Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arpRequestCallback')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::SetDeadTimeout(ns3::Time deadTimeout) [member function]
    cls.add_method('SetDeadTimeout', 'void', [param('ns3::Time', 'deadTimeout')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::SetDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::Ipv4Interface> interface) [member function]
    cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Ipv4Interface >', 'interface')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::SetWaitReplyTimeout(ns3::Time waitReplyTimeout) [member function]
    cls.add_method('SetWaitReplyTimeout', 'void', [param('ns3::Time', 'waitReplyTimeout')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::StartWaitReplyTimer() [member function]
    cls.add_method('StartWaitReplyTimer', 'void', [])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3ArpCacheEntry_methods(root_module, cls):
    ## arp-cache.h (module 'internet'): ns3::ArpCache::Entry::Entry(ns3::ArpCache::Entry const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ArpCache::Entry const &', 'arg0')])
    ## arp-cache.h (module 'internet'): ns3::ArpCache::Entry::Entry(ns3::ArpCache * arp) [constructor]
    cls.add_constructor([param('ns3::ArpCache *', 'arp')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::Entry::ClearRetries() [member function]
    cls.add_method('ClearRetries', 'void', [])
    ## arp-cache.h (module 'internet'): ns3::Ptr<ns3::Packet> ns3::ArpCache::Entry::DequeuePending() [member function]
    cls.add_method('DequeuePending', 'ns3::Ptr< ns3::Packet >', [])
    ## arp-cache.h (module 'internet'): ns3::Ipv4Address ns3::ArpCache::Entry::GetIpv4Address() const [member function]
    cls.add_method('GetIpv4Address', 'ns3::Ipv4Address', [], is_const=True)
    ## arp-cache.h (module 'internet'): ns3::Address ns3::ArpCache::Entry::GetMacAddress() const [member function]
    cls.add_method('GetMacAddress', 'ns3::Address', [], is_const=True)
    ## arp-cache.h (module 'internet'): uint32_t ns3::ArpCache::Entry::GetRetries() const [member function]
    cls.add_method('GetRetries', 'uint32_t', [], is_const=True)
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::Entry::IncrementRetries() [member function]
    cls.add_method('IncrementRetries', 'void', [])
    ## arp-cache.h (module 'internet'): bool ns3::ArpCache::Entry::IsAlive() [member function]
    cls.add_method('IsAlive', 'bool', [])
    ## arp-cache.h (module 'internet'): bool ns3::ArpCache::Entry::IsDead() [member function]
    cls.add_method('IsDead', 'bool', [])
    ## arp-cache.h (module 'internet'): bool ns3::ArpCache::Entry::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## arp-cache.h (module 'internet'): bool ns3::ArpCache::Entry::IsWaitReply() [member function]
    cls.add_method('IsWaitReply', 'bool', [])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::Entry::MarkAlive(ns3::Address macAddress) [member function]
    cls.add_method('MarkAlive', 'void', [param('ns3::Address', 'macAddress')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::Entry::MarkDead() [member function]
    cls.add_method('MarkDead', 'void', [])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::Entry::MarkWaitReply(ns3::Ptr<ns3::Packet> waiting) [member function]
    cls.add_method('MarkWaitReply', 'void', [param('ns3::Ptr< ns3::Packet >', 'waiting')])
    ## arp-cache.h (module 'internet'): void ns3::ArpCache::Entry::SetIpv4Address(ns3::Ipv4Address destination) [member function]
    cls.add_method('SetIpv4Address', 'void', [param('ns3::Ipv4Address', 'destination')])
    ## arp-cache.h (module 'internet'): bool ns3::ArpCache::Entry::UpdateWaitReply(ns3::Ptr<ns3::Packet> waiting) [member function]
    cls.add_method('UpdateWaitReply', 'bool', [param('ns3::Ptr< ns3::Packet >', 'waiting')])
    return

def register_Ns3AttributeAccessor_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeChecker_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackChecker_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return

def register_Ns3CallbackImplBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackValue_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3EnumChecker_methods(root_module, cls): ## enum.h (module 'core'): ns3::EnumChecker::EnumChecker(ns3::EnumChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')]) ## enum.h (module 'core'): ns3::EnumChecker::EnumChecker() [constructor] cls.add_constructor([]) ## enum.h (module 'core'): void ns3::EnumChecker::Add(int v, std::string name) [member function] cls.add_method('Add', 'void', [param('int', 'v'), param('std::string', 'name')]) ## enum.h (module 'core'): void ns3::EnumChecker::AddDefault(int v, std::string name) [member function] cls.add_method('AddDefault', 'void', [param('int', 'v'), param('std::string', 'name')]) ## enum.h (module 'core'): bool ns3::EnumChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], 
is_const=True, is_virtual=True) ## enum.h (module 'core'): bool ns3::EnumChecker::Copy(ns3::AttributeValue const & src, ns3::AttributeValue & dst) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')], is_const=True, is_virtual=True) ## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## enum.h (module 'core'): std::string ns3::EnumChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_const=True, is_virtual=True) ## enum.h (module 'core'): std::string ns3::EnumChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_const=True, is_virtual=True) ## enum.h (module 'core'): bool ns3::EnumChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_const=True, is_virtual=True) return def register_Ns3EnumValue_methods(root_module, cls): ## enum.h (module 'core'): ns3::EnumValue::EnumValue(ns3::EnumValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EnumValue const &', 'arg0')]) ## enum.h (module 'core'): ns3::EnumValue::EnumValue() [constructor] cls.add_constructor([]) ## enum.h (module 'core'): ns3::EnumValue::EnumValue(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## enum.h (module 'core'): bool ns3::EnumValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## enum.h (module 'core'): int ns3::EnumValue::Get() const [member function] cls.add_method('Get', 'int', [], is_const=True) ## enum.h (module 'core'): std::string ns3::EnumValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## enum.h (module 'core'): void ns3::EnumValue::Set(int v) [member function] cls.add_method('Set', 'void', [param('int', 'v')]) return def register_Ns3EventImpl_methods(root_module, cls): ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventImpl const &', 'arg0')]) ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor] cls.add_constructor([]) ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function] cls.add_method('Invoke', 'void', []) ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function] cls.add_method('IsCancelled', 'bool', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function] cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True) return def register_Ns3Ipv4_methods(root_module, cls): ## ipv4.h (module 
'internet'): ns3::Ipv4::Ipv4(ns3::Ipv4 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')]) ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4() [constructor] cls.add_constructor([]) ## ipv4.h (module 'internet'): bool ns3::Ipv4::AddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('AddAddress', 'bool', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddInterface', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4::GetAddress(uint32_t interface, uint32_t addressIndex) const [member function] cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForAddress(ns3::Ipv4Address address) const [member function] cls.add_method('GetInterfaceForAddress', 'int32_t', [param('ns3::Ipv4Address', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function] cls.add_method('GetInterfaceForDevice', 'int32_t', [param('ns3::Ptr< ns3::NetDevice const >', 'device')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForPrefix(ns3::Ipv4Address address, ns3::Ipv4Mask mask) const [member function] cls.add_method('GetInterfaceForPrefix', 'int32_t', [param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMetric(uint32_t interface) const [member function] cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMtu(uint32_t interface) const [member function] cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNAddresses(uint32_t interface) const [member function] cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNInterfaces() const [member function] cls.add_method('GetNInterfaces', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4::GetNetDevice(uint32_t interface) [member function] cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4::GetRoutingProtocol() const [member function] cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): static ns3::TypeId ns3::Ipv4::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4.h (module 'internet'): 
void ns3::Ipv4::Insert(ns3::Ptr<ns3::Ipv4L4Protocol> protocol) [member function] cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function] cls.add_method('IsDestinationAddress', 'bool', [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsForwarding(uint32_t interface) const [member function] cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsUp(uint32_t interface) const [member function] cls.add_method('IsUp', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, uint32_t addressIndex) [member function] cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address', [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function] cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetDown(uint32_t interface) [member function] cls.add_method('SetDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetForwarding(uint32_t interface, bool val) [member function] cls.add_method('SetForwarding', 'void', [param('uint32_t', 'interface'), param('bool', 'val')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetMetric(uint32_t interface, uint16_t metric) [member function] cls.add_method('SetMetric', 'void', [param('uint32_t', 'interface'), param('uint16_t', 'metric')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function] cls.add_method('SetRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetUp(uint32_t interface) [member function] cls.add_method('SetUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4::IF_ANY [variable] cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetIpForward() const [member 
function] cls.add_method('GetIpForward', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetWeakEsModel() const [member function] cls.add_method('GetWeakEsModel', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetIpForward(bool forward) [member function] cls.add_method('SetIpForward', 'void', [param('bool', 'forward')], is_pure_virtual=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetWeakEsModel(bool model) [member function] cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')], is_pure_virtual=True, visibility='private', is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4Interface_methods(root_module, cls): ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface::Ipv4Interface(ns3::Ipv4Interface const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Interface const &', 'arg0')]) ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface::Ipv4Interface() [constructor] cls.add_constructor([]) ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::AddAddress(ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('AddAddress', 'bool', [param('ns3::Ipv4InterfaceAddress', 
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4Interface::GetAddress(uint32_t index) const [member function]
    cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'index')], is_const=True)
    ## ipv4-interface.h (module 'internet'): ns3::Ptr<ns3::ArpCache> ns3::Ipv4Interface::GetArpCache() const [member function]
    cls.add_method('GetArpCache', 'ns3::Ptr< ns3::ArpCache >', [], is_const=True)
    ## ipv4-interface.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Interface::GetDevice() const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    ## ipv4-interface.h (module 'internet'): uint16_t ns3::Ipv4Interface::GetMetric() const [member function]
    cls.add_method('GetMetric', 'uint16_t', [], is_const=True)
    ## ipv4-interface.h (module 'internet'): uint32_t ns3::Ipv4Interface::GetNAddresses() const [member function]
    cls.add_method('GetNAddresses', 'uint32_t', [], is_const=True)
    ## ipv4-interface.h (module 'internet'): static ns3::TypeId ns3::Ipv4Interface::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::IsDown() const [member function]
    cls.add_method('IsDown', 'bool', [], is_const=True)
    ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::IsForwarding() const [member function]
    cls.add_method('IsForwarding', 'bool', [], is_const=True)
    ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::IsUp() const [member function]
    cls.add_method('IsUp', 'bool', [], is_const=True)
    ## ipv4-interface.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4Interface::RemoveAddress(uint32_t index) [member function]
    cls.add_method('RemoveAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'index')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::Send(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Address dest) [member function]
    cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Address', 'dest')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetArpCache(ns3::Ptr<ns3::ArpCache> arg0) [member function]
    cls.add_method('SetArpCache', 'void', [param('ns3::Ptr< ns3::ArpCache >', 'arg0')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetDown() [member function]
    cls.add_method('SetDown', 'void', [])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetForwarding(bool val) [member function]
    cls.add_method('SetForwarding', 'void', [param('bool', 'val')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetMetric(uint16_t metric) [member function]
    cls.add_method('SetMetric', 'void', [param('uint16_t', 'metric')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetUp() [member function]
    cls.add_method('SetUp', 'void', [])
    ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
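# Editor's note (illustration, not generated output): each register_* function
# above receives the PyBindGen wrapper object for one C++ class. In files like
# this one, a generated dispatcher normally fetches each wrapper from the
# root_module class registry and applies the matching register_* function to
# it. A minimal sketch of that wiring, assuming the root_module['ns3::X']
# lookup convention used by the ns-3 binding generator:
def _sketch_register_methods(root_module):
    # Look each class wrapper up by its fully qualified C++ name and
    # attach its constructors and methods.
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4Interface_methods(root_module, root_module['ns3::Ipv4Interface'])
    return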
def register_Ns3Ipv4L3Protocol_methods(root_module, cls):
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::Ipv4L3Protocol() [constructor]
    cls.add_constructor([])
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::AddAddress(uint32_t i, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('AddAddress', 'bool', [param('uint32_t', 'i'), param('ns3::Ipv4InterfaceAddress', 'address')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddInterface', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4L3Protocol::CreateRawSocket() [member function]
    cls.add_method('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [])
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function]
    cls.add_method('DeleteRawSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4L3Protocol::GetAddress(uint32_t interfaceIndex, uint32_t addressIndex) const [member function]
    cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Interface> ns3::Ipv4L3Protocol::GetInterface(uint32_t i) const [member function]
    cls.add_method('GetInterface', 'ns3::Ptr< ns3::Ipv4Interface >', [param('uint32_t', 'i')], is_const=True)
    ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForAddress(ns3::Ipv4Address addr) const [member function]
    cls.add_method('GetInterfaceForAddress', 'int32_t', [param('ns3::Ipv4Address', 'addr')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
    cls.add_method('GetInterfaceForDevice', 'int32_t', [param('ns3::Ptr< ns3::NetDevice const >', 'device')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForPrefix(ns3::Ipv4Address addr, ns3::Ipv4Mask mask) const [member function]
    cls.add_method('GetInterfaceForPrefix', 'int32_t', [param('ns3::Ipv4Address', 'addr'), param('ns3::Ipv4Mask', 'mask')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv4L3Protocol::GetMetric(uint32_t i) const [member function]
    cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv4L3Protocol::GetMtu(uint32_t i) const [member function]
    cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::GetNAddresses(uint32_t interface) const [member function]
    cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::GetNInterfaces() const [member function]
    cls.add_method('GetNInterfaces', 'uint32_t', [], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4L3Protocol::GetNetDevice(uint32_t i) [member function]
    cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4L4Protocol> ns3::Ipv4L3Protocol::GetProtocol(int protocolNumber) const [member function]
    cls.add_method('GetProtocol', 'ns3::Ptr< ns3::Ipv4L4Protocol >', [param('int', 'protocolNumber')], is_const=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4L3Protocol::GetRoutingProtocol() const [member function]
    cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4L3Protocol::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Insert(ns3::Ptr<ns3::Ipv4L4Protocol> protocol) [member function]
    cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function]
    cls.add_method('IsDestinationAddress', 'bool', [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsForwarding(uint32_t i) const [member function]
    cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsUp(uint32_t i) const [member function]
    cls.add_method('IsUp', 'bool', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Receive(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::Packet const> p, uint16_t protocol, ns3::Address const & from, ns3::Address const & to, ns3::NetDevice::PacketType packetType) [member function]
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'p'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'from'), param('ns3::Address const &', 'to'), param('ns3::NetDevice::PacketType', 'packetType')])
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Remove(ns3::Ptr<ns3::Ipv4L4Protocol> protocol) [member function]
    cls.add_method('Remove', 'void', [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')])
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::RemoveAddress(uint32_t interfaceIndex, uint32_t addressIndex) [member function]
    cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4L3Protocol::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
    cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address', [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('SendWithHeader', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')])
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetDefaultTtl(uint8_t ttl) [member function]
    cls.add_method('SetDefaultTtl', 'void', [param('uint8_t', 'ttl')])
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetDown(uint32_t i) [member function]
    cls.add_method('SetDown', 'void', [param('uint32_t', 'i')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetForwarding(uint32_t i, bool val) [member function]
    cls.add_method('SetForwarding', 'void', [param('uint32_t', 'i'), param('bool', 'val')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetMetric(uint32_t i, uint16_t metric) [member function]
    cls.add_method('SetMetric', 'void', [param('uint32_t', 'i'), param('uint16_t', 'metric')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function]
    cls.add_method('SetRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetUp(uint32_t i) [member function]
    cls.add_method('SetUp', 'void', [param('uint32_t', 'i')], is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::PROT_NUMBER [variable]
    cls.add_static_attribute('PROT_NUMBER', 'uint16_t const', is_const=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::GetIpForward() const [member function]
    cls.add_method('GetIpForward', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::GetWeakEsModel() const [member function]
    cls.add_method('GetWeakEsModel', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetIpForward(bool forward) [member function]
    cls.add_method('SetIpForward', 'void', [param('bool', 'forward')], visibility='private', is_virtual=True)
    ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetWeakEsModel(bool model) [member function]
    cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')], visibility='private', is_virtual=True)
    return
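# Editor's note (illustration only): the add_method flags used above map
# one-to-one onto the C++ declaration quoted in each '##' comment: is_const
# marks a const member function, is_virtual/is_pure_virtual mirror the
# virtual specifiers, is_static marks static members, and
# visibility='protected'/'private' records the C++ access level so the
# generated helper class can still override those virtuals from Python.
# A made-up registration using that vocabulary (class and method names
# hypothetical):
def _sketch_register_flags_demo(root_module, cls):
    cls.add_constructor([])                                    # X::X()
    cls.add_method('GetValue', 'uint32_t', [], is_const=True)  # uint32_t X::GetValue() const
    cls.add_method('SetValue', 'void', [param('uint32_t', 'value')])
    # virtual void X::DoCompute() = 0  (protected, overridable from Python)
    cls.add_method('DoCompute', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return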
def register_Ns3Ipv4L4Protocol_methods(root_module, cls):
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::Ipv4L4Protocol() [constructor]
    cls.add_constructor([])
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::Ipv4L4Protocol(ns3::Ipv4L4Protocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4L4Protocol const &', 'arg0')])
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Callback<void,ns3::Ptr<ns3::Packet>,ns3::Ipv4Address,ns3::Ipv4Address,unsigned char,ns3::Ptr<ns3::Ipv4Route>,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::Ipv4L4Protocol::GetDownTarget() const [member function]
    cls.add_method('GetDownTarget', 'ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-l4-protocol.h (module 'internet'): int ns3::Ipv4L4Protocol::GetProtocolNumber() const [member function]
    cls.add_method('GetProtocolNumber', 'int', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-l4-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4L4Protocol::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::RxStatus ns3::Ipv4L4Protocol::Receive(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::Ipv4Interface> incomingInterface) [member function]
    cls.add_method('Receive', 'ns3::Ipv4L4Protocol::RxStatus', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::Ipv4Interface >', 'incomingInterface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-l4-protocol.h (module 'internet'): void ns3::Ipv4L4Protocol::ReceiveIcmp(ns3::Ipv4Address icmpSource, uint8_t icmpTtl, uint8_t icmpType, uint8_t icmpCode, uint32_t icmpInfo, ns3::Ipv4Address payloadSource, ns3::Ipv4Address payloadDestination, uint8_t const * payload) [member function]
    cls.add_method('ReceiveIcmp', 'void', [param('ns3::Ipv4Address', 'icmpSource'), param('uint8_t', 'icmpTtl'), param('uint8_t', 'icmpType'), param('uint8_t', 'icmpCode'), param('uint32_t', 'icmpInfo'), param('ns3::Ipv4Address', 'payloadSource'), param('ns3::Ipv4Address', 'payloadDestination'), param('uint8_t const *', 'payload')], is_virtual=True)
    ## ipv4-l4-protocol.h (module 'internet'): void ns3::Ipv4L4Protocol::SetDownTarget(ns3::Callback<void,ns3::Ptr<ns3::Packet>,ns3::Ipv4Address,ns3::Ipv4Address,unsigned char,ns3::Ptr<ns3::Ipv4Route>,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetDownTarget', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

def register_Ns3Ipv4MulticastRoute_methods(root_module, cls):
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute(ns3::Ipv4MulticastRoute const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute() [constructor]
    cls.add_constructor([])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetGroup() const [member function]
    cls.add_method('GetGroup', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetOrigin() const [member function]
    cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetOutputTtl(uint32_t oif) [member function]
    cls.add_method('GetOutputTtl', 'uint32_t', [param('uint32_t', 'oif')], deprecated=True)
    ## ipv4-route.h (module 'internet'): std::map<unsigned int, unsigned int, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, unsigned int> > > ns3::Ipv4MulticastRoute::GetOutputTtlMap() const [member function]
    cls.add_method('GetOutputTtlMap', 'std::map< unsigned int, unsigned int >', [], is_const=True)
    ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetParent() const [member function]
    cls.add_method('GetParent', 'uint32_t', [], is_const=True)
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetGroup(ns3::Ipv4Address const group) [member function]
    cls.add_method('SetGroup', 'void', [param('ns3::Ipv4Address const', 'group')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOrigin(ns3::Ipv4Address const origin) [member function]
    cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address const', 'origin')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOutputTtl(uint32_t oif, uint32_t ttl) [member function]
    cls.add_method('SetOutputTtl', 'void', [param('uint32_t', 'oif'), param('uint32_t', 'ttl')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetParent(uint32_t iif) [member function]
    cls.add_method('SetParent', 'void', [param('uint32_t', 'iif')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_INTERFACES [variable]
    cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_TTL [variable]
    cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True)
    return

def register_Ns3Ipv4Route_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route(ns3::Ipv4Route const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route() [constructor]
    cls.add_constructor([])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetDestination() const [member function]
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetGateway() const [member function]
    cls.add_method('GetGateway', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Route::GetOutputDevice() const [member function]
    cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetSource() const [member function]
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetDestination(ns3::Ipv4Address dest) [member function]
    cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'dest')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetGateway(ns3::Ipv4Address gw) [member function]
    cls.add_method('SetGateway', 'void', [param('ns3::Ipv4Address', 'gw')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetOutputDevice(ns3::Ptr<ns3::NetDevice> outputDevice) [member function]
    cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetSource(ns3::Ipv4Address src) [member function]
    cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'src')])
    return

def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls):
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor]
    cls.add_constructor([])
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')])
    ## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<ns3::Packet const> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function]
    cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
    cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
    cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_pure_virtual=True, is_virtual=True)
    return
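# Editor's note (illustration only): the long 'ns3::Callback< ... >' type
# strings above are padded with trailing 'ns3::empty' arguments because the
# ns3::Callback template of this ns-3 generation has a fixed arity of nine
# argument slots; unused slots are filled with ns3::empty. RouteInput above,
# for instance, takes four such callbacks (unicast, multicast, local
# delivery, and error). A hypothetical helper, shown only to make the
# padding convention explicit:
def _sketch_callback_typename(return_type, args, slots=9):
    # Pad the real argument types with 'ns3::empty' up to the fixed arity.
    padded = list(args) + ['ns3::empty'] * (slots - len(args))
    return 'ns3::Callback< %s, %s >' % (return_type, ', '.join(padded))

# e.g. _sketch_callback_typename('void', ['ns3::Ptr< ns3::NetDevice >'])
# yields 'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ... >'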
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return

def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return

def register_Ns3Mac48AddressValue_methods(root_module, cls):
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return
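# Editor's note (illustration only): the *Checker/*Value pairs registered
# above (Ipv4AddressValue, Ipv6PrefixValue, Mac48AddressValue, ...) all share
# the same ns-3 attribute-system shape: the Value class wraps one underlying
# type behind Get/Set plus Copy/SerializeToString/DeserializeFromString, and
# the Checker class validates assignments. A condensed sketch of the
# recurring registration, for a hypothetical ns3::FooValue wrapping ns3::Foo:
def _sketch_register_attribute_value(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Foo const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Foo', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Foo const &', 'value')])
    return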
def register_Ns3NetDevice_methods(root_module, cls):
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
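# Editor's note (illustration only): every ns3::NetDevice method above is
# is_pure_virtual, i.e. NetDevice is an abstract interface. The class wrapper
# itself is declared elsewhere in this file (the function above only
# decorates an existing wrapper); that declaration would resemble the sketch
# below, where allow_subclassing asks PyBindGen to emit a proxy class so
# Python code can implement the pure-virtual methods (keyword names assumed
# from PyBindGen's API, not taken from this file):
def _sketch_declare_abstract_base(module, root_module):
    return module.add_class('NetDevice', allow_subclassing=True,
                            parent=root_module['ns3::Object'])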
def register_Ns3NixVector_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3Node_methods(root_module, cls):
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoStart() [member function]
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    return
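# Editor's note (illustration only): RegisterProtocolHandler above passes
# default_value='false' for its 'promiscuous' parameter. The string is
# emitted verbatim into the generated C++ call signature, which is why it is
# the C++ literal 'false' rather than Python's False. The same idiom on a
# hypothetical method:
def _sketch_default_value(cls):
    # Both defaults below are C++ literals, rendered as-is by PyBindGen.
    cls.add_method('Configure', 'void', [param('uint32_t', 'queueSize', default_value='64'), param('bool', 'verbose', default_value='false')])
    return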
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return

def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return

def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
    cls.add_constructor([param('std::ostream *', 'os')])
    ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
    cls.add_method('GetStream', 'std::ostream *', [])
    return

def register_Ns3Packet_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<ns3::Packet const> packet) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting', 'void', [], is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], deprecated=True, is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'arg0')])
    return
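# Editor's note (illustration only): deprecated=True (used for
# ns3::Packet::PeekData above and ns3::Ipv4MulticastRoute::GetOutputTtl
# earlier) keeps a binding available while flagging it, so the generated
# wrapper can warn callers instead of dropping the API outright. The same
# idiom on a hypothetical accessor:
def _sketch_deprecated_method(cls):
    cls.add_method('PeekLegacyBuffer', 'uint8_t const *', [], deprecated=True, is_const=True)
    return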
cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def 
register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3AodvDuplicatePacketDetection_methods(root_module, cls): ## aodv-dpd.h (module 'aodv'): ns3::aodv::DuplicatePacketDetection::DuplicatePacketDetection(ns3::aodv::DuplicatePacketDetection const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::DuplicatePacketDetection const &', 'arg0')]) ## aodv-dpd.h (module 'aodv'): ns3::aodv::DuplicatePacketDetection::DuplicatePacketDetection(ns3::Time lifetime) [constructor] cls.add_constructor([param('ns3::Time', 'lifetime')]) ## aodv-dpd.h (module 'aodv'): ns3::Time ns3::aodv::DuplicatePacketDetection::GetLifetime() const [member function] cls.add_method('GetLifetime', 'ns3::Time', [], is_const=True) ## aodv-dpd.h (module 'aodv'): bool ns3::aodv::DuplicatePacketDetection::IsDuplicate(ns3::Ptr<ns3::Packet const> p, ns3::Ipv4Header const & header) [member function] cls.add_method('IsDuplicate', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header')]) ## aodv-dpd.h (module 'aodv'): void ns3::aodv::DuplicatePacketDetection::SetLifetime(ns3::Time lifetime) [member function] cls.add_method('SetLifetime', 'void', [param('ns3::Time', 'lifetime')]) return def register_Ns3AodvIdCache_methods(root_module, cls): ## aodv-id-cache.h (module 'aodv'): ns3::aodv::IdCache::IdCache(ns3::aodv::IdCache const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::IdCache const &', 'arg0')]) ## aodv-id-cache.h (module 'aodv'): ns3::aodv::IdCache::IdCache(ns3::Time lifetime) [constructor] cls.add_constructor([param('ns3::Time', 'lifetime')]) ## aodv-id-cache.h (module 'aodv'): ns3::Time ns3::aodv::IdCache::GetLifeTime() const [member function] cls.add_method('GetLifeTime', 'ns3::Time', [], is_const=True) ## aodv-id-cache.h (module 'aodv'): uint32_t ns3::aodv::IdCache::GetSize() [member function] cls.add_method('GetSize', 'uint32_t', []) ## 
aodv-id-cache.h (module 'aodv'): bool ns3::aodv::IdCache::IsDuplicate(ns3::Ipv4Address addr, uint32_t id) [member function] cls.add_method('IsDuplicate', 'bool', [param('ns3::Ipv4Address', 'addr'), param('uint32_t', 'id')]) ## aodv-id-cache.h (module 'aodv'): void ns3::aodv::IdCache::Purge() [member function] cls.add_method('Purge', 'void', []) ## aodv-id-cache.h (module 'aodv'): void ns3::aodv::IdCache::SetLifetime(ns3::Time lifetime) [member function] cls.add_method('SetLifetime', 'void', [param('ns3::Time', 'lifetime')]) return def register_Ns3AodvNeighbors_methods(root_module, cls): ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbors(ns3::aodv::Neighbors const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::Neighbors const &', 'arg0')]) ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbors(ns3::Time delay) [constructor] cls.add_constructor([param('ns3::Time', 'delay')]) ## aodv-neighbor.h (module 'aodv'): void ns3::aodv::Neighbors::AddArpCache(ns3::Ptr<ns3::ArpCache> arg0) [member function] cls.add_method('AddArpCache', 'void', [param('ns3::Ptr< ns3::ArpCache >', 'arg0')]) ## aodv-neighbor.h (module 'aodv'): void ns3::aodv::Neighbors::Clear() [member function] cls.add_method('Clear', 'void', []) ## aodv-neighbor.h (module 'aodv'): void ns3::aodv::Neighbors::DelArpCache(ns3::Ptr<ns3::ArpCache> arg0) [member function] cls.add_method('DelArpCache', 'void', [param('ns3::Ptr< ns3::ArpCache >', 'arg0')]) ## aodv-neighbor.h (module 'aodv'): ns3::Callback<void, ns3::Ipv4Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::aodv::Neighbors::GetCallback() const [member function] cls.add_method('GetCallback', 'ns3::Callback< void, ns3::Ipv4Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## aodv-neighbor.h (module 'aodv'): ns3::Time ns3::aodv::Neighbors::GetExpireTime(ns3::Ipv4Address addr) [member function] cls.add_method('GetExpireTime', 'ns3::Time', [param('ns3::Ipv4Address', 'addr')]) ## aodv-neighbor.h (module 'aodv'): ns3::Callback<void, ns3::WifiMacHeader const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::aodv::Neighbors::GetTxErrorCallback() const [member function] cls.add_method('GetTxErrorCallback', 'ns3::Callback< void, ns3::WifiMacHeader const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## aodv-neighbor.h (module 'aodv'): bool ns3::aodv::Neighbors::IsNeighbor(ns3::Ipv4Address addr) [member function] cls.add_method('IsNeighbor', 'bool', [param('ns3::Ipv4Address', 'addr')]) ## aodv-neighbor.h (module 'aodv'): void ns3::aodv::Neighbors::Purge() [member function] cls.add_method('Purge', 'void', []) ## aodv-neighbor.h (module 'aodv'): void ns3::aodv::Neighbors::ScheduleTimer() [member function] cls.add_method('ScheduleTimer', 'void', []) ## aodv-neighbor.h (module 'aodv'): void ns3::aodv::Neighbors::SetCallback(ns3::Callback<void, ns3::Ipv4Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetCallback', 'void', [param('ns3::Callback< void, ns3::Ipv4Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')]) ## aodv-neighbor.h (module 'aodv'): void ns3::aodv::Neighbors::Update(ns3::Ipv4Address addr, ns3::Time expire) [member function] 
cls.add_method('Update', 'void', [param('ns3::Ipv4Address', 'addr'), param('ns3::Time', 'expire')]) return def register_Ns3AodvNeighborsNeighbor_methods(root_module, cls): ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbor::Neighbor(ns3::aodv::Neighbors::Neighbor const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::Neighbors::Neighbor const &', 'arg0')]) ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbor::Neighbor(ns3::Ipv4Address ip, ns3::Mac48Address mac, ns3::Time t) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ip'), param('ns3::Mac48Address', 'mac'), param('ns3::Time', 't')]) ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbor::close [variable] cls.add_instance_attribute('close', 'bool', is_const=False) ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbor::m_expireTime [variable] cls.add_instance_attribute('m_expireTime', 'ns3::Time', is_const=False) ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbor::m_hardwareAddress [variable] cls.add_instance_attribute('m_hardwareAddress', 'ns3::Mac48Address', is_const=False) ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbor::m_neighborAddress [variable] cls.add_instance_attribute('m_neighborAddress', 'ns3::Ipv4Address', is_const=False) return def register_Ns3AodvQueueEntry_methods(root_module, cls): cls.add_binary_comparison_operator('==') ## aodv-rqueue.h (module 'aodv'): ns3::aodv::QueueEntry::QueueEntry(ns3::aodv::QueueEntry const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::QueueEntry const &', 'arg0')]) ## aodv-rqueue.h (module 'aodv'): ns3::aodv::QueueEntry::QueueEntry(ns3::Ptr<ns3::Packet const> pa=0, ns3::Ipv4Header const & h=ns3::Ipv4Header(), ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb=ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>(), ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb=ns3::Callback<void, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>(), ns3::Time exp=ns3::Simulator::Now( )) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Packet const >', 'pa', default_value='0'), param('ns3::Ipv4Header const &', 'h', default_value='ns3::Ipv4Header()'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb', default_value='ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>()'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb', default_value='ns3::Callback<void, ns3::Ptr<const ns3::Packet>, const ns3::Ipv4Header&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>()'), param('ns3::Time', 'exp', default_value='ns3::Simulator::Now( )')]) ## aodv-rqueue.h (module 'aodv'): 
ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::aodv::QueueEntry::GetErrorCallback() const [member function] cls.add_method('GetErrorCallback', 'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## aodv-rqueue.h (module 'aodv'): ns3::Time ns3::aodv::QueueEntry::GetExpireTime() const [member function] cls.add_method('GetExpireTime', 'ns3::Time', [], is_const=True) ## aodv-rqueue.h (module 'aodv'): ns3::Ipv4Header ns3::aodv::QueueEntry::GetIpv4Header() const [member function] cls.add_method('GetIpv4Header', 'ns3::Ipv4Header', [], is_const=True) ## aodv-rqueue.h (module 'aodv'): ns3::Ptr<ns3::Packet const> ns3::aodv::QueueEntry::GetPacket() const [member function] cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet const >', [], is_const=True) ## aodv-rqueue.h (module 'aodv'): ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::aodv::QueueEntry::GetUnicastForwardCallback() const [member function] cls.add_method('GetUnicastForwardCallback', 'ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## aodv-rqueue.h (module 'aodv'): void ns3::aodv::QueueEntry::SetErrorCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function] cls.add_method('SetErrorCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')]) ## aodv-rqueue.h (module 'aodv'): void ns3::aodv::QueueEntry::SetExpireTime(ns3::Time exp) [member function] cls.add_method('SetExpireTime', 'void', [param('ns3::Time', 'exp')]) ## aodv-rqueue.h (module 'aodv'): void ns3::aodv::QueueEntry::SetIpv4Header(ns3::Ipv4Header h) [member function] cls.add_method('SetIpv4Header', 'void', [param('ns3::Ipv4Header', 'h')]) ## aodv-rqueue.h (module 'aodv'): void ns3::aodv::QueueEntry::SetPacket(ns3::Ptr<ns3::Packet const> p) [member function] cls.add_method('SetPacket', 'void', [param('ns3::Ptr< ns3::Packet const >', 'p')]) ## aodv-rqueue.h (module 'aodv'): void ns3::aodv::QueueEntry::SetUnicastForwardCallback(ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb) [member function] cls.add_method('SetUnicastForwardCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb')]) return def register_Ns3AodvRequestQueue_methods(root_module, cls): ## aodv-rqueue.h (module 'aodv'): ns3::aodv::RequestQueue::RequestQueue(ns3::aodv::RequestQueue const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::RequestQueue const &', 'arg0')]) ## aodv-rqueue.h (module 'aodv'): ns3::aodv::RequestQueue::RequestQueue(uint32_t maxLen, ns3::Time routeToQueueTimeout) 
[constructor] cls.add_constructor([param('uint32_t', 'maxLen'), param('ns3::Time', 'routeToQueueTimeout')]) ## aodv-rqueue.h (module 'aodv'): bool ns3::aodv::RequestQueue::Dequeue(ns3::Ipv4Address dst, ns3::aodv::QueueEntry & entry) [member function] cls.add_method('Dequeue', 'bool', [param('ns3::Ipv4Address', 'dst'), param('ns3::aodv::QueueEntry &', 'entry')]) ## aodv-rqueue.h (module 'aodv'): void ns3::aodv::RequestQueue::DropPacketWithDst(ns3::Ipv4Address dst) [member function] cls.add_method('DropPacketWithDst', 'void', [param('ns3::Ipv4Address', 'dst')]) ## aodv-rqueue.h (module 'aodv'): bool ns3::aodv::RequestQueue::Enqueue(ns3::aodv::QueueEntry & entry) [member function] cls.add_method('Enqueue', 'bool', [param('ns3::aodv::QueueEntry &', 'entry')]) ## aodv-rqueue.h (module 'aodv'): bool ns3::aodv::RequestQueue::Find(ns3::Ipv4Address dst) [member function] cls.add_method('Find', 'bool', [param('ns3::Ipv4Address', 'dst')]) ## aodv-rqueue.h (module 'aodv'): uint32_t ns3::aodv::RequestQueue::GetMaxQueueLen() const [member function] cls.add_method('GetMaxQueueLen', 'uint32_t', [], is_const=True) ## aodv-rqueue.h (module 'aodv'): ns3::Time ns3::aodv::RequestQueue::GetQueueTimeout() const [member function] cls.add_method('GetQueueTimeout', 'ns3::Time', [], is_const=True) ## aodv-rqueue.h (module 'aodv'): uint32_t ns3::aodv::RequestQueue::GetSize() [member function] cls.add_method('GetSize', 'uint32_t', []) ## aodv-rqueue.h (module 'aodv'): void ns3::aodv::RequestQueue::SetMaxQueueLen(uint32_t len) [member function] cls.add_method('SetMaxQueueLen', 'void', [param('uint32_t', 'len')]) ## aodv-rqueue.h (module 'aodv'): void ns3::aodv::RequestQueue::SetQueueTimeout(ns3::Time t) [member function] cls.add_method('SetQueueTimeout', 'void', [param('ns3::Time', 't')]) return def register_Ns3AodvRerrHeader_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## aodv-packet.h (module 'aodv'): ns3::aodv::RerrHeader::RerrHeader(ns3::aodv::RerrHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::RerrHeader const &', 'arg0')]) ## aodv-packet.h (module 'aodv'): ns3::aodv::RerrHeader::RerrHeader() [constructor] cls.add_constructor([]) ## aodv-packet.h (module 'aodv'): bool ns3::aodv::RerrHeader::AddUnDestination(ns3::Ipv4Address dst, uint32_t seqNo) [member function] cls.add_method('AddUnDestination', 'bool', [param('ns3::Ipv4Address', 'dst'), param('uint32_t', 'seqNo')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RerrHeader::Clear() [member function] cls.add_method('Clear', 'void', []) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RerrHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## aodv-packet.h (module 'aodv'): uint8_t ns3::aodv::RerrHeader::GetDestCount() const [member function] cls.add_method('GetDestCount', 'uint8_t', [], is_const=True) ## aodv-packet.h (module 'aodv'): ns3::TypeId ns3::aodv::RerrHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): bool ns3::aodv::RerrHeader::GetNoDelete() const [member function] cls.add_method('GetNoDelete', 'bool', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RerrHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## 
aodv-packet.h (module 'aodv'): static ns3::TypeId ns3::aodv::RerrHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RerrHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): bool ns3::aodv::RerrHeader::RemoveUnDestination(std::pair<ns3::Ipv4Address,unsigned int> & un) [member function] cls.add_method('RemoveUnDestination', 'bool', [param('std::pair< ns3::Ipv4Address, unsigned int > &', 'un')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RerrHeader::Serialize(ns3::Buffer::Iterator i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'i')], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RerrHeader::SetNoDelete(bool f) [member function] cls.add_method('SetNoDelete', 'void', [param('bool', 'f')]) return def register_Ns3AodvRoutingProtocol_methods(root_module, cls): ## aodv-routing-protocol.h (module 'aodv'): ns3::aodv::RoutingProtocol::RoutingProtocol(ns3::aodv::RoutingProtocol const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::RoutingProtocol const &', 'arg0')]) ## aodv-routing-protocol.h (module 'aodv'): ns3::aodv::RoutingProtocol::RoutingProtocol() [constructor] cls.add_constructor([]) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): bool ns3::aodv::RoutingProtocol::GetBroadcastEnable() const [member function] cls.add_method('GetBroadcastEnable', 'bool', [], is_const=True) ## aodv-routing-protocol.h (module 'aodv'): bool ns3::aodv::RoutingProtocol::GetDesinationOnlyFlag() const [member function] cls.add_method('GetDesinationOnlyFlag', 'bool', [], is_const=True) ## aodv-routing-protocol.h (module 'aodv'): bool ns3::aodv::RoutingProtocol::GetGratuitousReplyFlag() const [member function] cls.add_method('GetGratuitousReplyFlag', 'bool', [], is_const=True) ## aodv-routing-protocol.h (module 'aodv'): bool ns3::aodv::RoutingProtocol::GetHelloEnable() const [member function] cls.add_method('GetHelloEnable', 'bool', [], is_const=True) ## aodv-routing-protocol.h (module 'aodv'): uint32_t ns3::aodv::RoutingProtocol::GetMaxQueueLen() const [member function] cls.add_method('GetMaxQueueLen', 'uint32_t', [], is_const=True) ## aodv-routing-protocol.h (module 'aodv'): ns3::Time ns3::aodv::RoutingProtocol::GetMaxQueueTime() const [member function] cls.add_method('GetMaxQueueTime', 'ns3::Time', [], is_const=True) ## aodv-routing-protocol.h (module 'aodv'): static ns3::TypeId ns3::aodv::RoutingProtocol::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function] cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): void 
ns3::aodv::RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function] cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True, is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): bool ns3::aodv::RoutingProtocol::RouteInput(ns3::Ptr<ns3::Packet const> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function] cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): ns3::Ptr<ns3::Ipv4Route> ns3::aodv::RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function] cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::SetBroadcastEnable(bool f) [member function] cls.add_method('SetBroadcastEnable', 'void', [param('bool', 'f')]) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::SetDesinationOnlyFlag(bool f) [member function] cls.add_method('SetDesinationOnlyFlag', 'void', [param('bool', 'f')]) ## 
aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::SetGratuitousReplyFlag(bool f) [member function] cls.add_method('SetGratuitousReplyFlag', 'void', [param('bool', 'f')]) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::SetHelloEnable(bool f) [member function] cls.add_method('SetHelloEnable', 'void', [param('bool', 'f')]) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_virtual=True) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::SetMaxQueueLen(uint32_t len) [member function] cls.add_method('SetMaxQueueLen', 'void', [param('uint32_t', 'len')]) ## aodv-routing-protocol.h (module 'aodv'): void ns3::aodv::RoutingProtocol::SetMaxQueueTime(ns3::Time t) [member function] cls.add_method('SetMaxQueueTime', 'void', [param('ns3::Time', 't')]) ## aodv-routing-protocol.h (module 'aodv'): ns3::aodv::RoutingProtocol::AODV_PORT [variable] cls.add_static_attribute('AODV_PORT', 'uint32_t const', is_const=True) return def register_Ns3AodvRoutingTable_methods(root_module, cls): ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTable::RoutingTable(ns3::aodv::RoutingTable const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::RoutingTable const &', 'arg0')]) ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTable::RoutingTable(ns3::Time t) [constructor] cls.add_constructor([param('ns3::Time', 't')]) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTable::AddRoute(ns3::aodv::RoutingTableEntry & r) [member function] cls.add_method('AddRoute', 'bool', [param('ns3::aodv::RoutingTableEntry &', 'r')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTable::Clear() [member function] cls.add_method('Clear', 'void', []) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTable::DeleteAllRoutesFromInterface(ns3::Ipv4InterfaceAddress iface) [member function] cls.add_method('DeleteAllRoutesFromInterface', 'void', [param('ns3::Ipv4InterfaceAddress', 'iface')]) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTable::DeleteRoute(ns3::Ipv4Address dst) [member function] cls.add_method('DeleteRoute', 'bool', [param('ns3::Ipv4Address', 'dst')]) ## aodv-rtable.h (module 'aodv'): ns3::Time ns3::aodv::RoutingTable::GetBadLinkLifetime() const [member function] cls.add_method('GetBadLinkLifetime', 'ns3::Time', [], is_const=True) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTable::GetListOfDestinationWithNextHop(ns3::Ipv4Address nextHop, std::map<ns3::Ipv4Address, unsigned int, std::less<ns3::Ipv4Address>, std::allocator<std::pair<ns3::Ipv4Address const, unsigned int> > > & unreachable) [member function] cls.add_method('GetListOfDestinationWithNextHop', 'void', [param('ns3::Ipv4Address', 'nextHop'), param('std::map< ns3::Ipv4Address, unsigned int > &', 'unreachable')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTable::InvalidateRoutesWithDst(std::map<ns3::Ipv4Address, unsigned int, std::less<ns3::Ipv4Address>, std::allocator<std::pair<ns3::Ipv4Address const, unsigned int> > > const & unreachable) [member function] cls.add_method('InvalidateRoutesWithDst', 'void', [param('std::map< ns3::Ipv4Address, unsigned int > const &', 'unreachable')]) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTable::LookupRoute(ns3::Ipv4Address dst, ns3::aodv::RoutingTableEntry & rt) [member function] cls.add_method('LookupRoute', 'bool', 
[param('ns3::Ipv4Address', 'dst'), param('ns3::aodv::RoutingTableEntry &', 'rt')]) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTable::LookupValidRoute(ns3::Ipv4Address dst, ns3::aodv::RoutingTableEntry & rt) [member function] cls.add_method('LookupValidRoute', 'bool', [param('ns3::Ipv4Address', 'dst'), param('ns3::aodv::RoutingTableEntry &', 'rt')]) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTable::MarkLinkAsUnidirectional(ns3::Ipv4Address neighbor, ns3::Time blacklistTimeout) [member function] cls.add_method('MarkLinkAsUnidirectional', 'bool', [param('ns3::Ipv4Address', 'neighbor'), param('ns3::Time', 'blacklistTimeout')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTable::Print(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('Print', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTable::Purge() [member function] cls.add_method('Purge', 'void', []) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTable::SetBadLinkLifetime(ns3::Time t) [member function] cls.add_method('SetBadLinkLifetime', 'void', [param('ns3::Time', 't')]) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTable::SetEntryState(ns3::Ipv4Address dst, ns3::aodv::RouteFlags state) [member function] cls.add_method('SetEntryState', 'bool', [param('ns3::Ipv4Address', 'dst'), param('ns3::aodv::RouteFlags', 'state')]) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTable::Update(ns3::aodv::RoutingTableEntry & rt) [member function] cls.add_method('Update', 'bool', [param('ns3::aodv::RoutingTableEntry &', 'rt')]) return def register_Ns3AodvRoutingTableEntry_methods(root_module, cls): ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTableEntry::RoutingTableEntry(ns3::aodv::RoutingTableEntry const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::RoutingTableEntry const &', 'arg0')]) ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTableEntry::RoutingTableEntry(ns3::Ptr<ns3::NetDevice> dev=0, ns3::Ipv4Address dst=ns3::Ipv4Address(), bool vSeqNo=false, uint32_t m_seqNo=0, ns3::Ipv4InterfaceAddress iface=ns3::Ipv4InterfaceAddress(), uint16_t hops=0, ns3::Ipv4Address nextHop=ns3::Ipv4Address(), ns3::Time lifetime=ns3::Simulator::Now( )) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev', default_value='0'), param('ns3::Ipv4Address', 'dst', default_value='ns3::Ipv4Address()'), param('bool', 'vSeqNo', default_value='false'), param('uint32_t', 'm_seqNo', default_value='0'), param('ns3::Ipv4InterfaceAddress', 'iface', default_value='ns3::Ipv4InterfaceAddress()'), param('uint16_t', 'hops', default_value='0'), param('ns3::Ipv4Address', 'nextHop', default_value='ns3::Ipv4Address()'), param('ns3::Time', 'lifetime', default_value='ns3::Simulator::Now( )')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::DeleteAllPrecursors() [member function] cls.add_method('DeleteAllPrecursors', 'void', []) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTableEntry::DeletePrecursor(ns3::Ipv4Address id) [member function] cls.add_method('DeletePrecursor', 'bool', [param('ns3::Ipv4Address', 'id')]) ## aodv-rtable.h (module 'aodv'): ns3::Time ns3::aodv::RoutingTableEntry::GetBlacklistTimeout() const [member function] cls.add_method('GetBlacklistTimeout', 'ns3::Time', [], is_const=True) ## aodv-rtable.h (module 'aodv'): ns3::Ipv4Address ns3::aodv::RoutingTableEntry::GetDestination() const [member 
function] cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True) ## aodv-rtable.h (module 'aodv'): ns3::aodv::RouteFlags ns3::aodv::RoutingTableEntry::GetFlag() const [member function] cls.add_method('GetFlag', 'ns3::aodv::RouteFlags', [], is_const=True) ## aodv-rtable.h (module 'aodv'): uint16_t ns3::aodv::RoutingTableEntry::GetHop() const [member function] cls.add_method('GetHop', 'uint16_t', [], is_const=True) ## aodv-rtable.h (module 'aodv'): ns3::Ipv4InterfaceAddress ns3::aodv::RoutingTableEntry::GetInterface() const [member function] cls.add_method('GetInterface', 'ns3::Ipv4InterfaceAddress', [], is_const=True) ## aodv-rtable.h (module 'aodv'): ns3::Time ns3::aodv::RoutingTableEntry::GetLifeTime() const [member function] cls.add_method('GetLifeTime', 'ns3::Time', [], is_const=True) ## aodv-rtable.h (module 'aodv'): ns3::Ipv4Address ns3::aodv::RoutingTableEntry::GetNextHop() const [member function] cls.add_method('GetNextHop', 'ns3::Ipv4Address', [], is_const=True) ## aodv-rtable.h (module 'aodv'): ns3::Ptr<ns3::NetDevice> ns3::aodv::RoutingTableEntry::GetOutputDevice() const [member function] cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::GetPrecursors(std::vector<ns3::Ipv4Address, std::allocator<ns3::Ipv4Address> > & prec) const [member function] cls.add_method('GetPrecursors', 'void', [param('std::vector< ns3::Ipv4Address > &', 'prec')], is_const=True) ## aodv-rtable.h (module 'aodv'): ns3::Ptr<ns3::Ipv4Route> ns3::aodv::RoutingTableEntry::GetRoute() const [member function] cls.add_method('GetRoute', 'ns3::Ptr< ns3::Ipv4Route >', [], is_const=True) ## aodv-rtable.h (module 'aodv'): uint8_t ns3::aodv::RoutingTableEntry::GetRreqCnt() const [member function] cls.add_method('GetRreqCnt', 'uint8_t', [], is_const=True) ## aodv-rtable.h (module 'aodv'): uint32_t ns3::aodv::RoutingTableEntry::GetSeqNo() const [member function] cls.add_method('GetSeqNo', 'uint32_t', [], is_const=True) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTableEntry::GetValidSeqNo() const [member function] cls.add_method('GetValidSeqNo', 'bool', [], is_const=True) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::IncrementRreqCnt() [member function] cls.add_method('IncrementRreqCnt', 'void', []) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTableEntry::InsertPrecursor(ns3::Ipv4Address id) [member function] cls.add_method('InsertPrecursor', 'bool', [param('ns3::Ipv4Address', 'id')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::Invalidate(ns3::Time badLinkLifetime) [member function] cls.add_method('Invalidate', 'void', [param('ns3::Time', 'badLinkLifetime')]) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTableEntry::IsPrecursorListEmpty() const [member function] cls.add_method('IsPrecursorListEmpty', 'bool', [], is_const=True) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTableEntry::IsUnidirectional() const [member function] cls.add_method('IsUnidirectional', 'bool', [], is_const=True) ## aodv-rtable.h (module 'aodv'): bool ns3::aodv::RoutingTableEntry::LookupPrecursor(ns3::Ipv4Address id) [member function] cls.add_method('LookupPrecursor', 'bool', [param('ns3::Ipv4Address', 'id')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::Print(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('Print', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], 
is_const=True) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetBalcklistTimeout(ns3::Time t) [member function] cls.add_method('SetBalcklistTimeout', 'void', [param('ns3::Time', 't')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetFlag(ns3::aodv::RouteFlags flag) [member function] cls.add_method('SetFlag', 'void', [param('ns3::aodv::RouteFlags', 'flag')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetHop(uint16_t hop) [member function] cls.add_method('SetHop', 'void', [param('uint16_t', 'hop')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetInterface(ns3::Ipv4InterfaceAddress iface) [member function] cls.add_method('SetInterface', 'void', [param('ns3::Ipv4InterfaceAddress', 'iface')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetLifeTime(ns3::Time lt) [member function] cls.add_method('SetLifeTime', 'void', [param('ns3::Time', 'lt')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetNextHop(ns3::Ipv4Address nextHop) [member function] cls.add_method('SetNextHop', 'void', [param('ns3::Ipv4Address', 'nextHop')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetOutputDevice(ns3::Ptr<ns3::NetDevice> dev) [member function] cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'dev')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetRoute(ns3::Ptr<ns3::Ipv4Route> r) [member function] cls.add_method('SetRoute', 'void', [param('ns3::Ptr< ns3::Ipv4Route >', 'r')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetRreqCnt(uint8_t n) [member function] cls.add_method('SetRreqCnt', 'void', [param('uint8_t', 'n')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetSeqNo(uint32_t sn) [member function] cls.add_method('SetSeqNo', 'void', [param('uint32_t', 'sn')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetUnidirectional(bool u) [member function] cls.add_method('SetUnidirectional', 'void', [param('bool', 'u')]) ## aodv-rtable.h (module 'aodv'): void ns3::aodv::RoutingTableEntry::SetValidSeqNo(bool s) [member function] cls.add_method('SetValidSeqNo', 'void', [param('bool', 's')]) ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTableEntry::m_ackTimer [variable] cls.add_instance_attribute('m_ackTimer', 'ns3::Timer', is_const=False) return def register_Ns3AodvRrepAckHeader_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## aodv-packet.h (module 'aodv'): ns3::aodv::RrepAckHeader::RrepAckHeader(ns3::aodv::RrepAckHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::RrepAckHeader const &', 'arg0')]) ## aodv-packet.h (module 'aodv'): ns3::aodv::RrepAckHeader::RrepAckHeader() [constructor] cls.add_constructor([]) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RrepAckHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## aodv-packet.h (module 'aodv'): ns3::TypeId ns3::aodv::RrepAckHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RrepAckHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## 
aodv-packet.h (module 'aodv'): static ns3::TypeId ns3::aodv::RrepAckHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepAckHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepAckHeader::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) return def register_Ns3AodvRrepHeader_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## aodv-packet.h (module 'aodv'): ns3::aodv::RrepHeader::RrepHeader(ns3::aodv::RrepHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::RrepHeader const &', 'arg0')]) ## aodv-packet.h (module 'aodv'): ns3::aodv::RrepHeader::RrepHeader(uint8_t prefixSize=0, uint8_t hopCount=0, ns3::Ipv4Address dst=ns3::Ipv4Address(), uint32_t dstSeqNo=0, ns3::Ipv4Address origin=ns3::Ipv4Address(), ns3::Time lifetime=ns3::MilliSeconds( )) [constructor] cls.add_constructor([param('uint8_t', 'prefixSize', default_value='0'), param('uint8_t', 'hopCount', default_value='0'), param('ns3::Ipv4Address', 'dst', default_value='ns3::Ipv4Address()'), param('uint32_t', 'dstSeqNo', default_value='0'), param('ns3::Ipv4Address', 'origin', default_value='ns3::Ipv4Address()'), param('ns3::Time', 'lifetime', default_value='ns3::MilliSeconds(0)')]) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RrepHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## aodv-packet.h (module 'aodv'): bool ns3::aodv::RrepHeader::GetAckRequired() const [member function] cls.add_method('GetAckRequired', 'bool', [], is_const=True) ## aodv-packet.h (module 'aodv'): ns3::Ipv4Address ns3::aodv::RrepHeader::GetDst() const [member function] cls.add_method('GetDst', 'ns3::Ipv4Address', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RrepHeader::GetDstSeqno() const [member function] cls.add_method('GetDstSeqno', 'uint32_t', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint8_t ns3::aodv::RrepHeader::GetHopCount() const [member function] cls.add_method('GetHopCount', 'uint8_t', [], is_const=True) ## aodv-packet.h (module 'aodv'): ns3::TypeId ns3::aodv::RrepHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): ns3::Time ns3::aodv::RrepHeader::GetLifeTime() const [member function] cls.add_method('GetLifeTime', 'ns3::Time', [], is_const=True) ## aodv-packet.h (module 'aodv'): ns3::Ipv4Address ns3::aodv::RrepHeader::GetOrigin() const [member function] cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint8_t ns3::aodv::RrepHeader::GetPrefixSize() const [member function] cls.add_method('GetPrefixSize', 'uint8_t', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RrepHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): static ns3::TypeId ns3::aodv::RrepHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], 
is_static=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::SetAckRequired(bool f) [member function] cls.add_method('SetAckRequired', 'void', [param('bool', 'f')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::SetDst(ns3::Ipv4Address a) [member function] cls.add_method('SetDst', 'void', [param('ns3::Ipv4Address', 'a')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::SetDstSeqno(uint32_t s) [member function] cls.add_method('SetDstSeqno', 'void', [param('uint32_t', 's')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::SetHello(ns3::Ipv4Address src, uint32_t srcSeqNo, ns3::Time lifetime) [member function] cls.add_method('SetHello', 'void', [param('ns3::Ipv4Address', 'src'), param('uint32_t', 'srcSeqNo'), param('ns3::Time', 'lifetime')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::SetHopCount(uint8_t count) [member function] cls.add_method('SetHopCount', 'void', [param('uint8_t', 'count')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::SetLifeTime(ns3::Time t) [member function] cls.add_method('SetLifeTime', 'void', [param('ns3::Time', 't')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::SetOrigin(ns3::Ipv4Address a) [member function] cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address', 'a')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RrepHeader::SetPrefixSize(uint8_t sz) [member function] cls.add_method('SetPrefixSize', 'void', [param('uint8_t', 'sz')]) return def register_Ns3AodvRreqHeader_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## aodv-packet.h (module 'aodv'): ns3::aodv::RreqHeader::RreqHeader(ns3::aodv::RreqHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::aodv::RreqHeader const &', 'arg0')]) ## aodv-packet.h (module 'aodv'): ns3::aodv::RreqHeader::RreqHeader(uint8_t flags=0, uint8_t reserved=0, uint8_t hopCount=0, uint32_t requestID=0, ns3::Ipv4Address dst=ns3::Ipv4Address(), uint32_t dstSeqNo=0, ns3::Ipv4Address origin=ns3::Ipv4Address(), uint32_t originSeqNo=0) [constructor] cls.add_constructor([param('uint8_t', 'flags', default_value='0'), param('uint8_t', 'reserved', default_value='0'), param('uint8_t', 'hopCount', default_value='0'), param('uint32_t', 'requestID', default_value='0'), param('ns3::Ipv4Address', 'dst', default_value='ns3::Ipv4Address()'), param('uint32_t', 'dstSeqNo', default_value='0'), param('ns3::Ipv4Address', 'origin', default_value='ns3::Ipv4Address()'), param('uint32_t', 'originSeqNo', default_value='0')]) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RreqHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## aodv-packet.h (module 'aodv'): bool ns3::aodv::RreqHeader::GetDestinationOnly() const [member function] cls.add_method('GetDestinationOnly', 'bool', [], is_const=True) ## aodv-packet.h (module 'aodv'): ns3::Ipv4Address ns3::aodv::RreqHeader::GetDst() const [member function] 
cls.add_method('GetDst', 'ns3::Ipv4Address', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RreqHeader::GetDstSeqno() const [member function] cls.add_method('GetDstSeqno', 'uint32_t', [], is_const=True) ## aodv-packet.h (module 'aodv'): bool ns3::aodv::RreqHeader::GetGratiousRrep() const [member function] cls.add_method('GetGratiousRrep', 'bool', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint8_t ns3::aodv::RreqHeader::GetHopCount() const [member function] cls.add_method('GetHopCount', 'uint8_t', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RreqHeader::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## aodv-packet.h (module 'aodv'): ns3::TypeId ns3::aodv::RreqHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): ns3::Ipv4Address ns3::aodv::RreqHeader::GetOrigin() const [member function] cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RreqHeader::GetOriginSeqno() const [member function] cls.add_method('GetOriginSeqno', 'uint32_t', [], is_const=True) ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::RreqHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): static ns3::TypeId ns3::aodv::RreqHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## aodv-packet.h (module 'aodv'): bool ns3::aodv::RreqHeader::GetUnknownSeqno() const [member function] cls.add_method('GetUnknownSeqno', 'bool', [], is_const=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetDestinationOnly(bool f) [member function] cls.add_method('SetDestinationOnly', 'void', [param('bool', 'f')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetDst(ns3::Ipv4Address a) [member function] cls.add_method('SetDst', 'void', [param('ns3::Ipv4Address', 'a')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetDstSeqno(uint32_t s) [member function] cls.add_method('SetDstSeqno', 'void', [param('uint32_t', 's')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetGratiousRrep(bool f) [member function] cls.add_method('SetGratiousRrep', 'void', [param('bool', 'f')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetHopCount(uint8_t count) [member function] cls.add_method('SetHopCount', 'void', [param('uint8_t', 'count')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetId(uint32_t id) [member function] cls.add_method('SetId', 'void', [param('uint32_t', 'id')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetOrigin(ns3::Ipv4Address a) [member function] cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address', 'a')]) ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetOriginSeqno(uint32_t s) [member function] 
    cls.add_method('SetOriginSeqno', 'void', [param('uint32_t', 's')])
    ## aodv-packet.h (module 'aodv'): void ns3::aodv::RreqHeader::SetUnknownSeqno(bool f) [member function]
    cls.add_method('SetUnknownSeqno', 'void', [param('bool', 'f')])
    return

def register_Ns3AodvTypeHeader_methods(root_module, cls):
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## aodv-packet.h (module 'aodv'): ns3::aodv::TypeHeader::TypeHeader(ns3::aodv::TypeHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::aodv::TypeHeader const &', 'arg0')])
    ## aodv-packet.h (module 'aodv'): ns3::aodv::TypeHeader::TypeHeader(ns3::aodv::MessageType t) [constructor]
    cls.add_constructor([param('ns3::aodv::MessageType', 't')])
    ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::TypeHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## aodv-packet.h (module 'aodv'): ns3::aodv::MessageType ns3::aodv::TypeHeader::Get() const [member function]
    cls.add_method('Get', 'ns3::aodv::MessageType', [], is_const=True)
    ## aodv-packet.h (module 'aodv'): ns3::TypeId ns3::aodv::TypeHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## aodv-packet.h (module 'aodv'): uint32_t ns3::aodv::TypeHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## aodv-packet.h (module 'aodv'): static ns3::TypeId ns3::aodv::TypeHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## aodv-packet.h (module 'aodv'): bool ns3::aodv::TypeHeader::IsValid() const [member function]
    cls.add_method('IsValid', 'bool', [], is_const=True)
    ## aodv-packet.h (module 'aodv'): void ns3::aodv::TypeHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## aodv-packet.h (module 'aodv'): void ns3::aodv::TypeHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    return

def register_functions(root_module):
    module = root_module
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
    return

def register_functions_ns3_FatalImpl(module, root_module):
    return

def register_functions_ns3_aodv(module, root_module):
    return

def main():
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)

if __name__ == '__main__':
    main()
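
# Usage sketch (a hedged editorial note, not part of the original ns-3
# source; it assumes the standard pybindgen toolchain, which supplies
# FileCodeSink, param, module_init and the register_* helpers used above).
# Running a modulegen script like this one emits the generated C++ binding
# code for the aodv module on stdout, along the lines of:
#
#     python path/to/this/modulegen.py > aodv-bindings.cc   # paths illustrative
#
# In an ns-3 checkout such scripts are normally invoked by the build system
# rather than by hand.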
gpl-2.0
babelsberg/babelsberg-r
topaz/objects/threadobject.py
2
1802
import copy

from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object


class W_ThreadObject(W_Object):
    classdef = ClassDef("Thread", W_Object.classdef)

    def __init__(self, space):
        W_Object.__init__(self, space)
        # TODO: This should be a map dict.
        self.local_storage = {}

    def __deepcopy__(self, memo):
        obj = super(W_ThreadObject, self).__deepcopy__(memo)
        obj.local_storage = copy.deepcopy(self.local_storage, memo)
        return obj

    @classdef.singleton_method("current")
    def method_current(self, space):
        return space.w_main_thread

    @classdef.method("[]", key="str")
    def method_subscript(self, space, key):
        return self.local_storage.get(key, space.w_nil)

    @classdef.method("[]=", key="str")
    def method_subscript_assign(self, space, key, w_value):
        self.local_storage[key] = w_value
        return w_value

    @classdef.method("recursion_guard")
    def method_recursion_guard(self, space, w_identifier, w_obj, block):
        """
        Invokes the block (with no arguments) unless a recursion guard for
        the given identifier and object is already active; returns true if
        recursion was detected, false otherwise.
        """
        ec = space.getexecutioncontext()
        identifier = space.symbol_w(w_identifier)
        with ec.recursion_guard(identifier, w_obj) as in_recursion:
            if not in_recursion:
                space.invoke_block(block, [])
        return space.newbool(in_recursion)

    @classdef.method("in_recursion_guard?")
    def method_in_recursion_guardp(self, space, w_identifier):
        ec = space.getexecutioncontext()
        identifier = space.symbol_w(w_identifier)
        if identifier in ec.recursive_calls:
            return space.w_true
        return space.w_false
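
# Illustrative usage from Ruby (a sketch; the identifiers are hypothetical
# and the example is not taken from the Topaz test suite):
#
#   Thread.current[:depth] = 0                    # thread-local []= / []
#   detected = Thread.current.recursion_guard(:inspect, obj) do
#     # body runs only when no guard for (:inspect, obj) is active
#   end
#   Thread.current.in_recursion_guard?(:inspect)  # true inside an active guard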
bsd-3-clause
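recursion_guard above exists so Ruby-level code (for example Array#inspect) can detect cycles in self-referential structures. A plain-Python sketch of the same idea, with illustrative names:

# Plain-Python sketch of the recursion-guard idea: track (identifier, object id)
# pairs for the current call chain and report re-entry to the block.
_recursive_calls = set()

def recursion_guard(identifier, obj, block):
    key = (identifier, id(obj))
    if key in _recursive_calls:
        return block(True)   # recursion detected
    _recursive_calls.add(key)
    try:
        return block(False)  # first visit
    finally:
        _recursive_calls.discard(key)

def safe_repr(lst):
    def block(in_recursion):
        if in_recursion:
            return '[...]'
        return '[' + ', '.join(
            safe_repr(x) if isinstance(x, list) else repr(x) for x in lst) + ']'
    return recursion_guard('inspect', lst, block)

a = [1, 2]
a.append(a)               # make the list contain itself
print(safe_repr(a))       # prints: [1, 2, [...]]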
tafaRU/odoo
addons/l10n_co/__init__.py
313
1180
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) David Arnold (devCO).
#    Author        David Arnold (devCO), [email protected]
#    Co-Authors    Juan Pablo Aries (devCO), [email protected]
#                  Hector Ivan Valencia Muñoz (TIX SAS)
#                  Nhomar Hernandez (Vauxoo)
#                  Humberto Ochoa (Vauxoo)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import wizard
agpl-3.0
moijes12/oh-mainline
vendor/packages/beautifulsoup4/bs4/dammit.py
408
29302
# -*- coding: utf-8 -*- """Beautiful Soup bonus library: Unicode, Dammit This library converts a bytestream to Unicode through any means necessary. It is heavily based on code from Mark Pilgrim's Universal Feed Parser. It works best on XML and XML, but it does not rewrite the XML or HTML to reflect a new encoding; that's the tree builder's job. """ import codecs from htmlentitydefs import codepoint2name import re import logging import string # Import a library to autodetect character encodings. chardet_type = None try: # First try the fast C implementation. # PyPI package: cchardet import cchardet def chardet_dammit(s): return cchardet.detect(s)['encoding'] except ImportError: try: # Fall back to the pure Python implementation # Debian package: python-chardet # PyPI package: chardet import chardet def chardet_dammit(s): return chardet.detect(s)['encoding'] #import chardet.constants #chardet.constants._debug = 1 except ImportError: # No chardet available. def chardet_dammit(s): return None # Available from http://cjkpython.i18n.org/. try: import iconv_codec except ImportError: pass xml_encoding_re = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I) html_meta_re = re.compile( '<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I) class EntitySubstitution(object): """Substitute XML or HTML entities for the corresponding characters.""" def _populate_class_variables(): lookup = {} reverse_lookup = {} characters_for_re = [] for codepoint, name in list(codepoint2name.items()): character = unichr(codepoint) if codepoint != 34: # There's no point in turning the quotation mark into # &quot;, unless it happens within an attribute value, which # is handled elsewhere. characters_for_re.append(character) lookup[character] = name # But we do want to turn &quot; into the quotation mark. reverse_lookup[name] = character re_definition = "[%s]" % "".join(characters_for_re) return lookup, reverse_lookup, re.compile(re_definition) (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER, CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables() CHARACTER_TO_XML_ENTITY = { "'": "apos", '"': "quot", "&": "amp", "<": "lt", ">": "gt", } BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" ")") AMPERSAND_OR_BRACKET = re.compile("([<>&])") @classmethod def _substitute_html_entity(cls, matchobj): entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0)) return "&%s;" % entity @classmethod def _substitute_xml_entity(cls, matchobj): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)] return "&%s;" % entity @classmethod def quoted_attribute_value(self, value): """Make a value into a quoted XML attribute, possibly escaping it. Most strings will be quoted using double quotes. Bob's Bar -> "Bob's Bar" If a string contains double quotes, it will be quoted using single quotes. Welcome to "my bar" -> 'Welcome to "my bar"' If a string contains both single and double quotes, the double quotes will be escaped, and the string will be quoted using double quotes. Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot; """ quote_with = '"' if '"' in value: if "'" in value: # The string contains both single and double # quotes. Turn the double quotes into # entities. We quote the double quotes rather than # the single quotes because the entity name is # "&quot;" whether this is HTML or XML. 
If we # quoted the single quotes, we'd have to decide # between &apos; and &squot;. replace_with = "&quot;" value = value.replace('"', replace_with) else: # There are double quotes but no single quotes. # We can use single quotes to quote the attribute. quote_with = "'" return quote_with + value + quote_with @classmethod def substitute_xml(cls, value, make_quoted_attribute=False): """Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands will become &amp;. If you want ampersands that appear to be part of an entity definition to be left alone, use substitute_xml_containing_entities() instead. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value. """ # Escape angle brackets and ampersands. value = cls.AMPERSAND_OR_BRACKET.sub( cls._substitute_xml_entity, value) if make_quoted_attribute: value = cls.quoted_attribute_value(value) return value @classmethod def substitute_xml_containing_entities( cls, value, make_quoted_attribute=False): """Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands that are not part of an entity defition will become &amp;. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value. """ # Escape angle brackets, and ampersands that aren't part of # entities. value = cls.BARE_AMPERSAND_OR_BRACKET.sub( cls._substitute_xml_entity, value) if make_quoted_attribute: value = cls.quoted_attribute_value(value) return value @classmethod def substitute_html(cls, s): """Replace certain Unicode characters with named HTML entities. This differs from data.encode(encoding, 'xmlcharrefreplace') in that the goal is to make the result more readable (to those with ASCII displays) rather than to recover from errors. There's absolutely nothing wrong with a UTF-8 string containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that character with "&eacute;" will make it more readable to some people. """ return cls.CHARACTER_TO_HTML_ENTITY_RE.sub( cls._substitute_html_entity, s) class EncodingDetector: """Suggests a number of possible encodings for a bytestring. Order of precedence: 1. Encodings you specifically tell EncodingDetector to try first (the override_encodings argument to the constructor). 2. An encoding declared within the bytestring itself, either in an XML declaration (if the bytestring is to be interpreted as an XML document), or in a <meta> tag (if the bytestring is to be interpreted as an HTML document.) 3. An encoding detected through textual analysis by chardet, cchardet, or a similar external library. 4. UTF-8. 5. Windows-1252. """ def __init__(self, markup, override_encodings=None, is_html=False): self.override_encodings = override_encodings or [] self.chardet_encoding = None self.is_html = is_html self.declared_encoding = None # First order of business: strip a byte-order mark. 
self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup) def _usable(self, encoding, tried): if encoding is not None: encoding = encoding.lower() if encoding not in tried: tried.add(encoding) return True return False @property def encodings(self): """Yield a number of encodings that might work for this markup.""" tried = set() for e in self.override_encodings: if self._usable(e, tried): yield e # Did the document originally start with a byte-order mark # that indicated its encoding? if self._usable(self.sniffed_encoding, tried): yield self.sniffed_encoding # Look within the document for an XML or HTML encoding # declaration. if self.declared_encoding is None: self.declared_encoding = self.find_declared_encoding( self.markup, self.is_html) if self._usable(self.declared_encoding, tried): yield self.declared_encoding # Use third-party character set detection to guess at the # encoding. if self.chardet_encoding is None: self.chardet_encoding = chardet_dammit(self.markup) if self._usable(self.chardet_encoding, tried): yield self.chardet_encoding # As a last-ditch effort, try utf-8 and windows-1252. for e in ('utf-8', 'windows-1252'): if self._usable(e, tried): yield e @classmethod def strip_byte_order_mark(cls, data): """If a byte-order mark is present, strip it and return the encoding it implies.""" encoding = None if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == b'\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == b'\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == b'\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] return data, encoding @classmethod def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False): """Given a document, tries to find its declared encoding. An XML encoding is declared at the beginning of the document. An HTML encoding is declared in a <meta> tag, hopefully near the beginning of the document. """ if search_entire_document: xml_endpos = html_endpos = len(markup) else: xml_endpos = 1024 html_endpos = max(2048, int(len(markup) * 0.05)) declared_encoding = None declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos) if not declared_encoding_match and is_html: declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos) if declared_encoding_match is not None: declared_encoding = declared_encoding_match.groups()[0].decode( 'ascii') if declared_encoding: return declared_encoding.lower() return None class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. 
CHARSET_ALIASES = {"macintosh": "mac-roman", "x-sjis": "shift-jis"} ENCODINGS_WITH_SMART_QUOTES = [ "windows-1252", "iso-8859-1", "iso-8859-2", ] def __init__(self, markup, override_encodings=[], smart_quotes_to=None, is_html=False): self.smart_quotes_to = smart_quotes_to self.tried_encodings = [] self.contains_replacement_characters = False self.is_html = is_html self.detector = EncodingDetector(markup, override_encodings, is_html) # Short-circuit if the data is in Unicode to begin with. if isinstance(markup, unicode) or markup == '': self.markup = markup self.unicode_markup = unicode(markup) self.original_encoding = None return # The encoding detector may have stripped a byte-order mark. # Use the stripped markup from this point on. self.markup = self.detector.markup u = None for encoding in self.detector.encodings: markup = self.detector.markup u = self._convert_from(encoding) if u is not None: break if not u: # None of the encodings worked. As an absolute last resort, # try them again with character replacement. for encoding in self.detector.encodings: if encoding != "ascii": u = self._convert_from(encoding, "replace") if u is not None: logging.warning( "Some characters could not be decoded, and were " "replaced with REPLACEMENT CHARACTER.") self.contains_replacement_characters = True break # If none of that worked, we could at this point force it to # ASCII, but that would destroy so much data that I think # giving up is better. self.unicode_markup = u if not u: self.original_encoding = None def _sub_ms_char(self, match): """Changes a MS smart quote character to an XML or HTML entity, or an ASCII character.""" orig = match.group(1) if self.smart_quotes_to == 'ascii': sub = self.MS_CHARS_TO_ASCII.get(orig).encode() else: sub = self.MS_CHARS.get(orig) if type(sub) == tuple: if self.smart_quotes_to == 'xml': sub = '&#x'.encode() + sub[1].encode() + ';'.encode() else: sub = '&'.encode() + sub[0].encode() + ';'.encode() else: sub = sub.encode() return sub def _convert_from(self, proposed, errors="strict"): proposed = self.find_codec(proposed) if not proposed or (proposed, errors) in self.tried_encodings: return None self.tried_encodings.append((proposed, errors)) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if (self.smart_quotes_to is not None and proposed in self.ENCODINGS_WITH_SMART_QUOTES): smart_quotes_re = b"([\x80-\x9f])" smart_quotes_compiled = re.compile(smart_quotes_re) markup = smart_quotes_compiled.sub(self._sub_ms_char, markup) try: #print "Trying to convert document to %s (errors=%s)" % ( # proposed, errors) u = self._to_unicode(markup, proposed, errors) self.markup = u self.original_encoding = proposed except Exception as e: #print "That didn't work!" #print e return None #print "Correct encoding: %s" % proposed return self.markup def _to_unicode(self, data, encoding, errors="strict"): '''Given a string and its encoding, decodes the string into Unicode. 
%encoding is a string recognized by encodings.aliases''' return unicode(data, encoding, errors) @property def declared_html_encoding(self): if not self.is_html: return None return self.detector.declared_encoding def find_codec(self, charset): value = (self._codec(self.CHARSET_ALIASES.get(charset, charset)) or (charset and self._codec(charset.replace("-", ""))) or (charset and self._codec(charset.replace("-", "_"))) or (charset and charset.lower()) or charset ) if value: return value.lower() return None def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities. MS_CHARS = {b'\x80': ('euro', '20AC'), b'\x81': ' ', b'\x82': ('sbquo', '201A'), b'\x83': ('fnof', '192'), b'\x84': ('bdquo', '201E'), b'\x85': ('hellip', '2026'), b'\x86': ('dagger', '2020'), b'\x87': ('Dagger', '2021'), b'\x88': ('circ', '2C6'), b'\x89': ('permil', '2030'), b'\x8A': ('Scaron', '160'), b'\x8B': ('lsaquo', '2039'), b'\x8C': ('OElig', '152'), b'\x8D': '?', b'\x8E': ('#x17D', '17D'), b'\x8F': '?', b'\x90': '?', b'\x91': ('lsquo', '2018'), b'\x92': ('rsquo', '2019'), b'\x93': ('ldquo', '201C'), b'\x94': ('rdquo', '201D'), b'\x95': ('bull', '2022'), b'\x96': ('ndash', '2013'), b'\x97': ('mdash', '2014'), b'\x98': ('tilde', '2DC'), b'\x99': ('trade', '2122'), b'\x9a': ('scaron', '161'), b'\x9b': ('rsaquo', '203A'), b'\x9c': ('oelig', '153'), b'\x9d': '?', b'\x9e': ('#x17E', '17E'), b'\x9f': ('Yuml', ''),} # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains # horrors like stripping diacritical marks to turn á into a, but also # contains non-horrors like turning “ into ". MS_CHARS_TO_ASCII = { b'\x80' : 'EUR', b'\x81' : ' ', b'\x82' : ',', b'\x83' : 'f', b'\x84' : ',,', b'\x85' : '...', b'\x86' : '+', b'\x87' : '++', b'\x88' : '^', b'\x89' : '%', b'\x8a' : 'S', b'\x8b' : '<', b'\x8c' : 'OE', b'\x8d' : '?', b'\x8e' : 'Z', b'\x8f' : '?', b'\x90' : '?', b'\x91' : "'", b'\x92' : "'", b'\x93' : '"', b'\x94' : '"', b'\x95' : '*', b'\x96' : '-', b'\x97' : '--', b'\x98' : '~', b'\x99' : '(TM)', b'\x9a' : 's', b'\x9b' : '>', b'\x9c' : 'oe', b'\x9d' : '?', b'\x9e' : 'z', b'\x9f' : 'Y', b'\xa0' : ' ', b'\xa1' : '!', b'\xa2' : 'c', b'\xa3' : 'GBP', b'\xa4' : '$', #This approximation is especially parochial--this is the #generic currency symbol. 
b'\xa5' : 'YEN', b'\xa6' : '|', b'\xa7' : 'S', b'\xa8' : '..', b'\xa9' : '', b'\xaa' : '(th)', b'\xab' : '<<', b'\xac' : '!', b'\xad' : ' ', b'\xae' : '(R)', b'\xaf' : '-', b'\xb0' : 'o', b'\xb1' : '+-', b'\xb2' : '2', b'\xb3' : '3', b'\xb4' : ("'", 'acute'), b'\xb5' : 'u', b'\xb6' : 'P', b'\xb7' : '*', b'\xb8' : ',', b'\xb9' : '1', b'\xba' : '(th)', b'\xbb' : '>>', b'\xbc' : '1/4', b'\xbd' : '1/2', b'\xbe' : '3/4', b'\xbf' : '?', b'\xc0' : 'A', b'\xc1' : 'A', b'\xc2' : 'A', b'\xc3' : 'A', b'\xc4' : 'A', b'\xc5' : 'A', b'\xc6' : 'AE', b'\xc7' : 'C', b'\xc8' : 'E', b'\xc9' : 'E', b'\xca' : 'E', b'\xcb' : 'E', b'\xcc' : 'I', b'\xcd' : 'I', b'\xce' : 'I', b'\xcf' : 'I', b'\xd0' : 'D', b'\xd1' : 'N', b'\xd2' : 'O', b'\xd3' : 'O', b'\xd4' : 'O', b'\xd5' : 'O', b'\xd6' : 'O', b'\xd7' : '*', b'\xd8' : 'O', b'\xd9' : 'U', b'\xda' : 'U', b'\xdb' : 'U', b'\xdc' : 'U', b'\xdd' : 'Y', b'\xde' : 'b', b'\xdf' : 'B', b'\xe0' : 'a', b'\xe1' : 'a', b'\xe2' : 'a', b'\xe3' : 'a', b'\xe4' : 'a', b'\xe5' : 'a', b'\xe6' : 'ae', b'\xe7' : 'c', b'\xe8' : 'e', b'\xe9' : 'e', b'\xea' : 'e', b'\xeb' : 'e', b'\xec' : 'i', b'\xed' : 'i', b'\xee' : 'i', b'\xef' : 'i', b'\xf0' : 'o', b'\xf1' : 'n', b'\xf2' : 'o', b'\xf3' : 'o', b'\xf4' : 'o', b'\xf5' : 'o', b'\xf6' : 'o', b'\xf7' : '/', b'\xf8' : 'o', b'\xf9' : 'u', b'\xfa' : 'u', b'\xfb' : 'u', b'\xfc' : 'u', b'\xfd' : 'y', b'\xfe' : 'b', b'\xff' : 'y', } # A map used when removing rogue Windows-1252/ISO-8859-1 # characters in otherwise UTF-8 documents. # # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in # Windows-1252. WINDOWS_1252_TO_UTF8 = { 0x80 : b'\xe2\x82\xac', # € 0x82 : b'\xe2\x80\x9a', # ‚ 0x83 : b'\xc6\x92', # ƒ 0x84 : b'\xe2\x80\x9e', # „ 0x85 : b'\xe2\x80\xa6', # … 0x86 : b'\xe2\x80\xa0', # † 0x87 : b'\xe2\x80\xa1', # ‡ 0x88 : b'\xcb\x86', # ˆ 0x89 : b'\xe2\x80\xb0', # ‰ 0x8a : b'\xc5\xa0', # Š 0x8b : b'\xe2\x80\xb9', # ‹ 0x8c : b'\xc5\x92', # Œ 0x8e : b'\xc5\xbd', # Ž 0x91 : b'\xe2\x80\x98', # ‘ 0x92 : b'\xe2\x80\x99', # ’ 0x93 : b'\xe2\x80\x9c', # “ 0x94 : b'\xe2\x80\x9d', # ” 0x95 : b'\xe2\x80\xa2', # • 0x96 : b'\xe2\x80\x93', # – 0x97 : b'\xe2\x80\x94', # — 0x98 : b'\xcb\x9c', # ˜ 0x99 : b'\xe2\x84\xa2', # ™ 0x9a : b'\xc5\xa1', # š 0x9b : b'\xe2\x80\xba', # › 0x9c : b'\xc5\x93', # œ 0x9e : b'\xc5\xbe', # ž 0x9f : b'\xc5\xb8', # Ÿ 0xa0 : b'\xc2\xa0', #   0xa1 : b'\xc2\xa1', # ¡ 0xa2 : b'\xc2\xa2', # ¢ 0xa3 : b'\xc2\xa3', # £ 0xa4 : b'\xc2\xa4', # ¤ 0xa5 : b'\xc2\xa5', # ¥ 0xa6 : b'\xc2\xa6', # ¦ 0xa7 : b'\xc2\xa7', # § 0xa8 : b'\xc2\xa8', # ¨ 0xa9 : b'\xc2\xa9', # © 0xaa : b'\xc2\xaa', # ª 0xab : b'\xc2\xab', # « 0xac : b'\xc2\xac', # ¬ 0xad : b'\xc2\xad', # ­ 0xae : b'\xc2\xae', # ® 0xaf : b'\xc2\xaf', # ¯ 0xb0 : b'\xc2\xb0', # ° 0xb1 : b'\xc2\xb1', # ± 0xb2 : b'\xc2\xb2', # ² 0xb3 : b'\xc2\xb3', # ³ 0xb4 : b'\xc2\xb4', # ´ 0xb5 : b'\xc2\xb5', # µ 0xb6 : b'\xc2\xb6', # ¶ 0xb7 : b'\xc2\xb7', # · 0xb8 : b'\xc2\xb8', # ¸ 0xb9 : b'\xc2\xb9', # ¹ 0xba : b'\xc2\xba', # º 0xbb : b'\xc2\xbb', # » 0xbc : b'\xc2\xbc', # ¼ 0xbd : b'\xc2\xbd', # ½ 0xbe : b'\xc2\xbe', # ¾ 0xbf : b'\xc2\xbf', # ¿ 0xc0 : b'\xc3\x80', # À 0xc1 : b'\xc3\x81', # Á 0xc2 : b'\xc3\x82', #  0xc3 : b'\xc3\x83', # à 0xc4 : b'\xc3\x84', # Ä 0xc5 : b'\xc3\x85', # Å 0xc6 : b'\xc3\x86', # Æ 0xc7 : b'\xc3\x87', # Ç 0xc8 : b'\xc3\x88', # È 0xc9 : b'\xc3\x89', # É 0xca : b'\xc3\x8a', # Ê 0xcb : b'\xc3\x8b', # Ë 0xcc : b'\xc3\x8c', # Ì 0xcd : b'\xc3\x8d', # Í 0xce : b'\xc3\x8e', # Î 0xcf : b'\xc3\x8f', # Ï 0xd0 : b'\xc3\x90', # Ð 0xd1 : b'\xc3\x91', # Ñ 0xd2 : b'\xc3\x92', # Ò 0xd3 : 
b'\xc3\x93', # Ó 0xd4 : b'\xc3\x94', # Ô 0xd5 : b'\xc3\x95', # Õ 0xd6 : b'\xc3\x96', # Ö 0xd7 : b'\xc3\x97', # × 0xd8 : b'\xc3\x98', # Ø 0xd9 : b'\xc3\x99', # Ù 0xda : b'\xc3\x9a', # Ú 0xdb : b'\xc3\x9b', # Û 0xdc : b'\xc3\x9c', # Ü 0xdd : b'\xc3\x9d', # Ý 0xde : b'\xc3\x9e', # Þ 0xdf : b'\xc3\x9f', # ß 0xe0 : b'\xc3\xa0', # à 0xe1 : b'\xa1', # á 0xe2 : b'\xc3\xa2', # â 0xe3 : b'\xc3\xa3', # ã 0xe4 : b'\xc3\xa4', # ä 0xe5 : b'\xc3\xa5', # å 0xe6 : b'\xc3\xa6', # æ 0xe7 : b'\xc3\xa7', # ç 0xe8 : b'\xc3\xa8', # è 0xe9 : b'\xc3\xa9', # é 0xea : b'\xc3\xaa', # ê 0xeb : b'\xc3\xab', # ë 0xec : b'\xc3\xac', # ì 0xed : b'\xc3\xad', # í 0xee : b'\xc3\xae', # î 0xef : b'\xc3\xaf', # ï 0xf0 : b'\xc3\xb0', # ð 0xf1 : b'\xc3\xb1', # ñ 0xf2 : b'\xc3\xb2', # ò 0xf3 : b'\xc3\xb3', # ó 0xf4 : b'\xc3\xb4', # ô 0xf5 : b'\xc3\xb5', # õ 0xf6 : b'\xc3\xb6', # ö 0xf7 : b'\xc3\xb7', # ÷ 0xf8 : b'\xc3\xb8', # ø 0xf9 : b'\xc3\xb9', # ù 0xfa : b'\xc3\xba', # ú 0xfb : b'\xc3\xbb', # û 0xfc : b'\xc3\xbc', # ü 0xfd : b'\xc3\xbd', # ý 0xfe : b'\xc3\xbe', # þ } MULTIBYTE_MARKERS_AND_SIZES = [ (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF (0xe0, 0xef, 3), # 3-byte characters start with E0-EF (0xf0, 0xf4, 4), # 4-byte characters start with F0-F4 ] FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0] LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1] @classmethod def detwingle(cls, in_bytes, main_encoding="utf8", embedded_encoding="windows-1252"): """Fix characters from one encoding embedded in some other encoding. Currently the only situation supported is Windows-1252 (or its subset ISO-8859-1), embedded in UTF-8. The input must be a bytestring. If you've already converted the document to Unicode, you're too late. The output is a bytestring in which `embedded_encoding` characters have been converted to their `main_encoding` equivalents. """ if embedded_encoding.replace('_', '-').lower() not in ( 'windows-1252', 'windows_1252'): raise NotImplementedError( "Windows-1252 and ISO-8859-1 are the only currently supported " "embedded encodings.") if main_encoding.lower() not in ('utf8', 'utf-8'): raise NotImplementedError( "UTF-8 is the only currently supported main encoding.") byte_chunks = [] chunk_start = 0 pos = 0 while pos < len(in_bytes): byte = in_bytes[pos] if not isinstance(byte, int): # Python 2.x byte = ord(byte) if (byte >= cls.FIRST_MULTIBYTE_MARKER and byte <= cls.LAST_MULTIBYTE_MARKER): # This is the start of a UTF-8 multibyte character. Skip # to the end. for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES: if byte >= start and byte <= end: pos += size break elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8: # We found a Windows-1252 character! # Save the string up to this point as a chunk. byte_chunks.append(in_bytes[chunk_start:pos]) # Now translate the Windows-1252 character into UTF-8 # and add it as another, one-byte chunk. byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte]) pos += 1 chunk_start = pos else: # Go on to the next character. pos += 1 if chunk_start == 0: # The string is unchanged. return in_bytes else: # Store the final chunk. byte_chunks.append(in_bytes[chunk_start:]) return b''.join(byte_chunks)
agpl-3.0
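A brief usage sketch for the two entry points above, based on their documented behaviour; it assumes bs4 is importable (this vendored copy uses unicode()/unichr(), i.e. the Python 2 era API):

# Usage sketch for UnicodeDammit and detwingle (assumes beautifulsoup4 installed).
from bs4.dammit import UnicodeDammit

# Detect and decode a bytestring; encodings are tried in the documented order:
# overrides, byte-order mark, declared encoding, chardet guess, utf-8, windows-1252.
dammit = UnicodeDammit(b'<p>caf\xe9</p>', ["utf-8", "latin-1"])
print(dammit.original_encoding)   # e.g. 'latin-1' (utf-8 fails on the lone \xe9)
print(dammit.unicode_markup)      # u'<p>café</p>'

# detwingle() repairs Windows-1252 bytes embedded in an otherwise UTF-8 document.
mixed = u'caf\xe9'.encode('utf-8') + b' \x93quoted\x94'   # smart quotes are 1252 bytes
fixed = UnicodeDammit.detwingle(mixed)
print(fixed.decode('utf-8'))      # now decodes cleanly; quotes survive as characters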
n0max/servo
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorservodriver.py
9
8679
import json import os import socket import threading import time import traceback from .base import (Protocol, RefTestExecutor, RefTestImplementation, TestharnessExecutor, strip_server) from ..testrunner import Stop webdriver = None here = os.path.join(os.path.split(__file__)[0]) extra_timeout = 5 def do_delayed_imports(): global webdriver import webdriver class ServoWebDriverProtocol(Protocol): def __init__(self, executor, browser, capabilities, **kwargs): do_delayed_imports() Protocol.__init__(self, executor, browser) self.capabilities = capabilities self.host = browser.webdriver_host self.port = browser.webdriver_port self.session = None def setup(self, runner): """Connect to browser via WebDriver.""" self.runner = runner url = "http://%s:%d" % (self.host, self.port) session_started = False try: self.session = webdriver.Session(self.host, self.port, extension=webdriver.servo.ServoCommandExtensions) self.session.start() except: self.logger.warning( "Connecting with WebDriver failed:\n%s" % traceback.format_exc()) else: self.logger.debug("session started") session_started = True if not session_started: self.logger.warning("Failed to connect via WebDriver") self.executor.runner.send_message("init_failed") else: self.executor.runner.send_message("init_succeeded") def teardown(self): self.logger.debug("Hanging up on WebDriver session") try: self.session.end() except: pass def is_alive(self): try: # Get a simple property over the connection self.session.window_handle # TODO what exception? except Exception: return False return True def after_connect(self): pass def wait(self): while True: try: self.session.execute_async_script("") except webdriver.TimeoutException: pass except (socket.timeout, IOError): break except Exception as e: self.logger.error(traceback.format_exc(e)) break def on_environment_change(self, old_environment, new_environment): #Unset all the old prefs self.session.extension.reset_prefs(*old_environment.get("prefs", {}).keys()) self.session.extension.set_prefs(new_environment.get("prefs", {})) class ServoWebDriverRun(object): def __init__(self, func, session, url, timeout, current_timeout=None): self.func = func self.result = None self.session = session self.url = url self.timeout = timeout self.result_flag = threading.Event() def run(self): executor = threading.Thread(target=self._run) executor.start() flag = self.result_flag.wait(self.timeout + extra_timeout) if self.result is None: assert not flag self.result = False, ("EXTERNAL-TIMEOUT", None) return self.result def _run(self): try: self.result = True, self.func(self.session, self.url, self.timeout) except webdriver.TimeoutException: self.result = False, ("EXTERNAL-TIMEOUT", None) except (socket.timeout, IOError): self.result = False, ("CRASH", None) except Exception as e: message = getattr(e, "message", "") if message: message += "\n" message += traceback.format_exc(e) self.result = False, ("ERROR", e) finally: self.result_flag.set() def timeout_func(timeout): if timeout: t0 = time.time() return lambda: time.time() - t0 > timeout + extra_timeout else: return lambda: False class ServoWebDriverTestharnessExecutor(TestharnessExecutor): def __init__(self, browser, server_config, timeout_multiplier=1, close_after_done=True, capabilities=None, debug_info=None, **kwargs): TestharnessExecutor.__init__(self, browser, server_config, timeout_multiplier=1, debug_info=None) self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities) with open(os.path.join(here, "testharness_servodriver.js")) as f: self.script = 
f.read() self.timeout = None def on_protocol_change(self, new_protocol): pass def is_alive(self): return self.protocol.is_alive() def do_test(self, test): url = self.test_url(test) timeout = test.timeout * self.timeout_multiplier + extra_timeout if timeout != self.timeout: try: self.protocol.session.timeouts.script = timeout self.timeout = timeout except IOError: self.logger.error("Lost webdriver connection") return Stop success, data = ServoWebDriverRun(self.do_testharness, self.protocol.session, url, timeout).run() if success: return self.convert_result(test, data) return (test.result_cls(*data), []) def do_testharness(self, session, url, timeout): session.url = url result = json.loads( session.execute_async_script( self.script % {"abs_url": url, "url": strip_server(url), "timeout_multiplier": self.timeout_multiplier, "timeout": timeout * 1000})) # Prevent leaking every page in history until Servo develops a more sane # page cache session.back() return result class TimeoutError(Exception): pass class ServoWebDriverRefTestExecutor(RefTestExecutor): def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None, capabilities=None, debug_info=None, **kwargs): """Selenium WebDriver-based executor for reftests""" RefTestExecutor.__init__(self, browser, server_config, screenshot_cache=screenshot_cache, timeout_multiplier=timeout_multiplier, debug_info=debug_info) self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities) self.implementation = RefTestImplementation(self) self.timeout = None with open(os.path.join(here, "reftest-wait_webdriver.js")) as f: self.wait_script = f.read() def is_alive(self): return self.protocol.is_alive() def do_test(self, test): try: result = self.implementation.run_test(test) return self.convert_result(test, result) except IOError: return test.result_cls("CRASH", None), [] except TimeoutError: return test.result_cls("TIMEOUT", None), [] except Exception as e: message = getattr(e, "message", "") if message: message += "\n" message += traceback.format_exc(e) return test.result_cls("ERROR", message), [] def screenshot(self, test, viewport_size, dpi): # https://github.com/w3c/wptrunner/issues/166 assert viewport_size is None assert dpi is None timeout = (test.timeout * self.timeout_multiplier + extra_timeout if self.debug_info is None else None) if self.timeout != timeout: try: self.protocol.session.timeouts.script = timeout self.timeout = timeout except IOError: self.logger.error("Lost webdriver connection") return Stop return ServoWebDriverRun(self._screenshot, self.protocol.session, self.test_url(test), timeout).run() def _screenshot(self, session, url, timeout): session.url = url session.execute_async_script(self.wait_script) return session.screenshot()
mpl-2.0
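timeout_func above returns a closure that reports whether a padded deadline has passed. The same pattern in isolation:

# Standalone sketch of the timeout_func closure pattern used above.
import time

extra_timeout = 5

def timeout_func(timeout):
    if timeout:
        t0 = time.time()
        return lambda: time.time() - t0 > timeout + extra_timeout
    else:
        return lambda: False  # no timeout configured: never expires

expired = timeout_func(0.1)
print(expired())   # False immediately after creation
time.sleep(0.2)
print(expired())   # still False: the 5s extra_timeout pads the deadline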
shosca/django-rest-witchcraft
rest_witchcraft/filters.py
1
3474
"""Provides generic filtering backends that can be used to filter the results returned by list views.""" from sqlalchemy import func, or_ from sqlalchemy.sql import operators from django.template import loader from django.utils.encoding import force_text from django.utils.translation import gettext_lazy from rest_framework.compat import coreapi, coreschema from rest_framework.filters import BaseFilterBackend from rest_framework.settings import api_settings class SearchFilter(BaseFilterBackend): search_param = api_settings.SEARCH_PARAM template = "rest_framework/filters/search.html" lookup_prefixes = { "": lambda c, x: operators.ilike_op(c, "%{}%".format(x)), # icontains "^": lambda c, x: c.ilike(x.replace("%", "%%") + "%"), # istartswith "=": lambda c, x: func.lower(c) == func.lower(x), # iequals "@": operators.eq, # equals } search_title = gettext_lazy("Search") search_description = gettext_lazy("A search term.") def get_schema_fields(self, view): assert coreapi is not None, "coreapi must be installed to use `get_schema_fields()`" assert coreschema is not None, "coreschema must be installed to use `get_schema_fields()`" return [ coreapi.Field( name=self.search_param, required=False, location="query", schema=coreschema.String( title=force_text(self.search_title), description=force_text(self.search_description) ), ) ] def get_schema_operation_parameters(self, view): return [ { "name": self.search_param, "required": False, "in": "query", "description": force_text(self.search_description), "schema": {"type": "string"}, } ] def get_search_fields(self, view, request): return getattr(view, "search_fields", None) def get_search_terms(self, request): params = request.query_params.get(self.search_param, "") params = params.replace("\x00", "") # strip null characters params = params.replace(",", " ") return params.split() def to_html(self, request, queryset, view): if not getattr(view, "search_fields", None): return "" term = self.get_search_terms(request) term = term[0] if term else "" context = {"param": self.search_param, "term": term} template = loader.get_template(self.template) return template.render(context) def filter_queryset(self, request, queryset, view): search_fields = self.get_search_fields(view, request) search_terms = self.get_search_terms(request) if not search_fields or not search_terms: return queryset model = view.get_model() expressions = [] for field in search_fields: for term in search_terms: expr = self.get_expression(model, field, term) if expr is not None: expressions.append(expr) return queryset.filter(or_(*expressions)) def get_expression(self, model, field, term): op = self.lookup_prefixes[""] if field[0] in self.lookup_prefixes: op = self.lookup_prefixes[field[0]] field = field[1:] expr = op(getattr(model, field), term) return expr
mit
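Each search_fields entry handled above may carry a one-character prefix selecting the SQLAlchemy operator. A standalone sketch of what each prefix maps to; the User model is illustrative and the imports assume SQLAlchemy 1.4+:

# Standalone demonstration of the prefix semantics in lookup_prefixes above.
from sqlalchemy import Column, Integer, String, func
from sqlalchemy.sql import operators
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class User(Base):                 # illustrative model, not from the library
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

# ""  -> icontains   (case-insensitive LIKE with %term%)
print(operators.ilike_op(User.name, "%ann%"))
# "^" -> istartswith (case-insensitive LIKE with term%)
print(User.name.ilike("ann%"))
# "=" -> iequals     (lower(col) = lower(term))
print(func.lower(User.name) == func.lower("Ann"))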
radremedy/radremedy
remedy/rad/searchutils.py
2
4301
""" searchutils.py Contains utility functions for adding normalized searching values to dictionaries of search parameters. """ def add_string(search_params, key, value): """ Adds a string value to the provided search parameters dictionary if it is non-empty. Args: search_params: The parameter dictionary to update. key: The key to use. value: The value to normalize and use in the dictionary as appropriate. """ if value is None or len(value) == 0 or value.isspace(): return # Stick the trimmed version in the search params search_params[key] = value.strip() def add_bool(search_params, key, value): """ Adds a Boolean value to the provided search parameters dictionary if it is non-empty. Args: search_params: The parameter dictionary to update. key: The key to use. value: The value to normalize and use in the dictionary as appropriate. """ if value is None or len(value) == 0 or value.isspace(): return # Do a comparison against some basic truth values and # treat everything else as false at this point search_params[key] = value.strip().lower() in [ 'true', '1', 'selected', 'on'] def add_int(search_params, key, value, min_value=None, max_value=None): """ Adds an integer value to the provided search parameters dictionary if it can be converted. Args: search_params: The parameter dictionary to update. key: The key to use. value: The value to normalize and use in the dictionary as appropriate. min_value: The minimum value to validate against, if any. max_value: The maximum value to validate against, if any. """ if value is None: return try: value_int = int(value) # Validation against ranges, if specified if min_value is not None and value_int < min_value: return if max_value is not None and value_int > max_value: return search_params[key] = value_int except ValueError: return def add_int_set( search_params, key, value_list, min_value=None, max_value=None): """ Adds a set of integer values to the provided search parameters dictionary if any can be converted. Args: search_params: The parameter dictionary to update. key: The key to use. value_list: The list of values to normalize and use in the dictionary as appropriate. min_value: The minimum value to validate against, if any. max_value: The maximum value to validate against, if any. """ if value_list is None: return # Initialize an empty set int_set = set() # Now iterate over the list of values and validate each in turn for int_str in value_list: try: value_int = int(int_str) # Validation against ranges, if specified if min_value is not None and value_int < min_value: continue if max_value is not None and value_int > max_value: continue int_set.add(value_int) except ValueError: pass # If we had any valid values, set the search params key if len(int_set) > 0: search_params[key] = int_set def add_float(search_params, key, value, min_value=None, max_value=None): """ Adds a floating-point value to the provided search parameters dictionary if it can be converted. Args: search_params: The parameter dictionary to update. key: The key to use. value: The value to normalize and use in the dictionary as appropriate. min_value: The minimum value to validate against, if any. max_value: The maximum value to validate against, if any. """ if value is None: return try: value_float = float(value) # Validation against ranges, if specified if min_value is not None and value_float < min_value: return if max_value is not None and value_float > max_value: return search_params[key] = value_float except ValueError: return
mpl-2.0
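A usage sketch for the helpers above, assuming they are imported from this module; raw query-string values go in, normalized parameters come out:

# Usage sketch: raw request values in, normalized search parameters out.
search_params = {}
add_string(search_params, 'name', '  clinic  ')        # stored trimmed
add_bool(search_params, 'verified', 'On')              # truthy strings -> True
add_int(search_params, 'page', '3', min_value=1)       # converted and range-checked
add_int(search_params, 'rating', '11', max_value=10)   # out of range: silently skipped
add_int_set(search_params, 'ids', ['1', 'x', '7'])     # invalid entries dropped
add_float(search_params, 'dist', '2.5', min_value=0)

print(search_params)
# {'name': 'clinic', 'verified': True, 'page': 3, 'ids': {1, 7}, 'dist': 2.5}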
henriquesouza/toply
src/view/Sync.py
1
5379
import urllib
import os
import gi
import threading
#simport src.SettingsManager
from SettingsManager import SettingsManager

gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GObject
import html

GObject.threads_init()


class Sync:

    _window = None
    _100th_counting = 0
    _phrase_list = None
    _length = None
    _song = None
    _sync = []
    _sync_backup = []
    _i = 0
    _matching_position = [0]
    _settings_manager = SettingsManager

    def __init__(self, phrase_list, song):
        artist = song.artist
        title = song._title
        length = song.length/1000000 #converting from micro to seconds
        #print(length)
        self._length = length
        self._song = song

        # print(SettingsManager..get_settings("storage.custom-lyrics-folder"))
        phrase_list = phrase_list.split("\n")
        '''
        css_provider = Gtk.CssProvider()
        css_provider.load_from_path("./style/css/theme.css")
        context = Gtk.StyleContext()
        context.add_provider_for_screen(self.get_sync_window().get_screen(), css_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
        '''
        self.sync_window(artist+" - "+title)
        self.screen_changed(self.get_sync_window(), False, False)
        self.sync_window_add(self.phrase_list(phrase_list))

        self.get_sync_window().show_all()
        # last line
        Gtk.main()

    def screen_changed(self, widget, old_screen, userdata=None):
        global supports_alpha

        screen = widget.get_screen()
        visual = screen.get_rgba_visual()
        if visual is None:
            print("Your screen does not support alpha channels!")
            visual = screen.get_system_visual()
            supports_alpha = False
        else:
            #print("Your screen supports alpha channels!")
            supports_alpha = True

        widget.set_visual(visual)

    def sync_window(self, title):
        self.counting()
        self._song.media_player.Play() #testing sync makes no real sense
        self._window = Gtk.Window()
        self._window.connect("destroy", lambda _: Gtk.main_quit())
        self._window.set_name("sync_window")
        self._window.set_title(title)
        self._window.set_resizable(True)
        #self._window.set_decorated(False)
        self._window.set_default_size(500,500)
        self._window.set_app_paintable(True)

    def get_sync_window(self):
        return self._window

    def sync_window_add(self, widget):
        self._window.add(widget)

    def phrase_list( self, phrases_list ):
        self._phrase_list = Gtk.ListStore(str)
        #print("\n\n lalala" + "".join(phrases_list))
        self._phrase_list.append(["(...)"])
        j = 0
        for i in phrases_list:
            qi = urllib.parse.quote(i)
            if (i.replace(" ", "")).replace("\n", "") != "" and qi[qi.find("%")+1] == "2":
                #print("\""+urllib.parse.quote(i)+"\"")
                self._phrase_list.append([i])
                self._sync.append(i+"\n")
                self._sync_backup.append(i+"\n")
            else:
                if j + 1 < len(phrases_list) and phrases_list[j+1].replace(" ", "") == "" and phrases_list[j+2].replace(" ", "") == "":
                    break
            j = j + 1

        self._phrase_list.append(["(end)"])
        tree_view = Gtk.TreeView(model=self._phrase_list)
        renderer_text = Gtk.CellRendererText()
        column_text = Gtk.TreeViewColumn("", renderer_text, text=0)
        tree_view.append_column(column_text)
        tree_view.set_headers_visible(False)
        tree_view.connect("move-cursor", self.move)
        self._phrase_list.set_name("phrase_list")

        scrollable_treelist = Gtk.ScrolledWindow()
        scrollable_treelist.set_vexpand(True)
        scrollable_treelist.add(tree_view)
        return scrollable_treelist

    def move(self, o, t, th):
        s = self._100th_counting
        m = 0
        if(th > 0):
            self._matching_position.append(s)
        if( th < 0 ):
            self.backwards()
            self._sync[self._i] = self._sync_backup[self._i]
        else:
            print(self._sync)
            while (s > 60):
                s = s - 60
                m = m + 1
            timing = str(m) + ":" + (str(s)).split(".")[0] + "." + (((str(s)).split("."))[1])[0:2]
            self._sync[self._i] = "[" + timing + "]" + self._sync[self._i]
            self._i = self._i + 1

    def counting(self):
        if (self._100th_counting < self._length):
        #if (self._100th_counting < 3):
            self._100th_counting = self._100th_counting + 0.01
            t = threading.Timer(0.01, self.counting)
            t.start()
        else:
            fo = open(self._settings_manager.get_settings(self, "storage.custom-lyrics-folder")+self._song.artist+" "+self._song.title+".lrc", "w+")
            fo.write("".join(self._sync))

    def get_lrc(self):
        return self._sync

    def backwards(self):
        self._song.media_player.SetPosition(self._song.trackid, self._matching_position[-2]*1000000)
        if self._matching_position[-1] > 0:
            del self._matching_position[-1]
        self._100th_counting = self._matching_position[-1]

#print(open("../../tmp/lrc").read())
#sync = Sync(open("../../tmp/lrc").read(), "Sabaton", "The Last Stand", 238)
gpl-3.0
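The timestamp that move() assembles above via string splitting is a standard LRC time tag. A small illustrative helper (not part of the file) producing the same [m:ss.xx] shape with arithmetic and zero-padding:

# Illustrative helper: build the [m:ss.xx] LRC timestamp that move() constructs,
# using divmod and format padding instead of splitting str(seconds).
def lrc_timestamp(seconds):
    minutes, secs = divmod(seconds, 60)
    return "[%d:%05.2f]" % (int(minutes), secs)

print(lrc_timestamp(75.337))   # [1:15.34]
print(lrc_timestamp(5.2))      # [0:05.20]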
ofcourseican/django-openid-auth
example_consumer/urls.py
13
1793
# django-openid-auth -  OpenID integration for django.contrib.auth
#
# Copyright (C) 2007 Simon Willison
# Copyright (C) 2008-2010 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from django.conf.urls.defaults import *
from django.contrib import admin

import views

admin.autodiscover()

urlpatterns = patterns('',
    (r'^$', views.index),
    (r'^openid/', include('django_openid_auth.urls')),
    (r'^logout/$', 'django.contrib.auth.views.logout'),
    (r'^private/$', views.require_authentication),
    (r'^admin/(.*)', admin.site.root),
)
bsd-2-clause
Adnn/django
tests/custom_pk/models.py
282
1272
# -*- coding: utf-8 -*-
"""
Using a custom primary key

By default, Django adds an ``"id"`` field to each model. But you can override
this behavior by explicitly adding ``primary_key=True`` to a field.
"""

from __future__ import unicode_literals

from django.db import models
from django.utils.encoding import python_2_unicode_compatible

from .fields import MyAutoField


@python_2_unicode_compatible
class Employee(models.Model):
    employee_code = models.IntegerField(primary_key=True, db_column='code')
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    class Meta:
        ordering = ('last_name', 'first_name')

    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)


@python_2_unicode_compatible
class Business(models.Model):
    name = models.CharField(max_length=20, primary_key=True)
    employees = models.ManyToManyField(Employee)

    class Meta:
        verbose_name_plural = 'businesses'

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Bar(models.Model):
    id = MyAutoField(primary_key=True, db_index=True)

    def __str__(self):
        return repr(self.pk)


class Foo(models.Model):
    bar = models.ForeignKey(Bar, models.CASCADE)
bsd-3-clause
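A short ORM sketch of what these test models exercise; it assumes a configured Django environment with the models above migrated:

# Sketch (assumes Django settings and migrations are in place): the custom
# primary keys above are supplied explicitly rather than auto-generated.
fran = Employee.objects.create(
    employee_code=123, first_name='Fran', last_name='Bones')
print(fran.pk)                        # 123 -- pk aliases employee_code
print(Employee.objects.get(pk=123))   # Fran Bones

shop = Business.objects.create(name='Sears')
shop.employees.add(fran)              # M2M rows reference the custom PKs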
SerCeMan/intellij-community
python/helpers/profiler/thriftpy3/Thrift.py
253
4543
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

import sys


class TType:
    STOP = 0
    VOID = 1
    BOOL = 2
    BYTE = 3
    I08 = 3
    DOUBLE = 4
    I16 = 6
    I32 = 8
    I64 = 10
    STRING = 11
    UTF7 = 11
    STRUCT = 12
    MAP = 13
    SET = 14
    LIST = 15
    UTF8 = 16
    UTF16 = 17

    _VALUES_TO_NAMES = ('STOP',
                        'VOID',
                        'BOOL',
                        'BYTE',
                        'DOUBLE',
                        None,
                        'I16',
                        None,
                        'I32',
                        None,
                        'I64',
                        'STRING',
                        'STRUCT',
                        'MAP',
                        'SET',
                        'LIST',
                        'UTF8',
                        'UTF16')


class TMessageType:
    CALL = 1
    REPLY = 2
    EXCEPTION = 3
    ONEWAY = 4


class TProcessor:
    """Base class for processor, which works on two streams."""

    def process(iprot, oprot):
        pass


class TException(Exception):
    """Base class for all thrift exceptions."""

    # BaseException.message is deprecated in Python v[2.6,3.0)
    if (2, 6, 0) <= sys.version_info < (3, 0):
        def _get_message(self):
            return self._message

        def _set_message(self, message):
            self._message = message
        message = property(_get_message, _set_message)

    def __init__(self, message=None):
        Exception.__init__(self, message)
        self.message = message


class TApplicationException(TException):
    """Application level thrift exceptions."""

    UNKNOWN = 0
    UNKNOWN_METHOD = 1
    INVALID_MESSAGE_TYPE = 2
    WRONG_METHOD_NAME = 3
    BAD_SEQUENCE_ID = 4
    MISSING_RESULT = 5
    INTERNAL_ERROR = 6
    PROTOCOL_ERROR = 7
    INVALID_TRANSFORM = 8
    INVALID_PROTOCOL = 9
    UNSUPPORTED_CLIENT_TYPE = 10

    def __init__(self, type=UNKNOWN, message=None):
        TException.__init__(self, message)
        self.type = type

    def __str__(self):
        if self.message:
            return self.message
        elif self.type == self.UNKNOWN_METHOD:
            return 'Unknown method'
        elif self.type == self.INVALID_MESSAGE_TYPE:
            return 'Invalid message type'
        elif self.type == self.WRONG_METHOD_NAME:
            return 'Wrong method name'
        elif self.type == self.BAD_SEQUENCE_ID:
            return 'Bad sequence ID'
        elif self.type == self.MISSING_RESULT:
            return 'Missing result'
        elif self.type == self.INTERNAL_ERROR:
            return 'Internal error'
        elif self.type == self.PROTOCOL_ERROR:
            return 'Protocol error'
        elif self.type == self.INVALID_TRANSFORM:
            return 'Invalid transform'
        elif self.type == self.INVALID_PROTOCOL:
            return 'Invalid protocol'
        elif self.type == self.UNSUPPORTED_CLIENT_TYPE:
            return 'Unsupported client type'
        else:
            return 'Default (unknown) TApplicationException'

    def read(self, iprot):
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.message = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.type = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        oprot.writeStructBegin('TApplicationException')
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message)
            oprot.writeFieldEnd()
        if self.type is not None:
            oprot.writeFieldBegin('type', TType.I32, 2)
            oprot.writeI32(self.type)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
apache-2.0
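A small sketch against the class above: the human-readable string form is derived from the machine-readable type code unless an explicit message is set:

# Sketch: TApplicationException renders a message from its type code
# when no explicit message is given (runs against the class above).
exc = TApplicationException(TApplicationException.UNKNOWN_METHOD)
print(exc.type)   # 1
print(str(exc))   # 'Unknown method'

exc = TApplicationException(
    TApplicationException.INTERNAL_ERROR, message='db connection lost')
print(str(exc))   # explicit message wins: 'db connection lost'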
icomms/wqmanager
apps/django_rest_interface/responder.py
4
12224
""" MORE INFO AT: http://code.google.com/p/django-rest-interface/wiki/RestifyDjango Data format classes ("responders") that can be plugged into model_resource.ModelResource and determine how the objects of a ModelResource instance are rendered (e.g. serialized to XML, rendered by templates, ...). """ from django.core import serializers from django.core.handlers.wsgi import STATUS_CODE_TEXT from django.core.paginator import QuerySetPaginator, InvalidPage # the correct paginator for Model objects is the QuerySetPaginator, # not the Paginator! (see Django doc) from django.core.xheaders import populate_xheaders from django import forms from django.http import Http404, HttpResponse from django.forms.util import ErrorDict from django.shortcuts import render_to_response from django.template import loader, RequestContext from django.utils import simplejson from django.utils.xmlutils import SimplerXMLGenerator from django.views.generic.simple import direct_to_template class SerializeResponder(object): """ Class for all data formats that are possible with Django's serializer framework. """ def __init__(self, format, mimetype=None, paginate_by=None, allow_empty=False): """ format: may be every format that works with Django's serializer framework. By default: xml, python, json, (yaml). mimetype: if the default None is not changed, any HttpResponse calls use settings.DEFAULT_CONTENT_TYPE and settings.DEFAULT_CHARSET paginate_by: Number of elements per page. Default: All elements. """ self.format = format self.mimetype = mimetype self.paginate_by = paginate_by self.allow_empty = allow_empty self.expose_fields = [] def render(self, object_list): """ Serializes a queryset to the format specified in self.format. """ # Hide unexposed fields hidden_fields = [] for obj in list(object_list): for field in obj._meta.fields: if not field.name in self.expose_fields and field.serialize: field.serialize = False hidden_fields.append(field) response = serializers.serialize(self.format, object_list) # Show unexposed fields again for field in hidden_fields: field.serialize = True return response def element(self, request, elem): """ Renders single model objects to HttpResponse. """ return HttpResponse(self.render([elem]), self.mimetype) def error(self, request, status_code, error_dict=None): """ Handles errors in a RESTful way. - appropriate status code - appropriate mimetype - human-readable error message """ if not error_dict: error_dict = ErrorDict() response = HttpResponse(mimetype = self.mimetype) response.write('%d %s' % (status_code, STATUS_CODE_TEXT[status_code])) if error_dict: response.write('\n\nErrors:\n') response.write(error_dict.as_text()) response.status_code = status_code return response def list(self, request, queryset, page=None): """ Renders a list of model objects to HttpResponse. """ if self.paginate_by: paginator = QuerySetPaginator(queryset, self.paginate_by) if not page: page = request.GET.get('page', 1) try: page = int(page) object_list = paginator.page(page).object_list except (InvalidPage, ValueError): if page == 1 and self.allow_empty: object_list = [] else: return self.error(request, 404) else: object_list = list(queryset) return HttpResponse(self.render(object_list), self.mimetype) class JSONResponder(SerializeResponder): """ JSON data format class. 
""" def __init__(self, paginate_by=None, allow_empty=False): SerializeResponder.__init__(self, 'json', 'application/json', paginate_by=paginate_by, allow_empty=allow_empty) def error(self, request, status_code, error_dict=None): """ Return JSON error response that includes a human readable error message, application-specific errors and a machine readable status code. """ if not error_dict: error_dict = ErrorDict() response = HttpResponse(mimetype = self.mimetype) response.status_code = status_code response_dict = { "error-message" : '%d %s' % (status_code, STATUS_CODE_TEXT[status_code]), "status-code" : status_code, "model-errors" : error_dict.as_ul() } simplejson.dump(response_dict, response) return response class XMLResponder(SerializeResponder): """ XML data format class. """ def __init__(self, paginate_by=None, allow_empty=False): SerializeResponder.__init__(self, 'xml', 'application/xml', paginate_by=paginate_by, allow_empty=allow_empty) def error(self, request, status_code, error_dict=None): """ Return XML error response that includes a human readable error message, application-specific errors and a machine readable status code. """ from django.conf import settings if not error_dict: error_dict = ErrorDict() response = HttpResponse(mimetype = self.mimetype) response.status_code = status_code xml = SimplerXMLGenerator(response, settings.DEFAULT_CHARSET) xml.startDocument() xml.startElement("django-error", {}) xml.addQuickElement(name="error-message", contents='%d %s' % (status_code, STATUS_CODE_TEXT[status_code])) xml.addQuickElement(name="status-code", contents=str(status_code)) if error_dict: xml.startElement("model-errors", {}) for (model_field, errors) in error_dict.items(): for error in errors: xml.addQuickElement(name=model_field, contents=error) xml.endElement("model-errors") xml.endElement("django-error") xml.endDocument() return response class TemplateResponder(object): """ Data format class that uses templates (similar to Django's generic views). """ def __init__(self, template_dir, paginate_by=None, template_loader=loader, extra_context=None, allow_empty=False, context_processors=None, template_object_name='object', mimetype=None): self.template_dir = template_dir self.paginate_by = paginate_by self.template_loader = template_loader if not extra_context: extra_context = {} for key, value in extra_context.items(): if callable(value): extra_context[key] = value() self.extra_context = extra_context self.allow_empty = allow_empty self.context_processors = context_processors self.template_object_name = template_object_name self.mimetype = mimetype self.expose_fields = None # Set by Collection.__init__ def _hide_unexposed_fields(self, obj, allowed_fields): """ Remove fields from a model that should not be public. """ for field in obj._meta.fields: if not field.name in allowed_fields and \ not field.name + '_id' in allowed_fields: obj.__dict__.pop(field.name) def list(self, request, queryset, page=None): """ Renders a list of model objects to HttpResponse. 
""" template_name = '%s/%s_list.html' % (self.template_dir, queryset.model._meta.module_name) if self.paginate_by: paginator = QuerySetPaginator(queryset, self.paginate_by) if not page: page = request.GET.get('page', 1) try: page = int(page) object_list = paginator.page(page).object_list except (InvalidPage, ValueError): if page == 1 and self.allow_empty: object_list = [] else: raise Http404 current_page = paginator.page(page) c = RequestContext(request, { '%s_list' % self.template_object_name: object_list, 'is_paginated': paginator.num_pages > 1, 'results_per_page': self.paginate_by, 'has_next': current_page.has_next(), 'has_previous': current_page.has_previous(), 'page': page, 'next': page + 1, 'previous': page - 1, 'last_on_page': current_page.end_index(), 'first_on_page': current_page.start_index(), 'pages': paginator.num_pages, 'hits' : paginator.count, }, self.context_processors) else: object_list = queryset c = RequestContext(request, { '%s_list' % self.template_object_name: object_list, 'is_paginated': False }, self.context_processors) if not self.allow_empty and len(queryset) == 0: raise Http404 # Hide unexposed fields for obj in object_list: self._hide_unexposed_fields(obj, self.expose_fields) c.update(self.extra_context) t = self.template_loader.get_template(template_name) return HttpResponse(t.render(c), mimetype=self.mimetype) def element(self, request, elem): """ Renders single model objects to HttpResponse. """ template_name = '%s/%s_detail.html' % (self.template_dir, elem._meta.module_name) t = self.template_loader.get_template(template_name) c = RequestContext(request, { self.template_object_name : elem, }, self.context_processors) # Hide unexposed fields self._hide_unexposed_fields(elem, self.expose_fields) c.update(self.extra_context) response = HttpResponse(t.render(c), mimetype=self.mimetype) populate_xheaders(request, response, elem.__class__, getattr(elem, elem._meta.pk.name)) return response def error(self, request, status_code, error_dict=None): """ Renders error template (template name: error status code). """ if not error_dict: error_dict = ErrorDict() response = direct_to_template(request, template = '%s/%s.html' % (self.template_dir, str(status_code)), extra_context = { 'errors' : error_dict }, mimetype = self.mimetype) response.status_code = status_code return response def create_form(self, request, queryset, form_class): """ Render form for creation of new collection entry. """ ResourceForm = forms.form_for_model(queryset.model, form=form_class) if request.POST: form = ResourceForm(request.POST) else: form = ResourceForm() template_name = '%s/%s_form.html' % (self.template_dir, queryset.model._meta.module_name) return render_to_response(template_name, {'form':form}) def update_form(self, request, pk, queryset, form_class): """ Render edit form for single entry. """ # Remove queryset cache by cloning the queryset queryset = queryset._clone() elem = queryset.get(**{queryset.model._meta.pk.name : pk}) ResourceForm = forms.form_for_instance(elem, form=form_class) if request.PUT: form = ResourceForm(request.PUT) else: form = ResourceForm() template_name = '%s/%s_form.html' % (self.template_dir, elem._meta.module_name) return render_to_response(template_name, {'form':form, 'update':True, self.template_object_name:elem})
bsd-3-clause
mbernasocchi/inasafe
safe_extras/raven/processors.py
11
5388
""" raven.core.processors ~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import re import warnings from raven.utils.compat import string_types, text_type, PY3 from raven.utils import varmap class Processor(object): def __init__(self, client): self.client = client def get_data(self, data, **kwargs): return def process(self, data, **kwargs): resp = self.get_data(data, **kwargs) if resp: data = resp if 'exception' in data: if 'values' in data['exception']: for value in data['exception'].get('values', []): if 'stacktrace' in value: self.filter_stacktrace(value['stacktrace']) if 'request' in data: self.filter_http(data['request']) if 'extra' in data: data['extra'] = self.filter_extra(data['extra']) return data def filter_stacktrace(self, data): pass def filter_http(self, data): pass def filter_extra(self, data): return data class RemovePostDataProcessor(Processor): """Removes HTTP post data.""" def filter_http(self, data, **kwargs): data.pop('data', None) class RemoveStackLocalsProcessor(Processor): """Removes local context variables from stacktraces.""" def filter_stacktrace(self, data, **kwargs): for frame in data.get('frames', []): frame.pop('vars', None) class SanitizeKeysProcessor(Processor): """ Asterisk out things that correspond to a configurable set of keys. """ MASK = '*' * 8 @property def sanitize_keys(self): keys = getattr(self.client, 'sanitize_keys') if keys is None: raise ValueError('The sanitize_keys setting must be present to use SanitizeKeysProcessor') return keys def sanitize(self, item, value): if value is None: return if not item: # key can be a NoneType return value # Just in case we have bytes here, we want to make them into text # properly without failing so we can perform our check. if isinstance(item, bytes): item = item.decode('utf-8', 'replace') else: item = text_type(item) item = item.lower() for key in self.sanitize_keys: if key in item: # store mask as a fixed length for security return self.MASK return value def filter_stacktrace(self, data): for frame in data.get('frames', []): if 'vars' not in frame: continue frame['vars'] = varmap(self.sanitize, frame['vars']) def filter_http(self, data): for n in ('data', 'cookies', 'headers', 'env', 'query_string'): if n not in data: continue # data could be provided as bytes if PY3 and isinstance(data[n], bytes): data[n] = data[n].decode('utf-8', 'replace') if isinstance(data[n], string_types) and '=' in data[n]: # at this point we've assumed it's a standard HTTP query # or cookie if n == 'cookies': delimiter = ';' else: delimiter = '&' data[n] = self._sanitize_keyvals(data[n], delimiter) else: data[n] = varmap(self.sanitize, data[n]) if n == 'headers' and 'Cookie' in data[n]: data[n]['Cookie'] = self._sanitize_keyvals( data[n]['Cookie'], ';' ) def filter_extra(self, data): return varmap(self.sanitize, data) def _sanitize_keyvals(self, keyvals, delimiter): sanitized_keyvals = [] for keyval in keyvals.split(delimiter): keyval = keyval.split('=') if len(keyval) == 2: sanitized_keyvals.append((keyval[0], self.sanitize(*keyval))) else: sanitized_keyvals.append(keyval) return delimiter.join('='.join(keyval) for keyval in sanitized_keyvals) class SanitizePasswordsProcessor(SanitizeKeysProcessor): """ Asterisk out things that look like passwords, credit card numbers, and API keys in frames, http, and basic extra data. 
""" KEYS = frozenset([ 'password', 'secret', 'passwd', 'authorization', 'api_key', 'apikey', 'sentry_dsn', 'access_token', ]) VALUES_RE = re.compile(r'^(?:\d[ -]*?){13,16}$') @property def sanitize_keys(self): return self.KEYS @property def FIELDS(self): warnings.warn( "`SanitizePasswordsProcessor.Fields` has been deprecated. Use " "`SanitizePasswordsProcessor.KEYS` or `SanitizePasswordsProcessor.sanitize_keys` " "instead", DeprecationWarning, ) return self.KEYS def sanitize(self, item, value): value = super(SanitizePasswordsProcessor, self).sanitize(item, value) if isinstance(value, string_types) and self.VALUES_RE.match(value): return self.MASK return value
gpl-3.0
kenda/codingcontest2012
tastypie/utils/formatting.py
3
1090
import email import datetime from django.utils import dateformat from tastypie.utils.timezone import make_aware, make_naive, aware_datetime # Try to use dateutil for maximum date-parsing niceness. Fall back to # hard-coded RFC2822 parsing if that's not possible. try: from dateutil.parser import parse as mk_datetime except ImportError: def mk_datetime(string): return make_aware(datetime.datetime.fromtimestamp(time.mktime(email.utils.parsedate(string)))) def format_datetime(dt): """ RFC 2822 datetime formatter """ return dateformat.format(make_naive(dt), 'r') def format_date(d): """ RFC 2822 date formatter """ # workaround because Django's dateformat utility requires a datetime # object (not just date) dt = aware_datetime(d.year, d.month, d.day, 0, 0, 0) return dateformat.format(dt, 'j M Y') def format_time(t): """ RFC 2822 time formatter """ # again, workaround dateformat input requirement dt = aware_datetime(2000, 1, 1, t.hour, t.minute, t.second) return dateformat.format(dt, 'H:i:s O')
gpl-3.0
rado0x54/project-euler
python/problem0067.py
1
1818
#!/usr/bin/env python3 """Project Euler - Problem 67 Module""" import os def problem67(triangle_fileloc): """Problem 67 - Maximum path sum II""" # We model tree node with dict: # node = { 'value':123, 'left': {}, 'right': {}, 'depth':1} root = {} cur_depth = [root] d = 0 d_nodelist = [] # read file with open(triangle_fileloc, 'r') as f: for line in f: d_nodelist.append(cur_depth) counter = 0 next_depth = [] for value in line.split(): cur_depth[counter]['value'] = int(value) cur_depth[counter]['depth'] = d if not next_depth: cur_depth[counter]['left'] = {} next_depth.append(cur_depth[counter]['left']) else: cur_depth[counter]['left'] = next_depth[-1] cur_depth[counter]['right'] = {} next_depth.append(cur_depth[counter]['right']) counter += 1 cur_depth = next_depth d += 1 # Correct Stuff d -= 1 while d >= 0: for x in d_nodelist[d]: cur_max = x['value'] if ('cur_max' in x['left'] and 'cur_max' in x['right']): if (x['left']['cur_max'] > x['right']['cur_max']): cur_max += x['left']['cur_max'] else: cur_max += x['right']['cur_max'] x['cur_max'] = cur_max d -= 1 return root['cur_max'] FILENAME = 'problem0067.txt' __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) def run(): """Default Run Method""" return problem67(os.path.join(__location__, FILENAME)) if __name__ == '__main__': print("Result: ", run())
mit
tedelhourani/ansible
lib/ansible/modules/packaging/os/apt.py
7
39021
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Flowroute LLC # Written by Matthew Williams <[email protected]> # Based on yum module written by Seth Vidal <skvidal at fedoraproject.org> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: apt short_description: Manages apt-packages description: - Manages I(apt) packages (such as for Debian/Ubuntu). version_added: "0.0.2" options: name: description: - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding) required: false default: null aliases: [ 'pkg', 'package' ] state: description: - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed. required: false default: present choices: [ "latest", "absent", "present", "build-dep" ] update_cache: description: - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step. required: false default: no choices: [ "yes", "no" ] cache_valid_time: description: - Update the apt cache if its older than the I(cache_valid_time). This option is set in seconds. As of Ansible 2.4, this implicitly sets I(update_cache) if set. required: false default: 0 purge: description: - Will force purging of configuration files if the module state is set to I(absent). required: false default: no choices: [ "yes", "no" ] default_release: description: - Corresponds to the C(-t) option for I(apt) and sets pin priorities required: false default: null install_recommends: description: - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed. required: false default: null choices: [ "yes", "no" ] force: description: - If C(yes), force installs/removes. required: false default: "no" choices: [ "yes", "no" ] allow_unauthenticated: description: - Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup. required: false default: "no" choices: [ "yes", "no" ] version_added: "2.1" upgrade: description: - 'If yes or safe, performs an aptitude safe-upgrade.' - 'If full, performs an aptitude full-upgrade.' - 'If dist, performs an apt-get dist-upgrade.' - 'Note: This does not upgrade a specific package, use state=latest for that.' - 'Note: Since 2.4, apt-get is used as a fall-back if aptitude is not present.' version_added: "1.1" required: false default: "no" choices: [ "no", "yes", "safe", "full", "dist"] dpkg_options: description: - Add dpkg options to apt command. 
Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' - Options should be supplied as comma separated list required: false default: 'force-confdef,force-confold' deb: description: - Path to a .deb package on the remote machine. - If :// in the path, ansible will attempt to download deb before installing. (Version added 2.1) required: false version_added: "1.6" autoremove: description: - If C(yes), remove unused dependency packages for all module states except I(build-dep). It can also be used as the only option. - Previous to version 2.4, autoclean was also an alias for autoremove, now it is its own separate command. See documentation for further information. required: false default: no choices: [ "yes", "no" ] version_added: "2.1" autoclean: description: - If C(yes), cleans the local repository of retrieved package files that can no longer be downloaded. required: false default: no choices: [ "yes", "no" ] version_added: "2.4" only_upgrade: description: - Only upgrade a package if it is already installed. required: false default: false version_added: "2.1" force_apt_get: description: - Force usage of apt-get instead of aptitude required: false default: false version_added: "2.4" requirements: - python-apt (python 2) - python3-apt (python 3) - aptitude (before 2.4) author: "Matthew Williams (@mgwilliams)" notes: - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) required C(aptitude) up to 2.3, since 2.4 C(apt-get) is used as a fall-back. - apt starts newly installed services by default, this is what the underlying tooling does, to avoid this you can set the ``RUNLEVEL`` environment variable to 1. ''' EXAMPLES = ''' - name: Update repositories cache and install "foo" package apt: name: foo update_cache: yes - name: Install apache service but avoid starting it immediately apt: name=apache2 state=present environment: RUNLEVLEL: 1 - name: Remove "foo" package apt: name=foo state=absent - name: Install the package "foo" apt: name: foo state: present - name: Install the version '1.00' of package "foo" apt: name: foo=1.00 state: present - name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport apt: name: nginx state: latest default_release: squeeze-backports update_cache: yes - name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends" apt: name: openjdk-6-jdk state: latest install_recommends: no - name: Upgrade all packages to the latest version apt: name: "*" state: latest - name: Update all packages to the latest version apt: upgrade: dist - name: Run the equivalent of "apt-get update" as a separate step apt: update_cache: yes - name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago apt: update_cache: yes cache_valid_time: 3600 - name: Pass options to dpkg on run apt: upgrade: dist update_cache: yes dpkg_options: 'force-confold,force-confdef' - name: Install a .deb package apt: deb: /tmp/mypackage.deb - name: Install the build dependencies for package "foo" apt: pkg: foo state: build-dep - name: Install a .deb package from the internet. 
apt: deb: https://example.com/python-ppq_0.1-1_all.deb - name: Remove useless packages from the cache apt: autoclean: yes - name: Remove dependencies that are no longer required apt: autoremove: yes ''' RETURN = ''' cache_updated: description: if the cache was updated or not returned: success, in some cases type: boolean sample: True cache_update_time: description: time of the last cache update (0 if unknown) returned: success, in some cases type: int sample: 1425828348000 stdout: description: output from apt returned: success, when needed type: string sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..." stderr: description: error output from apt returned: success, when needed type: string sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..." ''' # NOQA # added to stave off future warnings about apt api import warnings warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) import datetime import fnmatch import itertools import os import re import sys import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception from ansible.module_utils._text import to_bytes, to_native from ansible.module_utils.urls import fetch_url # APT related constants APT_ENV_VARS = dict( DEBIAN_FRONTEND='noninteractive', DEBIAN_PRIORITY='critical', # We screenscrape apt-get and aptitude output for information so we need # to make sure we use the C locale when running commands LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C', ) DPKG_OPTIONS = 'force-confdef,force-confold' APT_GET_ZERO = "\n0 upgraded, 0 newly installed" APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed" APT_LISTS_PATH = "/var/lib/apt/lists" APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" HAS_PYTHON_APT = True try: import apt import apt.debfile import apt_pkg except ImportError: HAS_PYTHON_APT = False if sys.version_info[0] < 3: PYTHON_APT = 'python-apt' else: PYTHON_APT = 'python3-apt' def package_split(pkgspec): parts = pkgspec.split('=', 1) version = None if len(parts) > 1: version = parts[1] return parts[0], version def package_versions(pkgname, pkg, pkg_cache): try: versions = set(p.version for p in pkg.versions) except AttributeError: # assume older version of python-apt is installed # apt.package.Package#versions require python-apt >= 0.7.9. pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) pkg_versions = (p.VersionList for p in pkg_cache_list) versions = set(p.VerStr for p in itertools.chain(*pkg_versions)) return versions def package_version_compare(version, other_version): try: return apt_pkg.version_compare(version, other_version) except AttributeError: return apt_pkg.VersionCompare(version, other_version) def package_status(m, pkgname, version, cache, state): try: # get the package from the cache, as well as the # low-level apt_pkg.Package object which contains # state fields not directly accessible from the # higher-level apt.package.Package object. 
pkg = cache[pkgname] ll_pkg = cache._cache[pkgname] # the low-level package object except KeyError: if state == 'install': try: provided_packages = cache.get_providing_packages(pkgname) if provided_packages: is_installed = False upgradable = False # when virtual package providing only one package, look up status of target package if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: package = provided_packages[0] installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') if installed: is_installed = True return is_installed, upgradable, False m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: # python-apt version too old to detect virtual packages # mark as upgradable and let apt-get install deal with it return False, True, False else: return False, False, False try: has_files = len(pkg.installed_files) > 0 except UnicodeDecodeError: has_files = True except AttributeError: has_files = False # older python-apt cannot be used to determine non-purged try: package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED except AttributeError: # python-apt 0.7.X has very weak low-level object try: # might not be necessary as python-apt post-0.7.X should have current_state property package_is_installed = pkg.is_installed except AttributeError: # assume older version of python-apt is installed package_is_installed = pkg.isInstalled if version: versions = package_versions(pkgname, pkg, cache._cache) avail_upgrades = fnmatch.filter(versions, version) if package_is_installed: try: installed_version = pkg.installed.version except AttributeError: installed_version = pkg.installedVersion # Only claim the package is installed if the version is matched as well package_is_installed = fnmatch.fnmatch(installed_version, version) # Only claim the package is upgradable if a candidate matches the version package_is_upgradable = False for candidate in avail_upgrades: if package_version_compare(candidate, installed_version) > 0: package_is_upgradable = True break else: package_is_upgradable = bool(avail_upgrades) else: try: package_is_upgradable = pkg.is_upgradable except AttributeError: # assume older version of python-apt is installed package_is_upgradable = pkg.isUpgradable return package_is_installed, package_is_upgradable, has_files def expand_dpkg_options(dpkg_options_compressed): options_list = dpkg_options_compressed.split(',') dpkg_options = "" for dpkg_option in options_list: dpkg_options = '%s -o "Dpkg::Options::=--%s"' \ % (dpkg_options, dpkg_option) return dpkg_options.strip() def expand_pkgspec_from_fnmatches(m, pkgspec, cache): # Note: apt-get does implicit regex matching when an exact package name # match is not found. Something like this: # matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)] # (Should also deal with the ':' for multiarch like the fnmatch code below) # # We have decided not to do similar implicit regex matching but might take # a PR to add some sort of explicit regex matching: # https://github.com/ansible/ansible-modules-core/issues/1258 new_pkgspec = [] if pkgspec: for pkgspec_pattern in pkgspec: pkgname_pattern, version = package_split(pkgspec_pattern) # note that none of these chars is allowed in a (debian) pkgname if frozenset('*?[]!').intersection(pkgname_pattern): # handle multiarch pkgnames, the idea is that "apt*" should # only select native packages. 
But "apt*:i386" should still work if ":" not in pkgname_pattern: # Filter the multiarch packages from the cache only once try: pkg_name_cache = _non_multiarch except NameError: pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841 else: # Create a cache of pkg_names including multiarch only once try: pkg_name_cache = _all_pkg_names except NameError: pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841 matches = fnmatch.filter(pkg_name_cache, pkgname_pattern) if not matches: m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern)) else: new_pkgspec.extend(matches) else: # No wildcards in name new_pkgspec.append(pkgspec_pattern) return new_pkgspec def parse_diff(output): diff = to_native(output).splitlines() try: # check for start marker from aptitude diff_start = diff.index('Resolving dependencies...') except ValueError: try: # check for start marker from apt-get diff_start = diff.index('Reading state information...') except ValueError: # show everything diff_start = -1 try: # check for end marker line from both apt-get and aptitude diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item)) except StopIteration: diff_end = len(diff) diff_start += 1 diff_end += 1 return {'prepared': '\n'.join(diff[diff_start:diff_end])} def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False, autoremove=False, only_upgrade=False, allow_unauthenticated=False): pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: if build_dep: # Let apt decide what to install pkg_list.append("'%s'" % package) continue name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='install') if (not installed and not only_upgrade) or (upgrade and upgradable): pkg_list.append("'%s'" % package) if installed and upgradable and version: # This happens when the package is installed, a newer version is # available, and the version is a wildcard that matches both # # We do not apply the upgrade flag because we cannot specify both # a version and state=latest. 
(This behaviour mirrors how apt # treats a version with wildcard in the package) pkg_list.append("'%s'" % package) packages = ' '.join(pkg_list) if packages: if force: force_yes = '--force-yes' else: force_yes = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' if autoremove: autoremove = '--auto-remove' else: autoremove = '' if only_upgrade: only_upgrade = '--only-upgrade' else: only_upgrade = '' if build_dep: cmd = "%s -y %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, force_yes, check_arg, packages) else: cmd = "%s -y %s %s %s %s %s install %s" % (APT_GET_CMD, dpkg_options, only_upgrade, force_yes, autoremove, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) if install_recommends is False: cmd += " -o APT::Install-Recommends=no" elif install_recommends is True: cmd += " -o APT::Install-Recommends=yes" # install_recommends is None uses the OS default if allow_unauthenticated: cmd += " --allow-unauthenticated" rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} status = True data = dict(changed=True, stdout=out, stderr=err, diff=diff) if rc: status = False data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc) return (status, data) else: return (True, dict(changed=False)) def get_field_of_deb(m, deb_file, field="Version"): cmd_dpkg = m.get_bin_path("dpkg", True) cmd = cmd_dpkg + " --field %s %s" % (deb_file, field) rc, stdout, stderr = m.run_command(cmd) if rc != 0: m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) return to_native(stdout).strip('\n') def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated, dpkg_options): changed = False deps_to_install = [] pkgs_to_install = [] for deb_file in debs.split(','): try: pkg = apt.debfile.DebPackage(deb_file) pkg_name = get_field_of_deb(m, deb_file, "Package") pkg_version = get_field_of_deb(m, deb_file, "Version") if len(apt_pkg.get_architectures()) > 1: pkg_arch = get_field_of_deb(m, deb_file, "Architecture") pkg_key = "%s:%s" % (pkg_name, pkg_arch) else: pkg_key = pkg_name try: installed_pkg = apt.Cache()[pkg_key] installed_version = installed_pkg.installed.version if package_version_compare(pkg_version, installed_version) == 0: # Does not need to down-/upgrade, move on to next package continue except Exception: # Must not be installed, continue with installation pass # Check if package is installable if not pkg.check() and not force: m.fail_json(msg=pkg._failure_string) # add any missing deps to the list of deps we need # to install so they're all done in one shot deps_to_install.extend(pkg.missing_deps) except Exception: e = get_exception() m.fail_json(msg="Unable to install package: %s" % str(e)) # and add this deb to the list of packages to install pkgs_to_install.append(deb_file) # install the deps through apt retvals = {} if deps_to_install: (success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache, install_recommends=install_recommends, dpkg_options=expand_dpkg_options(dpkg_options)) if not success: m.fail_json(**retvals) changed = retvals.get('changed', False) if pkgs_to_install: options = ' '.join(["--%s"% x for x in dpkg_options.split(",")]) if m.check_mode: options += " --simulate" if force: options += " --force-all" cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install)) rc, out, err = m.run_command(cmd) if "stdout" in retvals: stdout = retvals["stdout"] + out else: stdout = out if "diff" in retvals: diff = retvals["diff"] if 'prepared' in diff: 
diff['prepared'] += '\n\n' + out else: diff = parse_diff(out) if "stderr" in retvals: stderr = retvals["stderr"] + err else: stderr = err if rc == 0: m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff) else: m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) else: m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', '')) def remove(m, pkgspec, cache, purge=False, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False): pkg_list = [] pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='remove') if installed or (has_files and purge): pkg_list.append("'%s'" % package) packages = ' '.join(pkg_list) if not packages: m.exit_json(changed=False) else: if force: force_yes = '--force-yes' else: force_yes = '' if purge: purge = '--purge' else: purge = '' if autoremove: autoremove = '--auto-remove' else: autoremove = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' cmd = "%s -q -y %s %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, autoremove, check_arg, packages) rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} if rc: m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err, rc=rc) m.exit_json(changed=True, stdout=out, stderr=err, diff=diff) def cleanup(m, purge=False, force=False, operation=None, dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): if force: force_yes = '--force-yes' else: force_yes = '' if purge: purge = '--purge' else: purge = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' cmd = "%s -y %s %s %s %s %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, operation, check_arg) rc, out, err = m.run_command(cmd) if m._diff: diff = parse_diff(out) else: diff = {} if rc: m.fail_json(msg="'apt-get %s' failed: %s" % (operation, err), stdout=out, stderr=err, rc=rc) m.exit_json(changed=bool(len(diff)), stdout=out, stderr=err, diff=diff) def upgrade(m, mode="yes", force=False, default_release=None, use_apt_get=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=None): if autoremove: autoremove = '--auto-remove' else: autoremove = '' if m.check_mode: check_arg = '--simulate' else: check_arg = '' apt_cmd = None prompt_regex = None if mode == "dist" or (mode == "full" and use_apt_get): # apt-get dist-upgrade apt_cmd = APT_GET_CMD upgrade_command = "dist-upgrade" elif mode == "full" and not use_apt_get: # aptitude full-upgrade apt_cmd = APTITUDE_CMD upgrade_command = "full-upgrade" else: if use_apt_get: apt_cmd = APT_GET_CMD upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove) else: # aptitude safe-upgrade # mode=yes # default apt_cmd = APTITUDE_CMD upgrade_command = "safe-upgrade" prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])" if force: if apt_cmd == APT_GET_CMD: force_yes = '--force-yes' else: force_yes = '--assume-yes --allow-untrusted' else: force_yes = '' apt_cmd_path = m.get_bin_path(apt_cmd, required=True) cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, check_arg, upgrade_command) if default_release: cmd += " -t '%s'" % (default_release,) rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex) if m._diff: diff = parse_diff(out) else: diff = {} if rc: m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, 
upgrade_command, err), stdout=out, rc=rc) if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out): m.exit_json(changed=False, msg=out, stdout=out, stderr=err) m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff) def download(module, deb): tempdir = os.path.dirname(__file__) package = os.path.join(tempdir, str(deb.rsplit('/', 1)[1])) # When downloading a deb, how much of the deb to download before # saving to a tempfile (64k) BUFSIZE = 65536 try: rsp, info = fetch_url(module, deb, method='GET') if info['status'] != 200: module.fail_json(msg="Failed to download %s, %s" % (deb, info['msg'])) # Ensure file is open in binary mode for Python 3 f = open(package, 'wb') # Read 1kb at a time to save on ram while True: data = rsp.read(BUFSIZE) data = to_bytes(data, errors='surrogate_or_strict') if len(data) < 1: break # End of file, break while loop f.write(data) f.close() deb = package except Exception: e = get_exception() module.fail_json(msg="Failure downloading %s, %s" % (deb, e)) return deb def get_cache_mtime(): """Return mtime of a valid apt cache file. Stat the apt cache file and if no cache file is found return 0 :returns: ``int`` """ cache_time = 0 if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH): cache_time = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime elif os.path.exists(APT_LISTS_PATH): cache_time = os.stat(APT_LISTS_PATH).st_mtime return cache_time def get_updated_cache_time(): """Return the mtime time stamp and the updated cache time. Always retrieve the mtime of the apt cache or set the `cache_mtime` variable to 0 :returns: ``tuple`` """ cache_mtime = get_cache_mtime() mtimestamp = datetime.datetime.fromtimestamp(cache_mtime) updated_cache_time = int(time.mktime(mtimestamp.timetuple())) return mtimestamp, updated_cache_time # https://github.com/ansible/ansible-modules-core/issues/2951 def get_cache(module): '''Attempt to get the cache object and update till it works''' cache = None try: cache = apt.Cache() except SystemError: e = get_exception() if '/var/lib/apt/lists/' in str(e).lower(): # update cache until files are fixed or retries exceeded retries = 0 while retries < 2: (rc, so, se) = module.run_command(['apt-get', 'update', '-q']) retries += 1 if rc == 0: break if rc != 0: module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (str(e), str(so) + str(se)), rc=rc) # try again cache = apt.Cache() else: module.fail_json(msg=str(e)) return cache def main(): module = AnsibleModule( argument_spec=dict( state=dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']), update_cache=dict(aliases=['update-cache'], type='bool'), cache_valid_time=dict(type='int', default=0), purge=dict(default=False, type='bool'), package=dict(default=None, aliases=['pkg', 'name'], type='list'), deb=dict(default=None, type='path'), default_release=dict(default=None, aliases=['default-release']), install_recommends=dict(default=None, aliases=['install-recommends'], type='bool'), force=dict(default='no', type='bool'), upgrade=dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options=dict(default=DPKG_OPTIONS), autoremove=dict(type='bool', default='no'), autoclean=dict(type='bool', default='no'), only_upgrade=dict(type='bool', default=False), force_apt_get=dict(type='bool', default=False), allow_unauthenticated=dict(default='no', aliases=['allow-unauthenticated'], type='bool'), ), mutually_exclusive=[['package', 'upgrade', 'deb']], 
required_one_of=[['package', 'upgrade', 'update_cache', 'deb', 'autoremove']], supports_check_mode=True ) module.run_command_environ_update = APT_ENV_VARS if not HAS_PYTHON_APT: if module.check_mode: module.fail_json(msg="%s must be installed to use check mode. " "If run normally this module can auto-install it." % PYTHON_APT) try: module.run_command(['apt-get', 'update'], check_rc=True) module.run_command(['apt-get', 'install', PYTHON_APT, '-y', '-q'], check_rc=True) global apt, apt_pkg import apt import apt.debfile import apt_pkg except ImportError: module.fail_json(msg="Could not import python modules: apt, apt_pkg. " "Please install %s package." % PYTHON_APT) global APTITUDE_CMD APTITUDE_CMD = module.get_bin_path("aptitude", False) global APT_GET_CMD APT_GET_CMD = module.get_bin_path("apt-get") p = module.params if p['upgrade'] == 'no': p['upgrade'] = None use_apt_get = p['force_apt_get'] if not use_apt_get and not APTITUDE_CMD and p.get('upgrade', None) in ['full', 'safe', 'yes']: module.warn("Could not find aptitude. Using apt-get instead.") use_apt_get = True updated_cache = False updated_cache_time = 0 install_recommends = p['install_recommends'] allow_unauthenticated = p['allow_unauthenticated'] dpkg_options = expand_dpkg_options(p['dpkg_options']) autoremove = p['autoremove'] autoclean = p['autoclean'] # Deal with deprecated aliases if p['state'] == 'installed': module.deprecate("State 'installed' is deprecated. Using state 'present' instead.", version="2.9") p['state'] = 'present' if p['state'] == 'removed': module.deprecate("State 'removed' is deprecated. Using state 'absent' instead.", version="2.9") p['state'] = 'absent' # Get the cache object cache = get_cache(module) try: if p['default_release']: try: apt_pkg.config['APT::Default-Release'] = p['default_release'] except AttributeError: apt_pkg.Config['APT::Default-Release'] = p['default_release'] # reopen cache w/ modified config cache.open(progress=None) mtimestamp, updated_cache_time = get_updated_cache_time() # Cache valid time is default 0, which will update the cache if # needed and `update_cache` was set to true updated_cache = False if p['update_cache'] or p['cache_valid_time']: now = datetime.datetime.now() tdelta = datetime.timedelta(seconds=p['cache_valid_time']) if not mtimestamp + tdelta >= now: # Retry to update the cache up to 3 times for retry in range(3): try: cache.update() break except apt.cache.FetchFailedException: pass else: module.fail_json(msg='Failed to update apt cache.') cache.open(progress=None) updated_cache = True mtimestamp, updated_cache_time = get_updated_cache_time() # If there is nothing else to do exit. This will set state as # changed based on if the cache was updated. 
if not p['package'] and not p['upgrade'] and not p['deb']: module.exit_json( changed=updated_cache, cache_updated=updated_cache, cache_update_time=updated_cache_time ) force_yes = p['force'] if p['upgrade']: upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove) if p['deb']: if p['state'] != 'present': module.fail_json(msg="deb only supports state=present") if '://' in p['deb']: p['deb'] = download(module, p['deb']) install_deb(module, p['deb'], cache, install_recommends=install_recommends, allow_unauthenticated=allow_unauthenticated, force=force_yes, dpkg_options=p['dpkg_options']) unfiltered_packages = p['package'] or () packages = [package for package in unfiltered_packages if package != '*'] all_installed = '*' in unfiltered_packages latest = p['state'] == 'latest' if latest and all_installed: if packages: module.fail_json(msg='unable to install additional packages when ugrading all installed packages') upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove) if packages: for package in packages: if package.count('=') > 1: module.fail_json(msg="invalid package spec: %s" % package) if latest and '=' in package: module.fail_json(msg='version number inconsistent with state=latest: %s' % package) if not packages: if autoclean: cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options) if autoremove: cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options) if p['state'] in ('latest', 'present', 'build-dep'): state_upgrade = False state_builddep = False if p['state'] == 'latest': state_upgrade = True if p['state'] == 'build-dep': state_builddep = True success, retvals = install( module, packages, cache, upgrade=state_upgrade, default_release=p['default_release'], install_recommends=install_recommends, force=force_yes, dpkg_options=dpkg_options, build_dep=state_builddep, autoremove=autoremove, only_upgrade=p['only_upgrade'], allow_unauthenticated=allow_unauthenticated ) # Store if the cache has been updated retvals['cache_updated'] = updated_cache # Store when the update time was last retvals['cache_update_time'] = updated_cache_time if success: module.exit_json(**retvals) else: module.fail_json(**retvals) elif p['state'] == 'absent': remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove) except apt.cache.LockFailedException: module.fail_json(msg="Failed to lock apt for exclusive operation") except apt.cache.FetchFailedException: module.fail_json(msg="Could not fetch updated apt files") if __name__ == "__main__": main()
gpl-3.0
robertmattmueller/sdac-compiler
sympy/utilities/benchmarking.py
24
6483
"""benchmarking through py.test""" from __future__ import print_function, division import py from py.__.test.item import Item from py.__.test.terminal.terminal import TerminalSession from math import ceil as _ceil, floor as _floor, log10 import timeit from inspect import getsource from sympy.core.compatibility import exec_ # from IPython.Magic.magic_timeit #units = ["s", "ms", "\xc2\xb5s", "ns"] units = ["s", "ms", "us", "ns"] scaling = [1, 1e3, 1e6, 1e9] unitn = dict((s, i) for i, s in enumerate(units)) precision = 3 # like py.test Directory but scan for 'bench_<smth>.py' class Directory(py.test.collect.Directory): def filefilter(self, path): b = path.purebasename ext = path.ext return b.startswith('bench_') and ext == '.py' # like py.test Module but scane for 'bench_<smth>' and 'timeit_<smth>' class Module(py.test.collect.Module): def funcnamefilter(self, name): return name.startswith('bench_') or name.startswith('timeit_') # Function level benchmarking driver class Timer(timeit.Timer): def __init__(self, stmt, setup='pass', timer=timeit.default_timer, globals=globals()): # copy of timeit.Timer.__init__ # similarity index 95% self.timer = timer stmt = timeit.reindent(stmt, 8) setup = timeit.reindent(setup, 4) src = timeit.template % {'stmt': stmt, 'setup': setup} self.src = src # Save for traceback display code = compile(src, timeit.dummy_src_name, "exec") ns = {} #exec code in globals(), ns -- original timeit code exec_(code, globals, ns) # -- we use caller-provided globals instead self.inner = ns["inner"] class Function(py.__.test.item.Function): def __init__(self, *args, **kw): super(Function, self).__init__(*args, **kw) self.benchtime = None self.benchtitle = None def execute(self, target, *args): # get func source without first 'def func(...):' line src = getsource(target) src = '\n'.join( src.splitlines()[1:] ) # extract benchmark title if target.func_doc is not None: self.benchtitle = target.func_doc else: self.benchtitle = src.splitlines()[0].strip() # XXX we ignore args timer = Timer(src, globals=target.func_globals) if self.name.startswith('timeit_'): # from IPython.Magic.magic_timeit repeat = 3 number = 1 for i in range(1, 10): t = timer.timeit(number) if t >= 0.2: number *= (0.2 / t) number = int(_ceil(number)) break if t <= 0.02: # we are not close enough to that 0.2s number *= 10 else: # since we are very close to be > 0.2s we'd better adjust number # so that timing time is not too high number *= (0.2 / t) number = int(_ceil(number)) break self.benchtime = min(timer.repeat(repeat, number)) / number # 'bench_<smth>' else: self.benchtime = timer.timeit(1) class BenchSession(TerminalSession): def header(self, colitems): #self.out.sep("-", "benchmarking starts") super(BenchSession, self).header(colitems) def footer(self, colitems): super(BenchSession, self).footer(colitems) #self.out.sep("-", "benchmarking ends") self.out.write('\n') self.print_bench_results() def print_bench_results(self): self.out.write('==============================\n') self.out.write(' *** BENCHMARKING RESULTS *** \n') self.out.write('==============================\n') self.out.write('\n') # benchname, time, benchtitle results = [] for item, outcome in self._memo: if isinstance(item, Item): best = item.benchtime if best is None: # skipped or failed benchmarks tstr = '---' else: # from IPython.Magic.magic_timeit if best > 0.0: order = min(-int(_floor(log10(best)) // 3), 3) else: order = 3 tstr = "%.*g %s" % ( precision, best * scaling[order], units[order]) results.append( [item.name, tstr, item.benchtitle] 
) # dot/unit align second column # FIXME simpler? this is crappy -- shame on me... wm = [0]*len(units) we = [0]*len(units) for s in results: tstr = s[1] n, u = tstr.split() # unit n un = unitn[u] try: m, e = n.split('.') except ValueError: m, e = n, '' wm[un] = max(len(m), wm[un]) we[un] = max(len(e), we[un]) for s in results: tstr = s[1] n, u = tstr.split() un = unitn[u] try: m, e = n.split('.') except ValueError: m, e = n, '' m = m.rjust(wm[un]) e = e.ljust(we[un]) if e.strip(): n = '.'.join((m, e)) else: n = ' '.join((m, e)) # let's put the number into the right place txt = '' for i in range(len(units)): if i == un: txt += n else: txt += ' '*(wm[i] + we[i] + 1) s[1] = '%s %s' % (txt, u) # align all columns besides the last one for i in range(2): w = max(len(s[i]) for s in results) for s in results: s[i] = s[i].ljust(w) # show results for s in results: self.out.write('%s | %s | %s\n' % tuple(s)) def main(args=None): # hook our Directory/Module/Function as defaults from py.__.test import defaultconftest defaultconftest.Directory = Directory defaultconftest.Module = Module defaultconftest.Function = Function # hook BenchSession as py.test session config = py.test.config config._getsessionclass = lambda: BenchSession py.test.cmdline.main(args)
gpl-3.0
Wilee999/panda3d
contrib/src/sceneeditor/quad.py
8
26662
######################################################################################################################################### # This file implements a Quad View for the level editor # This feature is not yet enabled in the level editor because picking objects in quad view doesnt work # I have tried to send the picking function in seSelection.py the correct camera and mouse coordinates but something seems to be wron # There are two classes in this file..the QuadView and the viewPort...there are four instances of viewport, one for each view in QuadView ######################################################################################################################################### from direct.showbase.ShowBaseGlobal import * from direct.interval.IntervalGlobal import * from direct.showbase.DirectObject import DirectObject from pandac.PandaModules import * import math #Manakel 2/12/2005: replace from pandac import by from pandac.PandaModules import from pandac.PandaModules import MouseWatcher class ViewPort: ######################################################################################################################################### # The ViewPort class has the camera and associated display region set up for actually rendering the four sub-views # The constructor needs the bounds, window layer, camera, color, projection type, name and scene for the view ######################################################################################################################################### def __init__(self,X1,X2,Y1,Y2,layer,cam,background=Vec4(0.3,0.3,0.3,1),projection="perspective",type="top",scene=render): self.VPType=type self.VP_X1=X1 self.VP_Y1=Y1 self.VP_X2=X2 self.VP_Y2=Y2 self.VP_width=self.VP_X2 - self.VP_X1 self.VP_height=self.VP_Y2 - self.VP_Y1 self.the_viewport=layer.makeDisplayRegion(self.VP_X1, self.VP_X2,self.VP_Y1, self.VP_Y2) self.the_viewport.setCamera(cam) self.the_viewport.setClearDepthActive(1) self.the_viewport.setClearColorActive(1) self.the_viewport.setClearColor(background) self.cam=cam # Set up the cameras to look in the right place. 
if(type=="top"): self.cam.setP(-90) self.cam.setZ(-40) elif(type=="left"): self.cam.setH(-90) self.cam.setX(10) elif(type=="front"): self.cam.setY(-10) elif(type=="perspective"): cam.setY(-100) #cam.setX(10) #cam.setZ(-10) #cam.setH(45) #cam.setP(-45) #print "aa" if(projection=="ortho"): self.lens=OrthographicLens() self.lens.setAspectRatio((self.VP_X2-self.VP_X1)/(self.VP_Y2-self.VP_Y1)) self.lens.setFilmSize(self.VP_width*200,self.VP_height*200) #lens.setFilmOffset((self.VP_X2 + self.VP_X1) * 0.5, (self.VP_Y2 + self.VP_Y1) * 0.5) self.lens.setNearFar(-1000, 1000) self.cam.node().setLens(self.lens) self.cam.node().setScene(scene) elif(projection=="perspective"): self.lens=base.cam.node().getLens() self.lens.setAspectRatio((self.VP_X2-self.VP_X1)/(self.VP_Y2-self.VP_Y1)) self.cam.node().setLens(self.lens) self.cam.node().setScene(scene) self.the_viewport.setCamera(self.cam) def resizeX(self,width_increment): if(self.VPType=="top" or self.VPType=="left"): self.the_viewport.setDimensions(self.VP_X1,self.VP_X2+width_increment,self.VP_Y1,self.VP_Y2) elif(self.VPType=="perspective" or self.VPType=="front"): self.the_viewport.setDimensions(self.VP_X1+width_increment,self.VP_X2,self.VP_Y1,self.VP_Y2) def resizeY(self,height_increment,direction): if(self.VPType=="left" or self.type=="perspective"): self.the_viewport.setDimensions(self.VP_X1,self.VP_X2,self.VP_Y1,self.VP_Y2+height_increment) else: self.the_viewport.setDimensions(self.VP_X1,self.VP_X2,self.VP_Y1+height_increment,self.VP_Y2) def AdjustAspect(self,x,y): if (y==0): y=1 self.lens.setAspectRatio(x/y) self.cam.node().setLens(self.lens) def resize(self,x,y): if(self.VPType=="left"): self.the_viewport.setDimensions(0,x,0,y) w=abs(x-self.VP_X1) h=abs(y-self.VP_Y1) if(h==0): h=1 self.lens.setAspectRatio(w/h) self.cam.node().setLens(self.lens) if(self.VPType=="top"): self.the_viewport.setDimensions(0,x,y,1) w=abs(x-self.VP_X1) h=abs(self.VP_Y2-y) if(h==0): h=1 self.lens.setAspectRatio(w/h) self.cam.node().setLens(self.lens) if(self.VPType=="front"): self.the_viewport.setDimensions(x,1,y,1) w=abs(self.VP_X2-x) h=abs(self.VP_Y2-y) if(h==0): h=1 self.lens.setAspectRatio(w/h) self.cam.node().setLens(self.lens) if(self.VPType=="perspective"): self.the_viewport.setDimensions(x,1,0,y) w=abs(self.VP_X2-x) h=abs(y-self.VP_Y1) if(h==0): h=1 self.lens.setAspectRatio(w/h) self.cam.node().setLens(self.lens) def setScene(self,scene): self.cam.node().setScene(scene) def setDR(self,mouseWatcher): #mouseWatcher.setDisplayRegion(self.the_viewport) pass def setCam(self): #base.cam=self.cam #base.cam.node().setLens(self.cam.node().getLens()) base.camNode=self.cam.node() #base.camNode.setLens(self.cam.node().getLens()) #base.camLens=self.cam.node().getLens() def getCam(self): return self.cam class QuadView(DirectObject): ######################################################################################################################################### # This class sets up four cameras for the scene (ideally we want four instances of render too) # and then instatiates a ViewPort class for each of them # ######################################################################################################################################### def __init__(self): self.PTracker=1 self.ControlPressed=0 self.AltPressed=0 self.PanConstantX=50 self.PanConstantY=50 self.ZoomConstant=1 self.FrontWidth=100 self.FrontHeight=100 self.TopWidth=100 self.TopHeight=100 self.LeftWidth=100 self.LeftHeight=100 self.MouseButton=0 self.CurrentQuad=4 self.HorizontalAxis=0.0 
self.VerticalAxis=0.0 #base.disableMouse() self.MouseDragging=0 self.currX= 0 self.oldX=self.currX self.currY= 0 self.oldY=self.currY self.FrontTexture=1 self.LeftTexture=1 self.PerspectiveTexture=1 self.TopTexture=1 self.FrontWire=0 self.LeftWire=0 self.PerspectiveWire=0 self.TopWire=0 # Keep track of the currently selected window... values are 1-4 for four quadrants of a standard # Cartesian coordinate system # These are the orthographic cameras # They will be restricted to panning and zooming i.e. no rotation # Top could be flipped to back, left to right and front to back self.topCam= render.attachNewNode(Camera('topCam')) self.frontCam = render.attachNewNode(Camera('frontCam')) self.leftCam= render.attachNewNode(Camera('leftCam')) # This camera will have a trackball control since its perspective self.perspectiveCam = render.attachNewNode(Camera('perspectiveCam')) #self.toplens=OrthographicLens() #self.leftLens=OrthographicLens() #self.frontLens=OrthographicLens() #self.perspectiveLens=base.cam.node().getLens() # For now all lenses are same as that of base.cam #self.topCamLens=OrthographicLens() #self.frontCamLens= base.cam.node().getLens() #self.leftCamLens= base.cam.node().getLens() #self.perspectiveCamLens= base.cam.node().getLens() # Manipulate lenses here if need be #self.topCamLens.setFilmSize(250) # Set the Lenses #self.topCam.node().setLens(self.topCamLens) #self.frontCam.node().setLens(self.frontCamLens) #self.leftCam.node().setLens(self.leftCamLens) #self.perspectiveCam.node().setLens(self.perspectiveCamLens) #self.badwiz = loader.loadModel('badwizard1') #self.badwiz.reparentTo(render) # Create four separate display regions for the quad view. # These will overlap the main display region # To stack these overlapping DisplayRegions, we need a new layer. If # they didn't overlap, we could put them in the same layer. 
        self.newLayer = base.win.getChannel(0).makeLayer()
        self.PerspectiveScene=NodePath('PerspectiveScene')
        self.FrontScene=NodePath('FrontScene')
        self.TopScene=NodePath('TopScene')
        self.LeftScene=NodePath('LeftScene')
        self.SceneParent=NodePath('SceneParent')
        #self.PerspectiveScene=render.copyTo(self.SceneParent)
        #self.FrontScene=render.copyTo(self.SceneParent)
        #self.TopScene=render.copyTo(self.SceneParent)
        #self.LeftScene=render.copyTo(self.SceneParent)
        self.PerspectiveScene=render
        self.FrontScene=render
        self.TopScene=render
        self.LeftScene=render
        #self.PerspectiveScene.reparentTo(self.SceneParent)
        #self.FrontScene.reparentTo(self.SceneParent)
        #self.TopScene.reparentTo(self.SceneParent)
        #self.LeftScene.reparentTo(self.SceneParent)
        self.Perspective=ViewPort(0.5,1.0,0.0,0.5,self.newLayer,self.perspectiveCam,Vec4(0.75,0.75,0.75,1),"perspective","perspective",self.PerspectiveScene)
        self.Top=ViewPort(0.0,0.5,0.5,1.0,self.newLayer,self.topCam,Vec4(0.80,0.80,0.80,1),"ortho","top",self.TopScene)
        self.Left=ViewPort(0.0,0.5,0.0,0.5,self.newLayer,self.leftCam,Vec4(0.85,0.85,0.85,1),"ortho","left",self.LeftScene)
        self.Front=ViewPort(0.5,1.0,0.5,1.0,self.newLayer,self.frontCam,Vec4(0.85,0.85,0.85,1),"ortho","front",self.FrontScene)
        #self.Perspective=None
        #self.Top=None
        #self.Front=None
        #self.Left=None
        #self.raycaster = RayCaster( camera )
        #self.lastPickPoint = None
        #base.useTrackball()
        #self.dataRoot = NodePath('dataRoot')
        # Cache the node so we do not ask for it every frame
        #self.dataRootNode = self.dataRoot.node()
        #self.dataUnused = NodePath('dataUnused')
        #self.mak=None
        #self.mak = self.dataRoot.attachNewNode(MouseAndKeyboard(base.win, 0, 'mak'))
        #self.mak.node().setSource(base.win, 0)
        self.mouseWatcherNode = MouseWatcher('mouseWatcher')
        self.mouseWatcher = base.mak.attachNewNode(self.mouseWatcherNode)
        #self.Perspective.setDR(self.mouseWatcherNode)
        self.buttonThrower = self.mouseWatcher.attachNewNode(ButtonThrower('buttons'))
        #ddr=DisplayRegionContext(self.Perspective.getCam())
        #base.setMouseOnNode(self.smiley.node())
        # Let Mouse Control Perspective View for now
        #base.enableSoftwareMousePointer()

        # Message Handlers
        self.accept("a",self.setLeft)
        self.accept("q",self.setTop)
        self.accept("w",self.setFront)
        self.accept("s",self.setPerspective)
        self.accept("mouse1",self.MouseTell,[1])
        self.accept("mouse2",self.MouseTell,[2])
        self.accept("mouse3",self.MouseTell,[3])
        self.accept("mouse1-up",self.MouseTellUp,[4])
        self.accept("mouse2-up",self.MouseTellUp,[5])
        self.accept("mouse3-up",self.MouseTellUp,[6])
        self.accept("mouse2-scroll",self.resizedr)
        self.accept("r",self.resizedr)
        self.accept("alt",self.AltHandler)
        self.accept("alt-up",self.AltUpHandler)
        self.accept("alt-mouse1",self.AltDown)
        self.accept("alt-mouse1-up",self.AltUp)
        self.accept("control-mouse1",self.CtlDown)
        self.accept("control-mouse1-up",self.CtlUp)

    # Methods
    #def setLastPickPoint( self ):
    #    mouseX, mouseY = self.mouseWatcherNode.getMouseX(), self.mouseWatcherNode.getMouseY()
    #    self.lastPickPoint = self.raycaster.pick( mouseX, mouseY )
    #    print self.lastPickPoint

    def AltDown(self):
        self.AltPressed=1

    def AltUp(self):
        self.AltPressed=0

    def CtlDown(self):
        self.ControlPressed=1

    def CtlUp(self):
        self.ControlPressed=0

    def ToggleWire(self):
        if (self.CurrentQuad==1): # Front View
            if(self.FrontWire):
                # Switch this view to wireframe shading
                self.FrontScene.setRenderModeWireframe(100)
                self.FrontScene.setTwoSided(1)
                self.FrontScene.setTextureOff(100)
                self.FrontWire=0
            else:
                # Switch this view back to solid shading
                self.FrontScene.clearRenderMode()
                #self.FrontScene.setTwoSided(not self.backfaceCullingEnabled)
                if(self.FrontTexture):
                    self.FrontScene.clearTexture()
                self.FrontWire=1
        elif (self.CurrentQuad==2): # Top View
            if(self.TopWire):
                # Switch this view to wireframe shading
                self.TopScene.setRenderModeWireframe(100)
                self.TopScene.setTwoSided(1)
                self.TopScene.setTextureOff(100)
                self.TopWire=0
            else:
                # Switch this view back to solid shading
                self.TopScene.clearRenderMode()
                #self.TopScene.setTwoSided(not self.backfaceCullingEnabled)
                if(self.TopTexture):
                    self.TopScene.clearTexture()
                self.TopWire=1
        elif (self.CurrentQuad==3): # Left View
            if(self.LeftWire):
                # Switch this view to wireframe shading
                self.LeftScene.setRenderModeWireframe(100)
                self.LeftScene.setTwoSided(1)
                self.LeftScene.setTextureOff(100)
                self.LeftWire=0
            else:
                # Switch this view back to solid shading
                self.LeftScene.clearRenderMode()
                #self.LeftScene.setTwoSided(not self.backfaceCullingEnabled)
                if(self.LeftTexture):
                    self.LeftScene.clearTexture()
                self.LeftWire=1
        elif (self.CurrentQuad==4): # Perspective View
            if(self.PerspectiveWire):
                # Switch this view to wireframe shading
                self.PerspectiveScene.setRenderModeWireframe(100)
                self.PerspectiveScene.setTwoSided(1)
                self.PerspectiveScene.setTextureOff(100)
                self.PerspectiveWire=0
            else:
                # Switch this view back to solid shading
                self.PerspectiveScene.clearRenderMode()
                #self.PerspectiveScene.setTwoSided(not self.backfaceCullingEnabled)
                if(self.PerspectiveTexture):
                    self.PerspectiveScene.clearTexture()
                self.PerspectiveWire=1

    def ToggleTexture(self):
        if (self.CurrentQuad==1): # Front View
            if(self.FrontTexture):
                # Texture is on so turn it off
                self.FrontScene.setTextureOff(100)
                self.FrontTexture=0
            else:
                self.FrontScene.clearTexture()
                self.FrontTexture=1
        elif (self.CurrentQuad==2): # Top View
            if(self.TopTexture):
                # Texture is on so turn it off
                self.TopScene.setTextureOff(100)
                self.TopTexture=0
            else:
                self.TopScene.clearTexture()
                self.TopTexture=1
        elif (self.CurrentQuad==3): # Left View
            if(self.LeftTexture):
                # Texture is on so turn it off
                self.LeftScene.setTextureOff(100)
                self.LeftTexture=0
            else:
                self.LeftScene.clearTexture()
                self.LeftTexture=1
        elif (self.CurrentQuad==4): # Perspective View
            if(self.PerspectiveTexture):
                # Texture is on so turn it off
                self.PerspectiveScene.setTextureOff(100)
                self.PerspectiveTexture=0
            else:
                self.PerspectiveScene.clearTexture()
                self.PerspectiveTexture=1

    def reparenter(self):
        #self.FrontScene.reparentTo(render)
        #self.Front.setScene(render)
        #self.Top.setScene(render)
        #self.Left.setScene(render)
        #self.Perspective.setScene(render)
        pass

    def unparenter(self):
        #self.PerspectiveScene=render.copyTo(render)
        #self.FrontScene=render.copyTo(render)
        #self.TopScene=render.copyTo(render)
        #self.LeftScene=render.copyTo(render)
        #self.SceneParent.reparentTo(render)
        #self.PerspectiveScene.reparentTo(self.SceneParent)
        #self.FrontScene.reparentTo(self.SceneParent)
        #self.TopScene.reparentTo(self.SceneParent)
        #self.LeftScene.reparentTo(self.SceneParent)
        pass

    def AltHandler(self):
        self.oldX=self.mouseWatcherNode.getMouseX()
        if(self.oldX<-1 or self.oldX>1):
            return
        self.oldY=self.mouseWatcherNode.getMouseY()
        if(self.oldY<-1 or self.oldY>1):
            return
        taskMgr.add(self.DragAction,'DragAction')

    def AltUpHandler(self):
        taskMgr.remove('DragAction')

    def gridtoggle(self):
        #grid=DirectGrid()
        #grid.enable()
        pass

    def resizedr(self,x,y):
        #print "X: " + str(x) + " Y: " + str(y)
        x=(x+1)/2.0
        y=(y+1)/2.0
        self.Perspective.resize(x,y)
        self.Top.resize(x,y)
        self.Front.resize(x,y)
        self.Left.resize(x,y)

    def setAppropriateViewPort(self,x,y):
        #print "SET APPROPRIATE:" + str(x) + " " + str(y)
        if(x<self.VerticalAxis):
            if(y<self.HorizontalAxis):
                self.setLeft()
            else:
                self.setTop()
        else:
            if(y<self.HorizontalAxis):
                self.setPerspective()
            else:
                self.setFront()

    def MouseTell(self,buttonCode):
        self.MouseButton=buttonCode
        self.setAppropriateViewPort(self.mouseWatcherNode.getMouseX(),self.mouseWatcherNode.getMouseY())
        x=base.mouseWatcherNode.getMouseX()
        y=base.mouseWatcherNode.getMouseY()
        # Perspective and Front
        if(self.CurrentQuad==4 or self.CurrentQuad==1):
            x1=abs(x-self.VerticalAxis)
            w1=abs(1-self.VerticalAxis)
            x2=x1*2.0/w1
            ansX=-1+x2
        # Left and Top
        if(self.CurrentQuad==2 or self.CurrentQuad==3):
            x1=abs(x-(-1.0))
            w1=abs(self.VerticalAxis-(-1.0))
            x2=x1*2.0/w1
            ansX=-1.0+x2
        # Left and Perspective
        if(self.CurrentQuad==4 or self.CurrentQuad==3):
            y1=abs(y-(-1.0))
            h1=abs(self.HorizontalAxis-(-1.0))
            y2=y1*2.0/h1
            ansY=-1.0+y2
        # Front and Top
        if(self.CurrentQuad==1 or self.CurrentQuad==2):
            y1=abs(y-self.HorizontalAxis)
            h1=abs(1.0-self.HorizontalAxis)
            y2=y1*2.0/h1
            ansY=-1.0+y2
        self.xy=[ansX,ansY]
        print "Sent X:%f Sent Y:%f"%(ansX,ansY)
        #SEditor.iRay.pick(render,self.xy)
        SEditor.manipulationControl.manipulationStop(self.xy)
        #print "MouseX " + str(base.mouseWatcherNode.getMouseX()) + "MouseY " + str(base.mouseWatcherNode.getMouseY()) + "\n"
        #print "MouseX " + str(self.mouseWatcherNode.getMouseX()) + "MouseY " + str(self.mouseWatcherNode.getMouseY()) + "\n"
        base.mouseWatcherNode=self.mouseWatcherNode
        self.oldX=self.mouseWatcherNode.getMouseX()
        if(self.oldX<-1 or self.oldX>1):
            return
        self.oldY=self.mouseWatcherNode.getMouseY()
        if(self.oldY<-1 or self.oldY>1):
            return
        self.Mouse_Dragging=1
        taskMgr.add(self.DragAction,'DragAction')

    def MouseTellUp(self,buttoncode):
        #self.MouseButton=0
        self.PanConstantX= 50
        self.PanConstantY= 50
        self.ZoomConstant=1
        taskMgr.remove('DragAction')
        self.Mouse_Dragging=0
        #print "Mouse Up"

    def Max_Style_Mouse_View(self,buttoncode):
        pass

    def ChangeBaseDR(self):
        dr=base.win.getDisplayRegion(0)
        if(self.CurrentQuad==1):
            # Front
            dr.setDimensions(0.5,1,0.5,1)
        elif(self.CurrentQuad==2):
            # Top
            dr.setDimensions(0,0.5,0.5,1)
        elif(self.CurrentQuad==3):
            # Left
            dr.setDimensions(0,0.5,0,0.5)
        elif(self.CurrentQuad==4):
            # Perspective
            dr.setDimensions(0.5,1,0,0.5)

    def setLeft(self):
        print "LEFT"
        self.CurrentQuad=3
        self.ChangeBaseDR()
        self.Left.setCam()
        #self.Left.setDR(self.mouseWatcherNode)

    def setTop(self):
        print "TOP"
        self.CurrentQuad=2
        self.ChangeBaseDR()
        self.Top.setCam()
        #self.Top.setDR(self.mouseWatcherNode)

    def setPerspective(self):
        print "PERSPECTIVE"
        self.CurrentQuad=4
        self.ChangeBaseDR()
        self.Perspective.setCam()
        #self.Perspective.setDR(self.mouseWatcherNode)

    def setFront(self):
        print "FRONT"
        self.CurrentQuad=1
        self.ChangeBaseDR()
        self.Front.setCam()
        #self.Front.setDR(self.mouseWatcherNode)

    def DragAction(self,task):
        #if(self.MouseDragging==1):
        self.currX= self.mouseWatcherNode.getMouseX()
        if(self.currX<-1 or self.currX>1):
            return
        self.currY= self.mouseWatcherNode.getMouseY()
        if(self.currY<-1 or self.currY>1):
            return
        self.diffX=self.currX-self.oldX
        self.diffY=self.currY-self.oldY
        if(self.ControlPressed):
            # Change Size of the ViewPorts
            #if(base.getControl()):
            self.VerticalAxis=self.currX
            self.HorizontalAxis=self.currY
            if(self.HorizontalAxis<-1 or self.HorizontalAxis>1 or self.VerticalAxis<-1 or self.VerticalAxis>1):
                return
            self.resizedr(self.VerticalAxis,self.HorizontalAxis)
        #if(self.AltPressed): # View Camera Transforms -> Maya style
        elif(1):
            #print "ALTPRESSED"
            if(self.PanConstantX<4096):
                self.PanConstantX= self.PanConstantX * 2
                self.PanConstantY= self.PanConstantY * 2
                self.ZoomConstant= self.ZoomConstant + 50
            if(self.MouseButton==1):
                # TrackBall rotation only for Perspective View
                if(self.CurrentQuad==4):
                    pass
            elif(self.MouseButton==2):
                # Do Panning
                if(self.CurrentQuad==1):
                    # Y and Z values change meanings for different cameras
                    self.MoveCamera(-self.diffX*self.PanConstantX,0,-self.diffY*self.PanConstantY,self.CurrentQuad)
                elif(self.CurrentQuad==2):
                    self.MoveCamera(-self.diffX*self.PanConstantX,-self.diffY*self.PanConstantY,0,self.CurrentQuad)
                elif(self.CurrentQuad==3):
                    self.MoveCamera(0,self.diffX*self.PanConstantX,-self.diffY*self.PanConstantY,self.CurrentQuad)
                elif(self.CurrentQuad==4):
                    pass
            elif(self.MouseButton==3):
                # Do Zoom
                if(self.CurrentQuad==1):
                    # Y and Z values change meanings for different cameras
                    #lens = OrthographicLens()
                    #lens.setFilmSize(l,self.VP_height*200)
                    #lens.setFilmOffset((self.VP_X2 + self.VP_X1) * 0.5, (self.VP_Y2 + self.VP_Y1) * 0.5)
                    #lens.setNearFar(-1000, 1000)
                    self.FrontWidth= self.FrontWidth + self.diffX
                    self.FrontHeight= self.FrontHeight + self.diffX
                    self.FrontWidth= self.FrontWidth + self.diffY
                    self.FrontHeight= self.FrontHeight + self.diffY
                    if(self.FrontWidth<=0):
                        self.FrontWidth=1
                    if(self.FrontHeight<=0):
                        self.FrontHeight=1
                    self.frontCam.node().getLens().setFilmSize(self.FrontWidth,self.FrontHeight)
                    self.resizedr(self.VerticalAxis,self.HorizontalAxis)
                elif(self.CurrentQuad==2):
                    self.TopWidth= self.TopWidth + self.diffX
                    self.TopHeight= self.TopHeight + self.diffX
                    self.TopWidth= self.TopWidth + self.diffY
                    self.TopHeight= self.TopHeight + self.diffY
                    self.topCam.node().getLens().setFilmSize(self.TopWidth,self.TopHeight)
                    self.resizedr(self.VerticalAxis,self.HorizontalAxis)
                elif(self.CurrentQuad==3):
                    self.LeftWidth= self.LeftWidth + self.diffX
                    self.LeftHeight= self.LeftHeight + self.diffX
                    self.LeftWidth= self.LeftWidth + self.diffY
                    self.LeftHeight= self.LeftHeight + self.diffY
                    self.leftCam.node().getLens().setFilmSize(self.LeftWidth,self.LeftHeight)
                    self.resizedr(self.VerticalAxis,self.HorizontalAxis)
                elif(self.CurrentQuad==4):
                    pass
            else:
                pass
        self.oldX=self.currX
        self.oldY=self.currY
        return Task.cont

    def MoveCamera(self,X_amt,Y_amt,Z_amt,quad):
        if(quad==1):
            self.frontCam.setPos(self.frontCam.getX()+X_amt,self.frontCam.getY()+Y_amt,self.frontCam.getZ()+Z_amt)
        elif(quad==2):
            self.topCam.setPos(self.topCam.getX()+X_amt,self.topCam.getY()+Y_amt,self.topCam.getZ()+Z_amt)
        elif(quad==3):
            self.leftCam.setPos(self.leftCam.getX()+X_amt,self.leftCam.getY()+Y_amt,self.leftCam.getZ()+Z_amt)
        elif(quad==4):
            self.perspectiveCam.setPos(self.perspectiveCam.getX()+X_amt,self.perspectiveCam.getY()+Y_amt,self.perspectiveCam.getZ()+Z_amt)

#View=QuadView()
#run()
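# --- Added sketch (not part of the original file) ---
# The MouseTell math above remaps a mouse position from the window's
# normalized [-1, 1] range into the same range local to one quadrant. A
# minimal, self-contained version of that remapping; the helper name and
# the assumption of the same axis-split convention are mine:

def remap_to_half(coord, axis, upper_half):
    """Map coord in [-1, 1] to [-1, 1] within one half of a split axis.

    `axis` is the split position; `upper_half` selects the [axis, 1]
    half, otherwise the [-1, axis] half is used.
    """
    if upper_half:
        lo, hi = axis, 1.0
    else:
        lo, hi = -1.0, axis
    return -1.0 + (coord - lo) * 2.0 / (hi - lo)

# Example: with the vertical split at 0.0, x = 0.5 in the right half maps
# to 0.0, the center of that quadrant's local range:
#   remap_to_half(0.5, 0.0, True) == 0.0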
bsd-3-clause
sunsettrack4/android_kernel_oneplus_msm8996
scripts/rt-tester/rt-tester.py
1106
5305
#!/usr/bin/python # # rt-mutex tester # # (C) 2006 Thomas Gleixner <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # import os import sys import getopt import shutil import string # Globals quiet = 0 test = 0 comments = 0 sysfsprefix = "/sys/devices/system/rttest/rttest" statusfile = "/status" commandfile = "/command" # Command opcodes cmd_opcodes = { "schedother" : "1", "schedfifo" : "2", "lock" : "3", "locknowait" : "4", "lockint" : "5", "lockintnowait" : "6", "lockcont" : "7", "unlock" : "8", "signal" : "11", "resetevent" : "98", "reset" : "99", } test_opcodes = { "prioeq" : ["P" , "eq" , None], "priolt" : ["P" , "lt" , None], "priogt" : ["P" , "gt" , None], "nprioeq" : ["N" , "eq" , None], "npriolt" : ["N" , "lt" , None], "npriogt" : ["N" , "gt" , None], "unlocked" : ["M" , "eq" , 0], "trylock" : ["M" , "eq" , 1], "blocked" : ["M" , "eq" , 2], "blockedwake" : ["M" , "eq" , 3], "locked" : ["M" , "eq" , 4], "opcodeeq" : ["O" , "eq" , None], "opcodelt" : ["O" , "lt" , None], "opcodegt" : ["O" , "gt" , None], "eventeq" : ["E" , "eq" , None], "eventlt" : ["E" , "lt" , None], "eventgt" : ["E" , "gt" , None], } # Print usage information def usage(): print "rt-tester.py <-c -h -q -t> <testfile>" print " -c display comments after first command" print " -h help" print " -q quiet mode" print " -t test mode (syntax check)" print " testfile: read test specification from testfile" print " otherwise from stdin" return # Print progress when not in quiet mode def progress(str): if not quiet: print str # Analyse a status value def analyse(val, top, arg): intval = int(val) if top[0] == "M": intval = intval / (10 ** int(arg)) intval = intval % 10 argval = top[2] elif top[0] == "O": argval = int(cmd_opcodes.get(arg, arg)) else: argval = int(arg) # progress("%d %s %d" %(intval, top[1], argval)) if top[1] == "eq" and intval == argval: return 1 if top[1] == "lt" and intval < argval: return 1 if top[1] == "gt" and intval > argval: return 1 return 0 # Parse the commandline try: (options, arguments) = getopt.getopt(sys.argv[1:],'chqt') except getopt.GetoptError, ex: usage() sys.exit(1) # Parse commandline options for option, value in options: if option == "-c": comments = 1 elif option == "-q": quiet = 1 elif option == "-t": test = 1 elif option == '-h': usage() sys.exit(0) # Select the input source if arguments: try: fd = open(arguments[0]) except Exception,ex: sys.stderr.write("File not found %s\n" %(arguments[0])) sys.exit(1) else: fd = sys.stdin linenr = 0 # Read the test patterns while 1: linenr = linenr + 1 line = fd.readline() if not len(line): break line = line.strip() parts = line.split(":") if not parts or len(parts) < 1: continue if len(parts[0]) == 0: continue if parts[0].startswith("#"): if comments > 1: progress(line) continue if comments == 1: comments = 2 progress(line) cmd = parts[0].strip().lower() opc = parts[1].strip().lower() tid = parts[2].strip() dat = parts[3].strip() try: # Test or wait for a status value if cmd == "t" or cmd == "w": testop = test_opcodes[opc] fname = "%s%s%s" %(sysfsprefix, tid, statusfile) if test: print fname continue while 1: query = 1 fsta = open(fname, 'r') status = fsta.readline().strip() fsta.close() stat = status.split(",") for s in stat: s = s.strip() if s.startswith(testop[0]): # Separate status value val = s[2:].strip() query = analyse(val, testop, dat) break if query or cmd == "t": break progress(" " + 
status) if not query: sys.stderr.write("Test failed in line %d\n" %(linenr)) sys.exit(1) # Issue a command to the tester elif cmd == "c": cmdnr = cmd_opcodes[opc] # Build command string and sys filename cmdstr = "%s:%s" %(cmdnr, dat) fname = "%s%s%s" %(sysfsprefix, tid, commandfile) if test: print fname continue fcmd = open(fname, 'w') fcmd.write(cmdstr) fcmd.close() except Exception,ex: sys.stderr.write(str(ex)) sys.stderr.write("\nSyntax error in line %d\n" %(linenr)) if not test: fd.close() sys.exit(1) # Normal exit pass print "Pass" sys.exit(0)
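# --- Added example (not part of the original script) ---
# A hypothetical test specification in the line format this parser
# accepts: "command: opcode: thread-id: data", with '#' starting a
# full-line comment. Opcode names must come from cmd_opcodes (for C
# lines) or test_opcodes (for T/W lines); the meaning of the data column
# (priority, lock number, ...) follows the kernel's rt-tester
# conventions:
#
#   # acquire and release lock 0 on thread 0
#   C: schedfifo: 0: 80
#   C: lock: 0: 0
#   W: locked: 0: 0
#   C: unlock: 0: 0
#   T: unlocked: 0: 0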
gpl-2.0
vmware/ansible-module-chaperone
nsxt/nsxt_transport_node_edge_details.py
1
2088
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright © 2019 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

import yaml
import yamlordereddictloader
from collections import OrderedDict
import logging

logger = logging.getLogger('vswitch')
hdlr = logging.FileHandler('/var/log/chaperone/ChaperoneNSXtLog.log')
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(funcName)s: %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(10)


def main():
    module = AnsibleModule(
        argument_spec=dict(),
        supports_check_mode=True
    )
    final_dict = {}
    sub_dict = {}
    main_dict = {}
    main_list = list()
    stream1 = open('/var/lib/chaperone/answerfile.yml', 'r')
    dict1 = yaml.load(stream1, Loader=yamlordereddictloader.Loader)
    try:
        # Collect the check_edge_node* flags; their count determines how
        # many edge entries to read out of the answerfile.
        for data in dict1:
            if data.startswith('check_edge_node'):
                sub_dict[data] = dict1[data]
        for count in range(len(sub_dict)):
            hostname = str(count+1) + "_host_name"
            tnnode = "_tn_node" + str(count+1) + "_name"
            for content in dict1:
                if content.startswith('nsx_edge'):
                    if hostname in content:
                        main_dict["host_name"] = dict1[content]
                        logger.info(main_dict)
                    if tnnode in content:
                        main_dict["node_name"] = dict1[content]
                        #logger.info('Node_name {}'.format(dict1[content]))
                        logger.info(main_dict)
                        main_list.append(main_dict)
                        main_dict = {}
        #logger.info(main_list)
        #logger.info(main_dict)
        final_dict['transport_edge_nodes'] = main_list
        module.exit_json(changed=True, id=final_dict, msg="Successfully got the information")
    except Exception as err:
        module.fail_json(changed=False, msg="Failure: %s" % (err))


from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
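# --- Added illustration (not part of the original module) ---
# The loops above key off two naming conventions in the answerfile: flags
# beginning with 'check_edge_node' select how many edge entries to read,
# and keys beginning with 'nsx_edge' carry the per-node fields. A
# hypothetical answerfile fragment (key values are made up) that this
# parser would turn into two entries:
#
#   check_edge_node1: true
#   check_edge_node2: true
#   nsx_edge1_host_name: edge-host-01.example.com
#   nsx_edge_tn_node1_name: edge-tn-01
#   nsx_edge2_host_name: edge-host-02.example.com
#   nsx_edge_tn_node2_name: edge-tn-02
#
# yielding:
#   {'transport_edge_nodes': [
#       {'host_name': 'edge-host-01.example.com', 'node_name': 'edge-tn-01'},
#       {'host_name': 'edge-host-02.example.com', 'node_name': 'edge-tn-02'}]}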
apache-2.0
adieu/django-nonrel
tests/regressiontests/requests/tests.py
26
8301
import time from datetime import datetime, timedelta from StringIO import StringIO from django.core.handlers.modpython import ModPythonRequest from django.core.handlers.wsgi import WSGIRequest, LimitedStream from django.http import HttpRequest, HttpResponse, parse_cookie from django.utils import unittest from django.utils.http import cookie_date class RequestsTests(unittest.TestCase): def test_httprequest(self): request = HttpRequest() self.assertEqual(request.GET.keys(), []) self.assertEqual(request.POST.keys(), []) self.assertEqual(request.COOKIES.keys(), []) self.assertEqual(request.META.keys(), []) def test_wsgirequest(self): request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': StringIO('')}) self.assertEqual(request.GET.keys(), []) self.assertEqual(request.POST.keys(), []) self.assertEqual(request.COOKIES.keys(), []) self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input'])) self.assertEqual(request.META['PATH_INFO'], 'bogus') self.assertEqual(request.META['REQUEST_METHOD'], 'bogus') self.assertEqual(request.META['SCRIPT_NAME'], '') def test_modpythonrequest(self): class FakeModPythonRequest(ModPythonRequest): def __init__(self, *args, **kwargs): super(FakeModPythonRequest, self).__init__(*args, **kwargs) self._get = self._post = self._meta = self._cookies = {} class Dummy: def get_options(self): return {} req = Dummy() req.uri = 'bogus' request = FakeModPythonRequest(req) self.assertEqual(request.path, 'bogus') self.assertEqual(request.GET.keys(), []) self.assertEqual(request.POST.keys(), []) self.assertEqual(request.COOKIES.keys(), []) self.assertEqual(request.META.keys(), []) def test_parse_cookie(self): self.assertEqual(parse_cookie('invalid:key=true'), {}) def test_httprequest_location(self): request = HttpRequest() self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"), 'https://www.example.com/asdf') request.get_host = lambda: 'www.example.com' request.path = '' self.assertEqual(request.build_absolute_uri(location="/path/with:colons"), 'http://www.example.com/path/with:colons') def test_near_expiration(self): "Cookie will expire when an near expiration time is provided" response = HttpResponse() # There is a timing weakness in this test; The # expected result for max-age requires that there be # a very slight difference between the evaluated expiration # time, and the time evaluated in set_cookie(). If this # difference doesn't exist, the cookie time will be # 1 second larger. To avoid the problem, put in a quick sleep, # which guarantees that there will be a time difference. 
expires = datetime.utcnow() + timedelta(seconds=10) time.sleep(0.001) response.set_cookie('datetime', expires=expires) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['max-age'], 10) def test_far_expiration(self): "Cookie will expire when an distant expiration time is provided" response = HttpResponse() response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6)) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT') def test_max_age_expiration(self): "Cookie will expire if max_age is provided" response = HttpResponse() response.set_cookie('max_age', max_age=10) max_age_cookie = response.cookies['max_age'] self.assertEqual(max_age_cookie['max-age'], 10) self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10)) def test_httponly_cookie(self): response = HttpResponse() response.set_cookie('example', httponly=True) example_cookie = response.cookies['example'] # A compat cookie may be in use -- check that it has worked # both as an output string, and using the cookie attributes self.assertTrue('; httponly' in str(example_cookie)) self.assertTrue(example_cookie['httponly']) def test_limited_stream(self): # Read all of a limited stream stream = LimitedStream(StringIO('test'), 2) self.assertEqual(stream.read(), 'te') # Reading again returns nothing. self.assertEqual(stream.read(), '') # Read a number of characters greater than the stream has to offer stream = LimitedStream(StringIO('test'), 2) self.assertEqual(stream.read(5), 'te') # Reading again returns nothing. self.assertEqual(stream.readline(5), '') # Read sequentially from a stream stream = LimitedStream(StringIO('12345678'), 8) self.assertEqual(stream.read(5), '12345') self.assertEqual(stream.read(5), '678') # Reading again returns nothing. self.assertEqual(stream.readline(5), '') # Read lines from a stream stream = LimitedStream(StringIO('1234\n5678\nabcd\nefgh\nijkl'), 24) # Read a full line, unconditionally self.assertEqual(stream.readline(), '1234\n') # Read a number of characters less than a line self.assertEqual(stream.readline(2), '56') # Read the rest of the partial line self.assertEqual(stream.readline(), '78\n') # Read a full line, with a character limit greater than the line length self.assertEqual(stream.readline(6), 'abcd\n') # Read the next line, deliberately terminated at the line end self.assertEqual(stream.readline(4), 'efgh') # Read the next line... just the line end self.assertEqual(stream.readline(), '\n') # Read everything else. self.assertEqual(stream.readline(), 'ijkl') # Regression for #15018 # If a stream contains a newline, but the provided length # is less than the number of provided characters, the newline # doesn't reset the available character count stream = LimitedStream(StringIO('1234\nabcdef'), 9) self.assertEqual(stream.readline(10), '1234\n') self.assertEqual(stream.readline(3), 'abc') # Now expire the available characters self.assertEqual(stream.readline(3), 'd') # Reading again returns nothing. self.assertEqual(stream.readline(2), '') # Same test, but with read, not readline. 
stream = LimitedStream(StringIO('1234\nabcdef'), 9) self.assertEqual(stream.read(6), '1234\na') self.assertEqual(stream.read(2), 'bc') self.assertEqual(stream.read(2), 'd') self.assertEqual(stream.read(2), '') self.assertEqual(stream.read(), '') def test_stream(self): request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')}) self.assertEqual(request.read(), 'name=value') def test_read_after_value(self): """ Reading from request is allowed after accessing request contents as POST or raw_post_data. """ request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')}) self.assertEqual(request.POST, {u'name': [u'value']}) self.assertEqual(request.raw_post_data, 'name=value') self.assertEqual(request.read(), 'name=value') def test_value_after_read(self): """ Construction of POST or raw_post_data is not allowed after reading from request. """ request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')}) self.assertEqual(request.read(2), 'na') self.assertRaises(Exception, lambda: request.raw_post_data) self.assertEqual(request.POST, {}) def test_read_by_lines(self): request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')}) self.assertEqual(list(request), ['name=value'])
bsd-3-clause
sss/calibre-at-bzr
src/calibre/ebooks/oeb/polish/replace.py
3
3150
#!/usr/bin/env python # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' __docformat__ = 'restructuredtext en' import codecs from urlparse import urlparse from cssutils import replaceUrls from calibre.ebooks.chardet import strip_encoding_declarations from calibre.ebooks.oeb.polish.container import guess_type from calibre.ebooks.oeb.base import (OEB_DOCS, OEB_STYLES, rewrite_links) class LinkReplacer(object): def __init__(self, base, container, link_map, frag_map): self.base = base self.frag_map = frag_map self.link_map = link_map self.container = container self.replaced = False def __call__(self, url): name = self.container.href_to_name(url, self.base) if not name: return url nname = self.link_map.get(name, None) if not nname: return url purl = urlparse(url) href = self.container.name_to_href(nname, self.base) if purl.fragment: nfrag = self.frag_map(name, purl.fragment) if nfrag: href += '#%s'%nfrag if href != url: self.replaced = True return href def replace_links(container, link_map, frag_map=lambda name, frag:frag): ncx_type = guess_type('toc.ncx') for name, media_type in container.mime_map.iteritems(): repl = LinkReplacer(name, container, link_map, frag_map) if media_type.lower() in OEB_DOCS: rewrite_links(container.parsed(name), repl) elif media_type.lower() in OEB_STYLES: replaceUrls(container.parsed(name), repl) elif media_type.lower() == ncx_type: for elem in container.parsed(name).xpath('//*[@src]'): src = elem.get('src') nsrc = repl(src) if src != nsrc: elem.set('src', nsrc) if repl.replaced: container.dirty(name) def smarten_punctuation(container, report): from calibre.ebooks.conversion.preprocess import smarten_punctuation for path in container.spine_items: name = container.abspath_to_name(path) changed = False with container.open(name, 'r+b') as f: html = container.decode(f.read()) newhtml = smarten_punctuation(html, container.log) if newhtml != html: changed = True report(_('Smartened punctuation in: %s')%name) newhtml = strip_encoding_declarations(newhtml) f.seek(0) f.truncate() f.write(codecs.BOM_UTF8 + newhtml.encode('utf-8')) if changed: # Add an encoding declaration (it will be added automatically when # serialized) root = container.parsed(name) for m in root.xpath('descendant::*[local-name()="meta" and @http-equiv]'): m.getparent().remove(m) container.dirty(name)
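# --- Added sketch (not part of the original module) ---
# LinkReplacer above is a callable that maps old hrefs to new ones and
# records whether anything changed, so callers know which files to mark
# dirty. A minimal, dependency-free version of the same pattern (the
# class name and the link_map contents here are made up):

class SimpleLinkReplacer(object):
    def __init__(self, link_map):
        self.link_map = link_map
        self.replaced = False

    def __call__(self, url):
        new = self.link_map.get(url, url)
        if new != url:
            self.replaced = True
        return new

# Usage: hand it to any tree-walker that calls back with each URL.
repl = SimpleLinkReplacer({'old/ch1.html': 'text/chapter1.html'})
assert repl('old/ch1.html') == 'text/chapter1.html'
assert repl.replaced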
gpl-3.0
SnakeJenny/TensorFlow
tensorflow/python/ops/control_flow_grad.py
48
8960
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in control_flow_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops # go/tf-wildcard-import # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.control_flow_ops import * from tensorflow.python.ops.gen_control_flow_ops import * # pylint: enable=wildcard-import def _SwitchGrad(op, *grad): """Gradients for a Switch op is calculated using a Merge op. If the switch is a loop switch, it will be visited twice. We create the merge on the first visit, and update the other input of the merge on the second visit. A next_iteration is also added on second visit. """ graph = ops.get_default_graph() # pylint: disable=protected-access op_ctxt = op._get_control_flow_context() grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if isinstance(op_ctxt, WhileContext): merge_grad = grad_ctxt.grad_state.switch_map.get(op) if merge_grad is not None: # This is the second time this Switch is visited. It comes from # the non-exit branch of the Switch, so update the second input # to the Merge. # TODO: Perform shape inference with this new input. if grad[1] is not None: # pylint: disable=protected-access control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1]) # pylint: enable=protected-access return None, None elif grad[0] is not None: # This is the first time this Switch is visited. It comes from # the Exit branch, which is grad[0]. grad[1] is empty at this point. # Use grad[0] for both inputs to merge for now, but update the second # input of merge when we see this Switch the second time. merge_grad = merge([grad[0], grad[0]], name="b_switch")[0] grad_ctxt.grad_state.switch_map[op] = merge_grad return merge_grad, None else: # This is the first time this Switch is visited. It comes from the # Identity branch. Such a Switch has `None` gradient for the Exit branch, # meaning the output is not differentiable. return None, None elif isinstance(op_ctxt, CondContext): good_grad = grad[op_ctxt.branch] zero_grad = grad[1 - op_ctxt.branch] # At this point, we have created zero_grad guarded by the right switch. 
return merge([good_grad, zero_grad], name="cond_grad")[0], None else: false_grad = switch(grad[0], op.inputs[1])[0] true_grad = switch(grad[1], op.inputs[1])[1] return merge([false_grad, true_grad])[0], None ops.RegisterGradient("Switch")(_SwitchGrad) ops.RegisterGradient("RefSwitch")(_SwitchGrad) @ops.RegisterGradient("Merge") def _MergeGrad(op, grad, _): """Gradients for a Merge op are calculated using a Switch op.""" input_op = op.inputs[0].op graph = ops.get_default_graph() # pylint: disable=protected-access op_ctxt = control_flow_ops._GetOutputContext(input_op) grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if isinstance(op_ctxt, WhileContext): # pylint: disable=protected-access return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot) # pylint: enable=protected-access elif isinstance(op_ctxt, CondContext): pred = op_ctxt.pred if grad_ctxt and grad_ctxt.grad_state: # This Merge node is part of a cond within a loop. # The backprop needs to have the value of this predicate for every # iteration. So we must have its values accumulated in the forward, and # use the accumulated values as the predicate for this backprop switch. grad_state = grad_ctxt.grad_state real_pred = grad_state.history_map.get(pred.name) if real_pred is None: # Remember the value of pred for every iteration. grad_ctxt = grad_state.grad_context grad_ctxt.Exit() history_pred = grad_state.AddForwardAccumulator(pred) grad_ctxt.Enter() # Add the stack pop op. If pred.op is in a (outer) CondContext, # the stack pop will be guarded with a switch. real_pred = grad_state.AddBackPropAccumulatedValue(history_pred, pred) grad_state.history_map[pred.name] = real_pred pred = real_pred # pylint: disable=protected-access return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad") # pylint: enable=protected-access else: num_inputs = len(op.inputs) cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)] # pylint: disable=protected-access return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1] for i in xrange(num_inputs)] # pylint: enable=protected-access @ops.RegisterGradient("RefMerge") def _RefMergeGrad(op, grad, _): return _MergeGrad(op, grad, _) @ops.RegisterGradient("Exit") def _ExitGrad(op, grad): """Gradients for an exit op are calculated using an Enter op.""" graph = ops.get_default_graph() # pylint: disable=protected-access grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if not grad_ctxt.back_prop: # The flag `back_prop` is set by users to suppress gradient # computation for this loop. If the attribute `back_prop` is false, # no gradient computation. 
return None # pylint: disable=protected-access if op._get_control_flow_context().grad_state: raise TypeError("Second-order gradient for while loops not supported.") # pylint: enable=protected-access if isinstance(grad, ops.Tensor): grad_ctxt.AddName(grad.name) else: if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)): raise TypeError("Type %s not supported" % type(grad)) grad_ctxt.AddName(grad.values.name) grad_ctxt.AddName(grad.indices.name) dense_shape = grad.dense_shape if dense_shape is not None: grad_ctxt.AddName(dense_shape.name) enter_fn = control_flow_ops._Enter # pylint: disable=protected-access grad_ctxt.Enter() result = enter_fn(grad, grad_ctxt.name, is_constant=False, parallel_iterations=grad_ctxt.parallel_iterations, name="b_exit") grad_ctxt.Exit() return result ops.RegisterGradient("RefExit")(_ExitGrad) @ops.RegisterGradient("NextIteration") def _NextIterationGrad(_, grad): """A forward next_iteration is translated into a backprop identity. Note that the backprop next_iteration is added in switch grad. """ return grad @ops.RegisterGradient("RefNextIteration") def _RefNextIterationGrad(_, grad): return _NextIterationGrad(_, grad) @ops.RegisterGradient("Enter") def _EnterGrad(op, grad): """Gradients for an Enter are calculated using an Exit op. For loop variables, grad is the gradient so just add an exit. For loop invariants, we need to add an accumulator loop. """ graph = ops.get_default_graph() # pylint: disable=protected-access grad_ctxt = graph._get_control_flow_context() # pylint: enable=protected-access if not grad_ctxt.back_prop: # Skip gradient computation, if the attribute `back_prop` is false. return grad if grad_ctxt.grad_state is None: # Pass the gradient through if we are not in a gradient while context. return grad if op.get_attr("is_constant"): # Add a gradient accumulator for each loop invariant. if isinstance(grad, ops.Tensor): result = grad_ctxt.AddBackPropAccumulator(op, grad) elif isinstance(grad, ops.IndexedSlices): result = grad_ctxt.AddBackPropIndexedSlicesAccumulator(op, grad) else: # TODO(yuanbyu, lukasr): Add support for SparseTensor. raise TypeError("Type %s not supported" % type(grad)) else: result = exit(grad) grad_ctxt.loop_exits.append(result) grad_ctxt.ExitResult([result]) return result @ops.RegisterGradient("RefEnter") def _RefEnterGrad(op, grad): return _EnterGrad(op, grad) @ops.RegisterGradient("LoopCond") def _LoopCondGrad(_): """Stop backprop for the predicate of a while loop.""" return None
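# --- Added usage sketch (not part of the original module) ---
# The Enter/Exit/Switch/Merge gradients above are what tf.gradients relies
# on to differentiate through tf.while_loop. A small TF 1.x-style
# illustration, shown here only for reference (it belongs in user code,
# not in this module); passing back_prop=False to tf.while_loop would
# instead hit the early-return branch in _ExitGrad and yield no gradient.
#
#   import tensorflow as tf
#
#   x = tf.constant(2.0)
#   i0 = tf.constant(0)
#   # Square the value five times inside the loop: y == x ** (2 ** 5)
#   _, y = tf.while_loop(lambda i, v: i < 5,
#                        lambda i, v: (i + 1, v * v),
#                        [i0, x])
#   # The gradient flows backward through the Exit, Switch, Merge and
#   # Enter gradient functions registered in this file.
#   dy_dx = tf.gradients(y, x)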
apache-2.0
c86j224s/snippet
Python_asyncio_binary_echo/pyclient2/Lib/site-packages/setuptools/_vendor/packaging/specifiers.py
1107
28025
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import abc import functools import itertools import re from ._compat import string_types, with_metaclass from .version import Version, LegacyVersion, parse class InvalidSpecifier(ValueError): """ An invalid specifier was found, users should refer to PEP 440. """ class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): @abc.abstractmethod def __str__(self): """ Returns the str representation of this Specifier like object. This should be representative of the Specifier itself. """ @abc.abstractmethod def __hash__(self): """ Returns a hash value for this Specifier like object. """ @abc.abstractmethod def __eq__(self, other): """ Returns a boolean representing whether or not the two Specifier like objects are equal. """ @abc.abstractmethod def __ne__(self, other): """ Returns a boolean representing whether or not the two Specifier like objects are not equal. """ @abc.abstractproperty def prereleases(self): """ Returns whether or not pre-releases as a whole are allowed by this specifier. """ @prereleases.setter def prereleases(self, value): """ Sets whether or not pre-releases as a whole are allowed by this specifier. """ @abc.abstractmethod def contains(self, item, prereleases=None): """ Determines if the given item is contained within this specifier. """ @abc.abstractmethod def filter(self, iterable, prereleases=None): """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. """ class _IndividualSpecifier(BaseSpecifier): _operators = {} def __init__(self, spec="", prereleases=None): match = self._regex.search(spec) if not match: raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) self._spec = ( match.group("operator").strip(), match.group("version").strip(), ) # Store whether or not this Specifier should accept prereleases self._prereleases = prereleases def __repr__(self): pre = ( ", prereleases={0!r}".format(self.prereleases) if self._prereleases is not None else "" ) return "<{0}({1!r}{2})>".format( self.__class__.__name__, str(self), pre, ) def __str__(self): return "{0}{1}".format(*self._spec) def __hash__(self): return hash(self._spec) def __eq__(self, other): if isinstance(other, string_types): try: other = self.__class__(other) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): return NotImplemented return self._spec == other._spec def __ne__(self, other): if isinstance(other, string_types): try: other = self.__class__(other) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): return NotImplemented return self._spec != other._spec def _get_operator(self, op): return getattr(self, "_compare_{0}".format(self._operators[op])) def _coerce_version(self, version): if not isinstance(version, (LegacyVersion, Version)): version = parse(version) return version @property def operator(self): return self._spec[0] @property def version(self): return self._spec[1] @property def prereleases(self): return self._prereleases @prereleases.setter def prereleases(self, value): self._prereleases = value def __contains__(self, item): return self.contains(item) def contains(self, item, prereleases=None): # Determine if prereleases are to be allowed or not. 
if prereleases is None: prereleases = self.prereleases # Normalize item to a Version or LegacyVersion, this allows us to have # a shortcut for ``"2.0" in Specifier(">=2") item = self._coerce_version(item) # Determine if we should be supporting prereleases in this specifier # or not, if we do not support prereleases than we can short circuit # logic if this version is a prereleases. if item.is_prerelease and not prereleases: return False # Actually do the comparison to determine if this item is contained # within this Specifier or not. return self._get_operator(self.operator)(item, self.version) def filter(self, iterable, prereleases=None): yielded = False found_prereleases = [] kw = {"prereleases": prereleases if prereleases is not None else True} # Attempt to iterate over all the values in the iterable and if any of # them match, yield them. for version in iterable: parsed_version = self._coerce_version(version) if self.contains(parsed_version, **kw): # If our version is a prerelease, and we were not set to allow # prereleases, then we'll store it for later incase nothing # else matches this specifier. if (parsed_version.is_prerelease and not (prereleases or self.prereleases)): found_prereleases.append(version) # Either this is not a prerelease, or we should have been # accepting prereleases from the begining. else: yielded = True yield version # Now that we've iterated over everything, determine if we've yielded # any values, and if we have not and we have any prereleases stored up # then we will go ahead and yield the prereleases. if not yielded and found_prereleases: for version in found_prereleases: yield version class LegacySpecifier(_IndividualSpecifier): _regex_str = ( r""" (?P<operator>(==|!=|<=|>=|<|>)) \s* (?P<version> [^,;\s)]* # Since this is a "legacy" specifier, and the version # string can be just about anything, we match everything # except for whitespace, a semi-colon for marker support, # a closing paren since versions can be enclosed in # them, and a comma since it's a version separator. ) """ ) _regex = re.compile( r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) _operators = { "==": "equal", "!=": "not_equal", "<=": "less_than_equal", ">=": "greater_than_equal", "<": "less_than", ">": "greater_than", } def _coerce_version(self, version): if not isinstance(version, LegacyVersion): version = LegacyVersion(str(version)) return version def _compare_equal(self, prospective, spec): return prospective == self._coerce_version(spec) def _compare_not_equal(self, prospective, spec): return prospective != self._coerce_version(spec) def _compare_less_than_equal(self, prospective, spec): return prospective <= self._coerce_version(spec) def _compare_greater_than_equal(self, prospective, spec): return prospective >= self._coerce_version(spec) def _compare_less_than(self, prospective, spec): return prospective < self._coerce_version(spec) def _compare_greater_than(self, prospective, spec): return prospective > self._coerce_version(spec) def _require_version_compare(fn): @functools.wraps(fn) def wrapped(self, prospective, spec): if not isinstance(prospective, Version): return False return fn(self, prospective, spec) return wrapped class Specifier(_IndividualSpecifier): _regex_str = ( r""" (?P<operator>(~=|==|!=|<=|>=|<|>|===)) (?P<version> (?: # The identity operators allow for an escape hatch that will # do an exact string match of the version you wish to install. # This will not be parsed by PEP 440 and we cannot determine # any semantic meaning from it. 
This operator is discouraged # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* [^\s]* # We just match everything, except for whitespace # since we are only testing for strict identity. ) | (?: # The (non)equality operators allow for wild card and local # versions to be specified so we have to define these two # operators separately to enable that. (?<===|!=) # Only match for equals and not equals \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? # You cannot use a wild card and a dev or local version # together so group them with a | and make them optional. (?: (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local | \.\* # Wild card syntax of .* )? ) | (?: # The compatible operator requires at least two digits in the # release segment. (?<=~=) # Only match for the compatible operator \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) | (?: # All other operators only allow a sub set of what the # (non)equality operators do. Specifically they do not allow # local versions to be specified nor do they allow the prefix # matching wild cards. (?<!==|!=|~=) # We have special cases for these # operators so we want to make sure they # don't match here. \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) ) """ ) _regex = re.compile( r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) _operators = { "~=": "compatible", "==": "equal", "!=": "not_equal", "<=": "less_than_equal", ">=": "greater_than_equal", "<": "less_than", ">": "greater_than", "===": "arbitrary", } @_require_version_compare def _compare_compatible(self, prospective, spec): # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to # implement this in terms of the other specifiers instead of # implementing it ourselves. The only thing we need to do is construct # the other specifiers. # We want everything but the last item in the version, but we want to # ignore post and dev releases and we want to treat the pre-release as # it's own separate segment. prefix = ".".join( list( itertools.takewhile( lambda x: (not x.startswith("post") and not x.startswith("dev")), _version_split(spec), ) )[:-1] ) # Add the prefix notation to the end of our string prefix += ".*" return (self._get_operator(">=")(prospective, spec) and self._get_operator("==")(prospective, prefix)) @_require_version_compare def _compare_equal(self, prospective, spec): # We need special logic to handle prefix matching if spec.endswith(".*"): # In the case of prefix matching we want to ignore local segment. prospective = Version(prospective.public) # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment. 
spec = _version_split(spec[:-2]) # Remove the trailing .* # Split the prospective version out by dots, and pretend that there # is an implicit dot in between a release segment and a pre-release # segment. prospective = _version_split(str(prospective)) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. prospective = prospective[:len(spec)] # Pad out our two sides with zeros so that they both equal the same # length. spec, prospective = _pad_version(spec, prospective) else: # Convert our spec string into a Version spec = Version(spec) # If the specifier does not have a local segment, then we want to # act as if the prospective version also does not have a local # segment. if not spec.local: prospective = Version(prospective.public) return prospective == spec @_require_version_compare def _compare_not_equal(self, prospective, spec): return not self._compare_equal(prospective, spec) @_require_version_compare def _compare_less_than_equal(self, prospective, spec): return prospective <= Version(spec) @_require_version_compare def _compare_greater_than_equal(self, prospective, spec): return prospective >= Version(spec) @_require_version_compare def _compare_less_than(self, prospective, spec): # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec) # Check to see if the prospective version is less than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. if not prospective < spec: return False # This special case is here so that, unless the specifier itself # includes is a pre-release version, that we do not accept pre-release # versions for the version mentioned in the specifier (e.g. <3.1 should # not match 3.1.dev0, but should match 3.0.dev0). if not spec.is_prerelease and prospective.is_prerelease: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # less than the spec version *and* it's not a pre-release of the same # version in the spec. return True @_require_version_compare def _compare_greater_than(self, prospective, spec): # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec) # Check to see if the prospective version is greater than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. if not prospective > spec: return False # This special case is here so that, unless the specifier itself # includes is a post-release version, that we do not accept # post-release versions for the version mentioned in the specifier # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). if not spec.is_postrelease and prospective.is_postrelease: if Version(prospective.base_version) == Version(spec.base_version): return False # Ensure that we do not allow a local version of the version mentioned # in the specifier, which is techincally greater than, to match. if prospective.local is not None: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # greater than the spec version *and* it's not a pre-release of the # same version in the spec. 
return True def _compare_arbitrary(self, prospective, spec): return str(prospective).lower() == str(spec).lower() @property def prereleases(self): # If there is an explicit prereleases set for this, then we'll just # blindly use that. if self._prereleases is not None: return self._prereleases # Look at all of our specifiers and determine if they are inclusive # operators, and if they are if they are including an explicit # prerelease. operator, version = self._spec if operator in ["==", ">=", "<=", "~=", "==="]: # The == specifier can include a trailing .*, if it does we # want to remove before parsing. if operator == "==" and version.endswith(".*"): version = version[:-2] # Parse the version, and if it is a pre-release than this # specifier allows pre-releases. if parse(version).is_prerelease: return True return False @prereleases.setter def prereleases(self, value): self._prereleases = value _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") def _version_split(version): result = [] for item in version.split("."): match = _prefix_regex.search(item) if match: result.extend(match.groups()) else: result.append(item) return result def _pad_version(left, right): left_split, right_split = [], [] # Get the release segment of our versions left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) # Get the rest of our versions left_split.append(left[len(left_split[0]):]) right_split.append(right[len(right_split[0]):]) # Insert our padding left_split.insert( 1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])), ) right_split.insert( 1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])), ) return ( list(itertools.chain(*left_split)), list(itertools.chain(*right_split)), ) class SpecifierSet(BaseSpecifier): def __init__(self, specifiers="", prereleases=None): # Split on , to break each indidivual specifier into it's own item, and # strip each item to remove leading/trailing whitespace. specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] # Parsed each individual specifier, attempting first to make it a # Specifier and falling back to a LegacySpecifier. parsed = set() for specifier in specifiers: try: parsed.add(Specifier(specifier)) except InvalidSpecifier: parsed.add(LegacySpecifier(specifier)) # Turn our parsed specifiers into a frozen set and save them for later. self._specs = frozenset(parsed) # Store our prereleases value so we can use it later to determine if # we accept prereleases or not. self._prereleases = prereleases def __repr__(self): pre = ( ", prereleases={0!r}".format(self.prereleases) if self._prereleases is not None else "" ) return "<SpecifierSet({0!r}{1})>".format(str(self), pre) def __str__(self): return ",".join(sorted(str(s) for s in self._specs)) def __hash__(self): return hash(self._specs) def __and__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif not isinstance(other, SpecifierSet): return NotImplemented specifier = SpecifierSet() specifier._specs = frozenset(self._specs | other._specs) if self._prereleases is None and other._prereleases is not None: specifier._prereleases = other._prereleases elif self._prereleases is not None and other._prereleases is None: specifier._prereleases = self._prereleases elif self._prereleases == other._prereleases: specifier._prereleases = self._prereleases else: raise ValueError( "Cannot combine SpecifierSets with True and False prerelease " "overrides." 
) return specifier def __eq__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif isinstance(other, _IndividualSpecifier): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented return self._specs == other._specs def __ne__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif isinstance(other, _IndividualSpecifier): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented return self._specs != other._specs def __len__(self): return len(self._specs) def __iter__(self): return iter(self._specs) @property def prereleases(self): # If we have been given an explicit prerelease modifier, then we'll # pass that through here. if self._prereleases is not None: return self._prereleases # If we don't have any specifiers, and we don't have a forced value, # then we'll just return None since we don't know if this should have # pre-releases or not. if not self._specs: return None # Otherwise we'll see if any of the given specifiers accept # prereleases, if any of them do we'll return True, otherwise False. return any(s.prereleases for s in self._specs) @prereleases.setter def prereleases(self, value): self._prereleases = value def __contains__(self, item): return self.contains(item) def contains(self, item, prereleases=None): # Ensure that our item is a Version or LegacyVersion instance. if not isinstance(item, (LegacyVersion, Version)): item = parse(item) # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. if prereleases is None: prereleases = self.prereleases # We can determine if we're going to allow pre-releases by looking to # see if any of the underlying items supports them. If none of them do # and this item is a pre-release then we do not allow it and we can # short circuit that here. # Note: This means that 1.0.dev1 would not be contained in something # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 if not prereleases and item.is_prerelease: return False # We simply dispatch to the underlying specs here to make sure that the # given version is contained within all of them. # Note: This use of all() here means that an empty set of specifiers # will always return True, this is an explicit design decision. return all( s.contains(item, prereleases=prereleases) for s in self._specs ) def filter(self, iterable, prereleases=None): # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. if prereleases is None: prereleases = self.prereleases # If we have any specifiers, then we want to wrap our iterable in the # filter method for each one, this will act as a logical AND amongst # each specifier. if self._specs: for spec in self._specs: iterable = spec.filter(iterable, prereleases=bool(prereleases)) return iterable # If we do not have any specifiers, then we need to have a rough filter # which will filter out any pre-releases, unless there are no final # releases, and which will filter out LegacyVersion in general. else: filtered = [] found_prereleases = [] for item in iterable: # Ensure that we some kind of Version class for this item. 
if not isinstance(item, (LegacyVersion, Version)): parsed_version = parse(item) else: parsed_version = item # Filter out any item which is parsed as a LegacyVersion if isinstance(parsed_version, LegacyVersion): continue # Store any item which is a pre-release for later unless we've # already found a final version or we are accepting prereleases if parsed_version.is_prerelease and not prereleases: if not filtered: found_prereleases.append(item) else: filtered.append(item) # If we've found no items except for pre-releases, then we'll go # ahead and use the pre-releases if not filtered and found_prereleases and prereleases is None: return found_prereleases return filtered
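# --- Added usage sketch (not part of the original module) ---
# How the classes defined above compose; the behaviour follows PEP 440,
# and no extra imports are needed since Specifier and SpecifierSet live
# in this module:

spec_set = SpecifierSet(">=1.0,!=1.5,<2.0")
assert "1.6" in spec_set        # __contains__ dispatches to contains()
assert "1.5" not in spec_set    # excluded by !=1.5
assert list(spec_set.filter(["0.9", "1.2", "1.5", "1.9", "2.1"])) == ["1.2", "1.9"]

# Pre-releases are rejected unless explicitly allowed:
assert "1.3a1" not in spec_set
assert spec_set.contains("1.3a1", prereleases=True)

# The compatible-release operator ~=2.2 is equivalent to >=2.2,==2.*:
assert Specifier("~=2.2").contains("2.9")
assert not Specifier("~=2.2").contains("3.0")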
apache-2.0
rmcgibbo/msmbuilder3
msmbuilder3/command/kcentersapp.py
1
5928
import os
import sys
import numpy as np
import mdtraj as md
import tables
from IPython.utils.traitlets import Int, Enum, Instance, Bool, List
from msmbuilder3.config.app import MSMBuilderApp
from msmbuilder3.cluster import KCenters
from msmbuilder3 import DataSet
from .ticaapp import TICAApp
from .vectorapp import VectorApp


class KCentersApp(MSMBuilderApp):
    name = 'kcenters'
    path = 'msmbuilder3.command.kcentersapp.KCentersApp'
    short_description = '''K-Centers clustering'''
    long_description = ''
    n_clusters = Int(100, config=True, help='Number of clusters')
    seed = Int(-1, config=True, help='''The seed serves to initialize the
        first cluster. If -1, the first cluster will be randomly chosen from
        the dataset. Otherwise, `seed` should be an integer between zero and
        one minus the number of samples in the dataset.''')
    source = Enum(['tICA', 'vector', 'precomputed'], default_value='tICA',
        config=True, help='''KCenters takes as input a set of multivariate
        timeseries. Using `tICA`, these timeseries are computed on-the-fly by
        internally building an instance of the `msmb tICA` app, whose output
        is effectively piped (in unix parlance) into this app. To control the
        settings about how this tICA is done, you can pass options to the
        tICAApp using the --tICAApp.<setting>=option syntax. Using `vector`,
        the timeseries can be computed instead directly from your molecular
        dynamics trajectories, by internally building an instance of the
        `msmb vector` app. Alternatively, using `precomputed`, you may pass
        in timeseries data that has been precalculated.''')
    mode = Enum(['fit', 'fit_predict', 'predict'], default_value='fit_predict',
        config=True, help='''Mode in which to operate this app. When
        mode==`fit`, the model will be fit on training data and then the
        model itself will be saved to disk, with the filename given by
        `output`. Using `predict`, you can load up a pre-trained kcenters
        model (the model will be loaded from `load_from`) and use it to
        assign your dataset. Finally, using `fit_predict`, you can run both
        of these steps together, training the model AND using it to assign
        your dataset.''')
    classes = [VectorApp, TICAApp]
    input_provenance = None
    is_fit = Bool(False, config=False, help='Is the model currently fit?')
    breakpoints = List([0], config=False, help='''The index of the
        breakpoints between trajectories. This is necessary to reconstruct
        the trajectories in state-space, since they were concatenated
        together for clustering''')
    traj_filenames = List([''], config=False)

    kcenters = Instance(KCenters, config=False)

    def _kcenters_default(self):
        seed = 'random' if self.seed < 0 else self.seed
        return KCenters(n_clusters=self.n_clusters, seed=seed)

    ticaapp = Instance(TICAApp, config=False)

    def _ticaapp_default(self):
        ticaapp = TICAApp(config=self.config)
        ticaapp.fit()
        return ticaapp

    vectorapp = Instance(VectorApp, config=False)

    def _vectorapp_default(self):
        return VectorApp(config=self.config)

    def start(self):
        self.fit()
        if self.mode == 'fit':
            self.log.info('Saving fit KCenters model to `%s`' % self.output)
            with tables.open_file(self.output, 'w') as f:
                self.kcenters.to_pytables(f.root)
        elif self.mode in ['fit_predict', 'predict']:
            self.log.info('Writing DataSet: %s' % self.output)
            dataset = DataSet(self.output, mode='w', name='KCenters')
            if self.source == 'precomputed':
                dataset.provenance = self.input_provenance
            for i, (data, fn) in enumerate(self.yield_transform(with_filenames=True)):
                dataset[i] = data
                dataset.set_trajfn(i, fn)
            dataset.close()
        else:
            raise RuntimeError(self.mode)

    def fit(self):
        if self.is_fit:
            return
        if self.mode == 'predict':
            # Don't run the fit, just load a prefitted model from disk
            with tables.open_file(self.load_from) as f:
                self.kcenters = KCenters.from_pytables(f.root.KCenters)
        else:
            dataset = []
            for data, fn in self._yield_input(with_filenames=True):
                self.breakpoints.append(self.breakpoints[-1] + len(data))
                self.traj_filenames.append(fn)
                dataset.append(data)
            dataset = np.concatenate(dataset)
            self.log.info('** Starting fitting KCenters...')
            self.kcenters.fit(dataset)
            self.log.info('** Finished fitting KCenters')
        self.is_fit = True

    def yield_transform(self, with_filenames=False):
        self.fit()
        n_trajs = len(self.breakpoints) - 1
        for i in range(n_trajs):
            labels = self.kcenters.labels_[self.breakpoints[i]:self.breakpoints[i+1]]
            if with_filenames:
                yield labels, self.traj_filenames[i+1]
            else:
                yield labels

    def _yield_input(self, with_filenames=False):
        if self.source == 'tICA':
            for data in self.ticaapp.yield_transform(with_filenames):
                yield data
        elif self.source == 'vector':
            for data in self.vectorapp.yield_transform(with_filenames):
                yield data
        elif self.source == 'precomputed':
            dataset = DataSet(self.input)
            for key in dataset.keys():
                if with_filenames:
                    yield dataset[key], dataset.get_trajfn(key)
                else:
                    yield dataset[key]
            self.input_provenance = dataset.provenance
            dataset.close()
        else:
            raise RuntimeError(self.source)
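# --- Added sketch (not part of the original module) ---
# For reference, k-centers clustering is the classic farthest-point
# (Gonzalez) scheme. A toy NumPy version, assuming Euclidean distance and
# a fixed first-center index; the real KCenters estimator is imported
# above, this is only an illustration:

def kcenters_sketch(X, n_clusters, seed=0):
    import numpy as np
    centers = [seed]                    # index of the first center
    dist = np.linalg.norm(X - X[seed], axis=1)
    for _ in range(n_clusters - 1):
        new = int(np.argmax(dist))      # farthest remaining point
        centers.append(new)
        dist = np.minimum(dist, np.linalg.norm(X - X[new], axis=1))
    # label each sample by its nearest center
    d = np.linalg.norm(X[:, None, :] - X[np.array(centers)][None, :, :], axis=2)
    return np.array(centers), d.argmin(axis=1)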
gpl-3.0
hbutau/vimdotfiles
pymode/libs2/rope/refactor/importutils/importinfo.py
22
5759
class ImportStatement(object):
    """Represent an import in a module

    `readonly` attribute controls whether this import can be changed
    by import actions or not.

    """

    def __init__(self, import_info, start_line, end_line,
                 main_statement=None, blank_lines=0):
        self.start_line = start_line
        self.end_line = end_line
        self.readonly = False
        self.main_statement = main_statement
        self._import_info = None
        self.import_info = import_info
        self._is_changed = False
        self.new_start = None
        self.blank_lines = blank_lines

    def _get_import_info(self):
        return self._import_info

    def _set_import_info(self, new_import):
        if not self.readonly and \
           new_import is not None and not new_import == self._import_info:
            self._is_changed = True
            self._import_info = new_import

    import_info = property(_get_import_info, _set_import_info)

    def get_import_statement(self):
        if self._is_changed or self.main_statement is None:
            return self.import_info.get_import_statement()
        else:
            return self.main_statement

    def empty_import(self):
        self.import_info = ImportInfo.get_empty_import()

    def move(self, lineno, blank_lines=0):
        self.new_start = lineno
        self.blank_lines = blank_lines

    def get_old_location(self):
        return self.start_line, self.end_line

    def get_new_start(self):
        return self.new_start

    def is_changed(self):
        return self._is_changed or \
            (self.new_start is not None and self.new_start != self.start_line)

    def accept(self, visitor):
        return visitor.dispatch(self)


class ImportInfo(object):

    def get_imported_primaries(self, context):
        pass

    def get_imported_names(self, context):
        return [primary.split('.')[0]
                for primary in self.get_imported_primaries(context)]

    def get_import_statement(self):
        pass

    def is_empty(self):
        pass

    def __hash__(self):
        return hash(self.get_import_statement())

    def _are_name_and_alias_lists_equal(self, list1, list2):
        if len(list1) != len(list2):
            return False
        for pair1, pair2 in zip(list1, list2):
            if pair1 != pair2:
                return False
        return True

    def __eq__(self, obj):
        return isinstance(obj, self.__class__) and \
            self.get_import_statement() == obj.get_import_statement()

    def __ne__(self, obj):
        return not self.__eq__(obj)

    @staticmethod
    def get_empty_import():
        return EmptyImport()


class NormalImport(ImportInfo):

    def __init__(self, names_and_aliases):
        self.names_and_aliases = names_and_aliases

    def get_imported_primaries(self, context):
        result = []
        for name, alias in self.names_and_aliases:
            if alias:
                result.append(alias)
            else:
                result.append(name)
        return result

    def get_import_statement(self):
        result = 'import '
        for name, alias in self.names_and_aliases:
            result += name
            if alias:
                result += ' as ' + alias
            result += ', '
        return result[:-2]

    def is_empty(self):
        return len(self.names_and_aliases) == 0


class FromImport(ImportInfo):

    def __init__(self, module_name, level, names_and_aliases):
        self.module_name = module_name
        self.level = level
        self.names_and_aliases = names_and_aliases

    def get_imported_primaries(self, context):
        if self.names_and_aliases[0][0] == '*':
            module = self.get_imported_module(context)
            return [name for name in module if not name.startswith('_')]
        result = []
        for name, alias in self.names_and_aliases:
            if alias:
                result.append(alias)
            else:
                result.append(name)
        return result

    def get_imported_resource(self, context):
        """Get the imported resource

        Returns `None` if module was not found.
        """
        if self.level == 0:
            return context.project.find_module(
                self.module_name, folder=context.folder)
        else:
            return context.project.find_relative_module(
                self.module_name, context.folder, self.level)

    def get_imported_module(self, context):
        """Get the imported `PyModule`

        Raises `rope.base.exceptions.ModuleNotFoundError` if module
        could not be found.
        """
        if self.level == 0:
            return context.project.get_module(
                self.module_name, context.folder)
        else:
            return context.project.get_relative_module(
                self.module_name, context.folder, self.level)

    def get_import_statement(self):
        result = 'from ' + '.' * self.level + self.module_name + ' import '
        for name, alias in self.names_and_aliases:
            result += name
            if alias:
                result += ' as ' + alias
            result += ', '
        return result[:-2]

    def is_empty(self):
        return len(self.names_and_aliases) == 0

    def is_star_import(self):
        return len(self.names_and_aliases) > 0 and \
            self.names_and_aliases[0][0] == '*'


class EmptyImport(ImportInfo):

    names_and_aliases = []

    def is_empty(self):
        return True

    def get_imported_primaries(self, context):
        return []


class ImportContext(object):

    def __init__(self, project, folder):
        self.project = project
        self.folder = folder
lgpl-3.0
zittix/cappuccino
Tools/Documentation/support/massage_text.py
24
1378
#!/usr/bin/env python
#
# $1    Generated documentation directory
#
# The following transforms are performed:
# - Strip useless "[implementation]" littering the docs
# - Change "Static Public Member Functions" to "Class Methods"
# - Change "Public Member Functions" to "Instance Methods"
# - Change "Member Function Documentation" to "Method Documentation"
# - Remove empty line left at the end of multi-parameter method prototypes

import glob
import os.path
import re
import sys

transforms = [
    re.compile(r"<code> \[implementation\]</code>"), "&emsp;",
    re.compile(r"Static Public Member Functions"), "Class Methods",
    re.compile(r"Public Member Functions"), "Instance Methods",
    re.compile(r"Protected Attributes"), "Instance Variables",
    re.compile(r"Member Function Documentation"), "Method Documentation",
    re.compile(r"Member Data Documentation"), "Instance Variable Documentation",
    re.compile(r"(AppKit|Foundation)\.doc"), r"\1",
    re.compile(r"\s*<tr>\n(\s*<td></td>\n){2}\s*(<td></td>){2}<td>&emsp;</td>\n\s*</tr>"), ""
]

html = glob.glob(os.path.join(sys.argv[1], "*.html"))

for count, filename in enumerate(html):
    f = open(filename, "r+")
    text = f.read()

    i = 0
    while i < len(transforms):
        text = transforms[i].sub(transforms[i + 1], text)
        i += 2

    f.seek(0)
    f.truncate()
    f.write(text)
    f.close()
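
# Illustrative sketch (not part of the original script): the pattern/replacement
# pairs in `transforms` are stored flat, so the loop above consumes them two at
# a time. The sample HTML fragment below is made up.
def _demo():
    text = "Static Public Member Functions<code> [implementation]</code>"
    i = 0
    while i < len(transforms):
        text = transforms[i].sub(transforms[i + 1], text)
        i += 2
    print(text)  # -> "Class Methods&emsp;"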
lgpl-2.1
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/trial/unittest.py
3
1040
# -*- test-case-name: twisted.trial.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Things likely to be used by writers of unit tests.
"""

from __future__ import division, absolute_import

# Define the public API from the two implementation modules
from twisted.trial._synctest import (
    FailTest, SkipTest, SynchronousTestCase, PyUnitResultAdapter, Todo,
    makeTodo)
from twisted.trial._asynctest import TestCase

from twisted.python.compat import _PY3

if not _PY3:
    from twisted.trial._asyncrunner import (
        TestSuite, TestDecorator, decorate)

# Further obscure the origins of these objects, to reduce surprise (and this
# is what the values were before code got shuffled around between files, but
# was otherwise unchanged).
FailTest.__module__ = SkipTest.__module__ = __name__

__all__ = [
    'decorate',
    'FailTest',
    'makeTodo',
    'PyUnitResultAdapter',
    'SkipTest',
    'SynchronousTestCase',
    'TestCase',
    'TestDecorator',
    'TestSuite',
    'Todo',
    ]
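
# Illustrative sketch (not part of the original module): a minimal test case
# written against the public API re-exported above, shown as a comment so the
# module body itself is unchanged.
#
#     from twisted.trial import unittest
#
#     class AdditionTests(unittest.SynchronousTestCase):
#         def test_sum(self):
#             self.assertEqual(1 + 1, 2)
#
# Run with `trial <module>`. SynchronousTestCase covers tests that never
# return a Deferred; TestCase adds support for asynchronous tests.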
mit
ljschumacher/tierpsy-tracker
tierpsy/analysis/ske_create/segWormPython/cleanWorm.py
2
22911
# -*- coding: utf-8 -*-
"""
Created on Fri May 22 18:10:04 2015

@author: ajaver
"""
from .cython_files.cleanWorm_cython import extremaPeaksCircDist, removeSmallSegments, cleanContour
from .cython_files.circCurvature import circCurvature

import numpy as np


def circCurvature_old(points, edgeLength):
    '''
    TODO: This is the help from segworm, it must be changed, particularly copyright...
    %CIRCCURVATURE Compute the curvature for a clockwise, circularly-connected
    %vector of points.
    %
    %   ANGLES = CIRCCURVATURE(POINTS, EDGELENGTH)
    %
    %   ANGLES = CIRCCURVATURE(POINTS, EDGELENGTH, CHAINCODELENGTHS)
    %
    %   Inputs:
    %       points           - the vector of clockwise, circularly-connected
    %                          points ((x,y) pairs).
    %       edgeLength       - the length of edges from the angle vertex.
    %       chainCodeLengths - the chain-code length at each point;
    %                          if empty, the array indices are used instead
    %   Output:
    %       angles - the angles of curvature per point (0 = none to +-180 =
    %                maximum curvature). The sign represents whether the angle
    %                is convex (+) or concave (-).
    %
    % See also CURVATURE, CIRCCOMPUTECHAINCODELENGTHS
    %
    %
    % © Medical Research Council 2012
    % You will not remove any copyright or other notices from the Software;
    % you must reproduce all copyright notices and other proprietary
    % notices on any copies of the Software.'''
    #%%
    edgeLength = int(edgeLength)
    #% Initialize the edges.
    p1 = np.roll(points, edgeLength, axis=0)
    p2 = np.roll(points, -edgeLength, axis=0)

    t2 = np.arctan2(points[:, 0] - p2[:, 0], points[:, 1] - p2[:, 1])
    t1 = np.arctan2(p1[:, 0] - points[:, 0], p1[:, 1] - points[:, 1])

    #% Use the difference in tangents to measure the angle.
    angles = t2 - t1
    angles[angles > np.pi] -= 2 * np.pi
    angles[angles < -np.pi] += 2 * np.pi
    angles = angles * 180 / np.pi
    #%%
    return angles


def circConv(a, b):
    '''
    TODO: This is the help from segworm, it must be changed, particularly copyright...
    %CIRCCONV Convolve the circularly connected vector a with b.
    %
    %   [C] = CIRCCONV(A, B)
    %
    %   Inputs:
    %       a - a circularly connected vector
    %       b - the vector to convolve with a
    %
    %   Outputs:
    %       c - the convolution of the circularly connected vector a with b
    %
    %
    % © Medical Research Council 2012
    % You will not remove any copyright or other notices from the Software;
    % you must reproduce all copyright notices and other proprietary
    % notices on any copies of the Software.'''

    assert a.size > b.size  # maybe it would be better to replace with a warning

    #% Wrap the ends of A and convolve with B.
    wrapSize = int(np.ceil(b.size / 2))
    wrapA = np.lib.pad(a, (wrapSize, wrapSize), 'wrap')
    wrapA = np.convolve(wrapA, b, 'same')

    #% Strip away the wrapped ends of A.
    # segworm returns this; I think it might be more correct to use
    # wrapA[wrapSize:-wrapSize], but there might be a reason so I leave
    # it like this.
    return wrapA[wrapSize + 1:wrapA.size - wrapSize + 1]


def circSmooth(angles, blurLength):
    if blurLength > 1:
        blurWin = np.empty(int(blurLength))
        blurWin.fill(1. / blurLength)
        return circConv(angles, blurWin)
    else:
        return angles


def getPossibleConnections(worm_contour, maxI, minI, nearSize):
    #%%
    # TODO There is a bug in this function, where the size of the array conns
    # is too small to hold all "possible connections"

    #% Connect sharp convexities that are nearby on the contour and/or,
    #% nearby in distance and separated by a sharp concavity.
    #% Note: the worm's width is approximately the size of a muscle segment.
    #% Binarization may yield a split with diagonally-offset, forking
    #% convexities. Therefore, 2 segments is a good size to bound the
    #% distance between nearby, split convexities.
    #% Note 2: the connections are organized as the vector triplet:
    #% [startContourIndex endContourIndex isWrapping]
    #% Contour points between startContourIndex and endContourIndex are removed.

    conns = np.zeros((maxI.size, 4))
    connsI = 0  # % the current index for connections
    for i in range(0, (maxI.size - 1)):
        #% Are there any sharp convexities nearby?
        for j in range(i + 1, maxI.size):
            R = np.sqrt(np.sum((worm_contour[maxI[i], :] - worm_contour[maxI[j], :])**2))
            if R <= nearSize:
                #% Which side is shorter?
                #% Side1 is continuous and goes from start (iI) to end (jI)
                #% in positive, index increments.
                #% Side2 wraps and always goes from start (iI) to end (jI)
                #% in negative, index increments.
                iI = maxI[i]
                jI = maxI[j]
                dSide1 = jI - iI
                dSide2 = iI + worm_contour.shape[0] - jI

                if dSide1 < dSide2:
                    #% The continuous side is shorter.
                    #% Is the convexity nearby on the contour.
                    if dSide1 <= nearSize:
                        conns[connsI, :] = np.array((iI, jI, 0, dSide1))
                        connsI += 1
                    #% Is there a concavity separating us on our shorter,
                    #% continuous side?
                    else:
                        for mini in minI:
                            if mini > iI and mini < jI:
                                conns[connsI, :] = np.array((iI, jI, 0, dSide1))
                                connsI += 1
                                break
                else:
                    #% The wrapping side is shorter so check it instead.
                    if dSide2 <= nearSize:
                        conns[connsI, :] = np.array((jI, iI, 1, dSide2))
                        connsI += 1
                    #% Is there a concavity separating us on our shorter,
                    #% wrapping side?
                    else:
                        for mini in minI:
                            if mini < iI or mini > jI:
                                conns[connsI, :] = np.array((jI, iI, 1, dSide2))
                                connsI += 1
                                break

    conns = conns[:connsI, :].copy()
    if conns.shape[0] > 1:
        #% Sort the connections by size if there is more than one
        conns = conns[conns[:, -1].argsort(), ]
    return conns


def connectPeaks(conns, maxI):
    #%%
    #% Connect the peaks until there are only 2 left.
    numPeaks = maxI.size
    if numPeaks > 2 and conns.shape[0] >= 2:
        peaks_index = np.zeros((numPeaks))
        peaks_label = np.zeros((numPeaks))

        peaks_index[0:2] = conns[0, 0:2]  # % connect the peaks
        peaks_label[0:2] = 1  # % label the new, unique peak connection

        j = 2  # % the peaks index
        label = 2  # % the unique peak label index
        numPeaks = numPeaks - 1  # % the number of unique peaks
        i = 1  # % the conns index
        while numPeaks > 2 and i < conns.shape[0]:
            #% Are either of the peaks new?
            peak1_label = peaks_label[peaks_index[0:j] == conns[i, 0]]
            peak2_label = peaks_label[peaks_index[0:j] == conns[i, 1]]

            #% Both peaks are new.
            if peak1_label.size == 0:
                if peak2_label.size == 0:
                    peaks_index[j:(j + 2)] = conns[i, 0:2]
                    peaks_label[j:(j + 2)] = label
                    j = j + 2
                    label = label + 1
                #% The first peak is new.
                else:
                    peaks_index[j] = conns[i, 0]
                    peaks_label[j] = peak2_label[0]
                    j += 1
                    #% We lost a peak to the connection.
                    numPeaks -= 1
            #% The second peak is new.
            elif peak2_label.size == 0:
                peaks_index[j] = conns[i, 1]
                peaks_label[j] = peak1_label[0]
                j = j + 1
                #% We lost a peak to the connection.
                numPeaks -= 1
            #% Relabel the second peak and its connections.
            elif peak1_label < peak2_label:
                peaks_label[peaks_label[0:j] == peak2_label] = peak1_label
                #% We lost a peak to the connection.
                numPeaks -= 1
            #% Relabel the first peak and its connections.
            elif peak1_label > peak2_label:
                peaks_label[peaks_label[0:j] == peak1_label] = peak2_label
                #% We lost a peak to the connection.
                numPeaks -= 1

            #% Advance.
            i += 1
        conns = conns[:i + 1, :]
    #%%
    return conns


def connectConnections(conns):
    if conns.shape[0] == 0:
        return conns
    #%%
    #% Connect the connections.
    prevConnsSize = conns.shape[0]
    newConnsI = 0  # % the current index for new connections
    #conns_ori = conns.copy()
    while newConnsI < prevConnsSize:
        # % the new connections (pre-allocate memory)
        newConns = np.zeros((2 * conns.shape[0], 3))
        # print newConns.shape
        newConnsI = 0
        for i in range(conns.shape[0]):
            connected = False  # % have we made any connections?
            for j in range(i + 1, conns.shape[0]):
                #% Are both connections continuous?
                if not conns[i, 2]:
                    if not conns[j, 2]:
                        #% Does connection j intersect i?
                        if conns[i, 1] - conns[i, 0] >= conns[j, 1] - conns[j, 0]:
                            if (conns[i, 0] <= conns[j, 0] and conns[i, 1] >= conns[j, 0]) or \
                               (conns[i, 0] <= conns[j, 1] and conns[i, 1] >= conns[j, 1]):
                                #% Take the union of connections i and j.
                                newConns[newConnsI, 0] = min(conns[i, 0], conns[j, 0])
                                newConns[newConnsI, 1] = max(conns[i, 1], conns[j, 1])
                                newConns[newConnsI, 2] = 0
                                newConnsI += 1
                                connected = True
                        #% Does connection i intersect j?
                        else:
                            if (conns[i, 0] >= conns[j, 0] and conns[i, 0] <= conns[j, 1]) or \
                               (conns[i, 1] >= conns[j, 0] and conns[i, 1] <= conns[j, 1]):
                                #% Take the union of connections i and j.
                                newConns[newConnsI, 0] = min(conns[i, 0], conns[j, 0])
                                newConns[newConnsI, 1] = max(conns[i, 1], conns[j, 1])
                                newConns[newConnsI, 2] = 0
                                newConnsI += 1
                                connected = True
                    #% Connection j wraps.
                    else:
                        #% Add connection i to the beginning of j.
                        justConnected = False  # % did we just connect?
                        if conns[i, 1] >= conns[j, 0]:
                            newConns[newConnsI, 0] = min(conns[i, 0], conns[j, 0])
                            newConns[newConnsI, 1] = conns[j, 1]
                            newConns[newConnsI, 2] = 1
                            newConnsI = newConnsI + 1
                            connected = True
                            justConnected = True
                        #% Add connection i to the end of j.
                        if conns[i, 0] <= conns[j, 1]:
                            if justConnected:
                                newConns[newConnsI - 1, 1] = max(conns[i, 1], conns[j, 1])
                            else:
                                newConns[newConnsI, 0] = conns[j, 0]
                                newConns[newConnsI, 1] = max(conns[i, 1], conns[j, 1])
                                newConns[newConnsI, 2] = 1
                                newConnsI = newConnsI + 1
                                connected = True
                #% Are both connections wrapping?
                else:
                    if conns[j, 2]:
                        #% Take the union of connections i and j.
                        newConns[newConnsI, 0] = min(conns[i, 0], conns[j, 0])
                        newConns[newConnsI, 1] = max(conns[i, 1], conns[j, 1])
                        newConns[newConnsI, 2] = 1
                        newConnsI = newConnsI + 1
                        connected = True
                    #% Connection j is continuous.
                    else:
                        #% Add connection j to the beginning of i.
                        justConnected = False  # % did we just connect?
                        if conns[i, 0] <= conns[j, 1]:
                            newConns[newConnsI, 0] = min(conns[i, 0], conns[j, 0])
                            newConns[newConnsI, 1] = conns[i, 1]
                            newConns[newConnsI, 2] = 1
                            newConnsI = newConnsI + 1
                            connected = True
                            justConnected = True
                        #% Add connection j to the end of i.
                        if conns[i, 1] >= conns[j, 0]:
                            if justConnected:
                                newConns[newConnsI - 1, 1] = max(conns[i, 1], conns[j, 1])
                            else:
                                newConns[newConnsI, 0] = conns[i, 0]
                                newConns[newConnsI, 1] = max(conns[i, 1], conns[j, 1])
                                newConns[newConnsI, 2] = 1
                                newConnsI = newConnsI + 1
                                connected = True

            #% Add the connection.
            if not connected:
                if newConnsI < newConns.shape[0]:
                    newConns[newConnsI, :] = conns[i, 0:3]
                else:
                    newConns = np.vstack((newConns, conns[i, 0:3]))
                newConnsI = newConnsI + 1

        #% Collapse any extra memory.
        newConns = newConns[:newConnsI, :]

        #% Have we made any new connections?
        prevConnsSize = conns.shape[0]
        conns = newConns
    #%%
    return conns


def connectSplits(conns, worm_contour, maxI, minI):
    #%%
    #% Connect the contour splits.
    for i in range(conns.shape[0]):
        #% Connect the continuous contour split.
        if not conns[i, 2]:
            minI = conns[i, 0]
            maxI = conns[i, 1]
            minP = worm_contour[minI, :]
            maxP = worm_contour[maxI, :]
            points = maxI - minI + 1
            worm_contour[minI:maxI + 1, 0] = np.round(np.linspace(minP[0], maxP[0], points))
            worm_contour[minI:maxI + 1, 1] = np.round(np.linspace(minP[1], maxP[1], points))
        #% Connect the wrapping contour split.
        else:
            minI = conns[i, 1]
            maxI = conns[i, 0]
            minP = worm_contour[minI, :]
            maxP = worm_contour[maxI, :]
            points = minI + worm_contour.shape[0] - maxI + 1
            interPoints = np.zeros((points, 2))
            interPoints[:, 0] = np.linspace(maxP[0], minP[0], points)
            interPoints[:, 1] = np.linspace(maxP[1], minP[1], points)
            worm_contour[maxI:, :] = np.round(interPoints[0:-minI - 1, :])
            worm_contour[:minI + 1, :] = np.round(interPoints[-minI - 1:, :])
    #%%
    return worm_contour


def cleanWorm(contour, cWormSegs):
    '''%CLEANWORM Clean up the worm contour by connecting any splits ends.
    %
    %   CONTOUR = CLEANWORM(CONTOUR, WORMSEGSIZE)
    %
    %   Note: the worm's contour is still rough, especially at any split ends.
    %   Therefore, index lengths, as opposed to chain-code lengths, are
    %   used as the distance metric over the worm's contour.
    %
    %   Inputs:
    %       contour     - the clockwise, circularly-connected worm contour.
    %       wormSegSize - the size (in contour points) of a worm segment.
    %                     Note: The worm's contour is roughly divided into 50
    %                     segments of musculature (i.e., hinges that represent
    %                     degrees of freedom).
    %                     Warning: before cleaning, the length of the contour
    %                     can vary significantly: from 1/4 its correct size, if
    %                     the worm is coiled up with its head and tail touching
    %                     its body, 180 degrees apart on the coil; to 2 times
    %                     its correct size, if the head and tail are both split
    %                     by invaginations that reach 1/4 into its body.
    %                     Additionally, there are various permutations in
    %                     between these extremes. Therefore, we use carefully
    %                     chosen approximations that are fail-safe to within a
    %                     large margin. Moreover, we use several other tricks
    %                     to ensure we don't incorrectly heal false worm splits
    %                     (e.g., we check for a sharp concavity before joining
    %                     sharp convexities). But, we remain labile in extreme
    %                     cases (e.g., omega bends where the head and tail are
    %                     very proximal).
    %
    %   Output:
    %       contour - the cleaned up worm contour.
    %
    %
    % © Medical Research Council 2012
    % You will not remove any copyright or other notices from the Software;
    % you must reproduce all copyright notices and other proprietary
    % notices on any copies of the Software.'''

    wormSegSize = round(contour.shape[0] / cWormSegs)
    angles = circCurvature(contour, wormSegSize)

    #% On a small scale, noise causes contour imperfections that shift an angle
    #% from its correct location. Therefore, blurring angles by averaging them
    #% with their neighbors can localize them better.
    blurLength = np.ceil(wormSegSize / 2.)
    mAngles = circSmooth(angles, blurLength)

    #% Is the worm contour split at the head and/or tail?
    #% Note: often the head and tail have light colored internals that, when
    #% binarized, split the head and/or tail into two or more pieces.
    #% Note 2: We don't use the blurred angles for concavities. Unfortunately,
    #% blurring can erase high-frequency minima. Moreover, we don't need
    #% any improvements in localizing these concavities.
    #%%
    maxP, maxI = extremaPeaksCircDist(1, mAngles, wormSegSize)
    minP, minI = extremaPeaksCircDist(-1, angles, wormSegSize)
    # if DEBUG:
    #     plt.figure()
    #     plt.plot(mAngles)
    #     plt.plot(angles)
    #
    #     plt.plot(minI, minP, 'og')
    #     plt.plot(maxI, maxP, 'xr')

    maxI = maxI[maxP > 60]
    minI = minI[minP < -90]

    #% Do we have multiple sharp convexities (potential contour splits) that are
    #% nearby on the contour and/or, nearby in distance and separated by a sharp
    #% concavity?
    nearSize = 2 * wormSegSize  # % a nearby distance
    if minI.size > 0 or (maxI.size > 0 and
                         (np.any(np.diff(maxI) <= nearSize) or
                          (maxI[0] + mAngles.size - maxI[-1]) <= nearSize)):
        conns = getPossibleConnections(contour, maxI, minI, nearSize)

        #% Clean up the contour.
        if conns.shape[0] > 1:
            conns = connectPeaks(conns, maxI)
            conns = connectConnections(conns)
            contour = connectSplits(conns, contour, maxI, minI)

            #% Clean up the contour.
            contour = cleanContour(contour)

    if contour.shape[0] > 2:
        contour, keep = removeSmallSegments(contour)

    return contour
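
# Illustrative sketch (not part of the original module): circular smoothing of
# a toy angle signal with circSmooth/circConv. The signal below is made up; a
# real call site passes the per-point curvature angles of a worm contour.
if __name__ == '__main__':
    toy_angles = np.sin(np.linspace(0, 2 * np.pi, 100, endpoint=False)) * 90
    smoothed = circSmooth(toy_angles, 5)
    # The wrapped moving average preserves the array length, matching the
    # circular connectivity of a contour.
    assert smoothed.shape == toy_angles.shape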
mit