repo_name: stringlengths (5 to 100)
path: stringlengths (4 to 299)
copies: stringclasses (990 values)
size: stringlengths (4 to 7)
content: stringlengths (666 to 1.03M)
license: stringclasses (15 values)
hash: int64 (-9,223,351,895,964,839,000 to 9,223,297,778B)
line_mean: float64 (3.17 to 100)
line_max: int64 (7 to 1k)
alpha_frac: float64 (0.25 to 0.98)
autogenerated: bool (1 class)
repo_name: openstack/tacker
path: tacker/db/migration/__init__.py
copies: 3
size: 3774
content:

# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from alembic import op
import contextlib
import sqlalchemy as sa
from sqlalchemy.engine import reflection


def alter_enum(table, column, enum_type, nullable):
    bind = op.get_bind()
    engine = bind.engine
    if engine.name == 'postgresql':
        values = {'table': table, 'column': column, 'name': enum_type.name}
        op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
        enum_type.create(bind, checkfirst=False)
        op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
                   "old_%(column)s" % values)
        op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
        op.execute("UPDATE %(table)s SET %(column)s = "
                   "old_%(column)s::text::%(name)s" % values)
        op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s"
                   % values)
        op.execute("DROP TYPE old_%(name)s" % values)
    else:
        op.alter_column(table, column, type_=enum_type,
                        existing_nullable=nullable)


def create_foreign_key_constraint(table_name, fk_constraints):
    for fk in fk_constraints:
        op.create_foreign_key(
            constraint_name=fk['name'],
            source_table=table_name,
            referent_table=fk['referred_table'],
            local_cols=fk['constrained_columns'],
            remote_cols=fk['referred_columns'],
            ondelete=fk['options'].get('ondelete')
        )


def drop_foreign_key_constraint(table_name, fk_constraints):
    for fk in fk_constraints:
        op.drop_constraint(
            constraint_name=fk['name'],
            table_name=table_name,
            type_='foreignkey'
        )


@contextlib.contextmanager
def modify_foreign_keys_constraint(table_names):
    inspector = reflection.Inspector.from_engine(op.get_bind())
    try:
        for table in table_names:
            fk_constraints = inspector.get_foreign_keys(table)
            drop_foreign_key_constraint(table, fk_constraints)
        yield
    finally:
        for table in table_names:
            fk_constraints = inspector.get_foreign_keys(table)
            create_foreign_key_constraint(table, fk_constraints)


def modify_foreign_keys_constraint_with_col_change(
        table_name, old_local_col, new_local_col, existing_type,
        nullable=False):
    inspector = reflection.Inspector.from_engine(op.get_bind())
    fk_constraints = inspector.get_foreign_keys(table_name)
    for fk in fk_constraints:
        if old_local_col in fk['constrained_columns']:
            drop_foreign_key_constraint(table_name, [fk])
    op.alter_column(table_name, old_local_col,
                    new_column_name=new_local_col,
                    existing_type=existing_type,
                    nullable=nullable)
    fk_constraints = inspector.get_foreign_keys(table_name)
    for fk in fk_constraints:
        for i in range(len(fk['constrained_columns'])):
            if old_local_col == fk['constrained_columns'][i]:
                fk['constrained_columns'][i] = new_local_col
                create_foreign_key_constraint(table_name, [fk])
                break

license: apache-2.0
hash: 3,403,528,042,031,222,300
line_mean: 38.726316
line_max: 79
alpha_frac: 0.629041
autogenerated: false
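
The record above packages Tacker's shared Alembic migration helpers. As a quick orientation, here is a minimal sketch (not part of the dataset) of how a migration script might use them; the table name "vnf", column "status", and the enum values are hypothetical.

import sqlalchemy as sa

from tacker.db import migration


def upgrade(active_plugins=None, options=None):
    # Drop the table's foreign keys for the duration of the change and
    # restore them afterwards; swap the column to a new enum in between.
    with migration.modify_foreign_keys_constraint(['vnf']):
        new_status = sa.Enum('ACTIVE', 'ERROR', name='vnf_status')
        migration.alter_enum('vnf', 'status', new_status, nullable=False)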
repo_name: zrong/wpcmd
path: wpcmd/mde/ditaa.py
copies: 2
size: 4539
content:

"""
A Python Markdown extension to convert plain-text diagrams to images.
"""
# The MIT License (MIT)
#
# Copyright (c) 2014 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import ctypes
import os
import platform
import subprocess
import tempfile
import zlib

from markdown.preprocessors import Preprocessor
from markdown.extensions import Extension


if platform.python_version_tuple() >= ('3', '0', '0'):
    def b(string):
        return bytes(string, "UTF-8")
else:
    def b(string):
        return string


DITAA_CMD = os.environ.get("DITAA_CMD", "ditaa {infile} {outfile} --overwrite")


def generate_image_path(plaintext):
    adler32 = ctypes.c_uint32(zlib.adler32(b(plaintext))).value
    imgbasename = "diagram-%x.png" % adler32
    ditaa_image_dir = os.environ.get("DITAA_IMAGE_DIR", ".")
    imgpath = os.path.join(ditaa_image_dir, imgbasename)
    return imgpath


def generate_diagram(plaintext):
    """Run ditaa with plaintext input.

    Return relative path to the generated image.
    """
    imgpath = generate_image_path(plaintext)
    srcfd, srcfname = tempfile.mkstemp(prefix="ditaasrc", text=True)
    outfd, outfname = tempfile.mkstemp(prefix="ditaaout", text=True)
    with os.fdopen(srcfd, "w") as src:
        src.write(plaintext)
    try:
        cmd = DITAA_CMD.format(infile=srcfname, outfile=imgpath).split()
        with os.fdopen(outfd, "w") as out:
            retval = subprocess.check_call(cmd, stdout=out)
        return os.path.relpath(imgpath, os.getcwd())
    except:
        return None
    finally:
        os.unlink(srcfname)
        os.unlink(outfname)


class DitaaPreprocessor(Preprocessor):

    def run(self, lines):
        START_TAG = "```ditaa"
        END_TAG = "```"
        new_lines = []
        ditaa_prefix = ""
        ditaa_lines = []
        in_diagram = False
        for ln in lines:
            if in_diagram:  # lines of a diagram
                if ln == ditaa_prefix + END_TAG:
                    # strip line prefix if any (whitespace, bird marks)
                    plen = len(ditaa_prefix)
                    ditaa_lines = [dln[plen:] for dln in ditaa_lines]
                    ditaa_code = "\n".join(ditaa_lines)
                    filename = generate_diagram(ditaa_code)
                    if filename:
                        new_lines.append(ditaa_prefix +
                                         "![%s](%s)" % (filename, filename))
                    else:
                        md_code = [ditaa_prefix + "    " + dln
                                   for dln in ditaa_lines]
                        new_lines.extend([""] + md_code + [""])
                    in_diagram = False
                    ditaa_lines = []
                else:
                    ditaa_lines.append(ln)
            else:  # normal lines
                start = ln.find(START_TAG)
                prefix = ln[:start] if start >= 0 else ""
                # code block may be nested within a list item or a blockquote
                if start >= 0 and ln.endswith(START_TAG) and not prefix.strip(" \t>"):
                    in_diagram = True
                    ditaa_prefix = prefix
                else:
                    new_lines.append(ln)
        return new_lines


class DitaaExtension(Extension):

    def extendMarkdown(self, md, md_globals):
        md.registerExtension(self)
        location = "<fenced_code" if ("fenced_code" in md.preprocessors) else "_begin"
        md.preprocessors.add("ditaa", DitaaPreprocessor(md), location)


def makeExtension(configs=None):
    return DitaaExtension(configs=configs)

license: bsd-3-clause
hash: -5,705,855,473,612,955,000
line_mean: 35.312
line_max: 91
alpha_frac: 0.623926
autogenerated: false
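
A short usage sketch for the extension above (not part of the dataset): rendering a ditaa fenced block with python-markdown. It assumes the external `ditaa` command is on PATH and that the module is importable as `wpcmd.mde.ditaa`; the diagram text is illustrative.

import markdown

from wpcmd.mde.ditaa import DitaaExtension

text = "\n".join([
    "```ditaa",
    "+-----+     +-----+",
    "| in  |---->| out |",
    "+-----+     +-----+",
    "```",
])
# The preprocessor swaps the fenced block for an image reference such as
# ![diagram-<adler32>.png](diagram-<adler32>.png); markdown then renders
# that as an <img> tag. If ditaa fails, the block falls back to code.
print(markdown.markdown(text, extensions=[DitaaExtension()]))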
repo_name: c2corg/v6_api
path: c2corg_api/views/user_follow.py
copies: 1
size: 4832
content:

import logging

from c2corg_api import DBSession
from c2corg_api.models.feed import FollowedUser
from c2corg_api.views import cors_policy, restricted_json_view
from c2corg_api.views.document_listings import get_documents_for_ids
from c2corg_api.views.document_schemas import user_profile_documents_config
from c2corg_api.views.validation import validate_id, \
    validate_preferred_lang_param, validate_body_user_id
from colander import MappingSchema, SchemaNode, Integer, required
from cornice.resource import resource
from cornice.validators import colander_body_validator

log = logging.getLogger(__name__)


class FollowSchema(MappingSchema):
    user_id = SchemaNode(Integer(), missing=required)


def get_follower_relation(followed_user_id, follower_user_id):
    return DBSession. \
        query(FollowedUser). \
        filter(FollowedUser.followed_user_id == followed_user_id). \
        filter(FollowedUser.follower_user_id == follower_user_id). \
        first()


@resource(path='/users/follow', cors_policy=cors_policy)
class UserFollowRest(object):

    def __init__(self, request):
        self.request = request

    @restricted_json_view(
        schema=FollowSchema(),
        validators=[colander_body_validator, validate_body_user_id])
    def post(self):
        """ Follow the given user.

        Creates a follower relation, so that the authenticated user is
        following the given user.

        Request:
            `POST` `/users/follow`

        Request body:
            {'user_id': @user_id@}
        """
        followed_user_id = self.request.validated['user_id']
        follower_user_id = self.request.authenticated_userid
        follower_relation = get_follower_relation(
            followed_user_id, follower_user_id)

        if not follower_relation:
            DBSession.add(FollowedUser(
                followed_user_id=followed_user_id,
                follower_user_id=follower_user_id))

        return {}


@resource(path='/users/unfollow', cors_policy=cors_policy)
class UserUnfollowRest(object):

    def __init__(self, request):
        self.request = request

    @restricted_json_view(
        schema=FollowSchema(),
        validators=[colander_body_validator, validate_body_user_id])
    def post(self):
        """ Unfollow the given user.

        Request:
            `POST` `/users/unfollow`

        Request body:
            {'user_id': @user_id@}
        """
        followed_user_id = self.request.validated['user_id']
        follower_user_id = self.request.authenticated_userid
        follower_relation = get_follower_relation(
            followed_user_id, follower_user_id)

        if follower_relation:
            DBSession.delete(follower_relation)
        else:
            log.warning(
                'tried to delete not existing follower relation '
                '({0}, {1})'.format(followed_user_id, follower_user_id))

        return {}


@resource(path='/users/following-user/{id}', cors_policy=cors_policy)
class UserFollowingUserRest(object):

    def __init__(self, request):
        self.request = request

    @restricted_json_view(validators=[validate_id])
    def get(self):
        """ Check if the authenticated user follows the given user.

        Request:
            `GET` `users/following-user/{user_id}`

        Example response:
            {'is_following': true}
        """
        followed_user_id = self.request.validated['id']
        follower_user_id = self.request.authenticated_userid
        follower_relation = get_follower_relation(
            followed_user_id, follower_user_id)

        return {
            'is_following': follower_relation is not None
        }


@resource(path='/users/following', cors_policy=cors_policy)
class UserFollowingRest(object):

    def __init__(self, request):
        self.request = request

    @restricted_json_view(validators=[validate_preferred_lang_param])
    def get(self):
        """ Get the users that the authenticated user is following.

        Request:
            `GET` `/users/following`

        Example response:

            {
                'following': [
                    {
                        'document_id': 123,
                        ...
                    }
                ]
            }
        """
        follower_user_id = self.request.authenticated_userid
        followed_user_ids = DBSession. \
            query(FollowedUser.followed_user_id). \
            filter(FollowedUser.follower_user_id == follower_user_id). \
            all()
        followed_user_ids = [user_id for (user_id, ) in followed_user_ids]

        followed_users = get_documents_for_ids(
            followed_user_ids, None, user_profile_documents_config). \
            get('documents')

        return {
            'following': followed_users
        }

license: agpl-3.0
hash: -5,470,727,215,828,370,000
line_mean: 28.463415
line_max: 75
alpha_frac: 0.612583
autogenerated: false
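
As an illustration of the API documented in the docstrings above, a client session might look like the following sketch (not part of the dataset); the base URL and the auth header value are hypothetical.

import requests

API = 'https://api.example.org'
headers = {'Authorization': 'JWT token=...'}  # hypothetical auth scheme

# Start following user 123, then check the relation.
requests.post(API + '/users/follow', json={'user_id': 123}, headers=headers)
r = requests.get(API + '/users/following-user/123', headers=headers)
print(r.json())  # expected: {'is_following': True}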
repo_name: saurabh6790/omni-apps
path: patches/may_2013/p02_update_valuation_rate.py
copies: 30
size: 1376
content:

# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import webnotes


def execute():
    from stock.stock_ledger import update_entries_after
    item_warehouse = []
    # update valuation_rate in transaction
    doctypes = {"Purchase Receipt": "purchase_receipt_details",
                "Purchase Invoice": "entries"}

    for dt in doctypes:
        for d in webnotes.conn.sql("""select name from `tab%s`
                where modified >= '2013-05-09' and docstatus=1""" % dt):
            rec = webnotes.get_obj(dt, d[0])
            rec.update_valuation_rate(doctypes[dt])

            for item in rec.doclist.get({"parentfield": doctypes[dt]}):
                webnotes.conn.sql("""update `tab%s Item` set valuation_rate = %s
                    where name = %s""" % (dt, '%s', '%s'),
                    tuple([item.valuation_rate, item.name]))

                if dt == "Purchase Receipt":
                    webnotes.conn.sql("""update `tabStock Ledger Entry`
                        set incoming_rate = %s where voucher_detail_no = %s""",
                        (item.valuation_rate, item.name))

                if [item.item_code, item.warehouse] not in item_warehouse:
                    item_warehouse.append([item.item_code, item.warehouse])

    for d in item_warehouse:
        try:
            update_entries_after({"item_code": d[0], "warehouse": d[1],
                "posting_date": "2013-01-01", "posting_time": "00:05:00"})
            webnotes.conn.commit()
        except:
            pass

license: agpl-3.0
hash: 102,612,489,715,879,600
line_mean: 39.5
line_max: 91
alpha_frac: 0.670058
autogenerated: false
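
One non-obvious detail in the patch above is the update query that mixes Python string interpolation with driver-side placeholders: the doctype name is substituted by Python's % operator, while '%s' is re-inserted so the database driver can still bind the values safely. A worked example (not from the dataset) of what that interpolation produces:

dt = "Purchase Receipt"
# Three %s markers; the first takes the doctype name, the other two are
# replaced by literal '%s' strings left for the driver to fill in.
query = """update `tab%s Item` set valuation_rate = %s
    where name = %s""" % (dt, '%s', '%s')
print(query)
# update `tabPurchase Receipt Item` set valuation_rate = %s
#     where name = %s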
repo_name: openstack/heat
path: heat/tests/autoscaling/test_heat_scaling_policy.py
copies: 1
size: 8756
content:

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from heat.common import exception
from heat.common import template_format
from heat.engine import node_data
from heat.engine import resource
from heat.engine import scheduler
from heat.tests.autoscaling import inline_templates
from heat.tests import common
from heat.tests import utils


as_template = inline_templates.as_heat_template
as_params = inline_templates.as_params


class TestAutoScalingPolicy(common.HeatTestCase):
    def create_scaling_policy(self, t, stack, resource_name):
        rsrc = stack[resource_name]
        self.assertIsNone(rsrc.validate())
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        return rsrc

    def test_validate_scaling_policy_ok(self):
        t = template_format.parse(as_template)
        t['resources']['my-policy']['properties'][
            'scaling_adjustment'] = 33
        t['resources']['my-policy']['properties'][
            'adjustment_type'] = 'percent_change_in_capacity'
        t['resources']['my-policy']['properties'][
            'min_adjustment_step'] = 2
        stack = utils.parse_stack(t)
        self.assertIsNone(stack.validate())

    def test_validate_scaling_policy_error(self):
        t = template_format.parse(as_template)
        t['resources']['my-policy']['properties'][
            'scaling_adjustment'] = 1
        t['resources']['my-policy']['properties'][
            'adjustment_type'] = 'change_in_capacity'
        t['resources']['my-policy']['properties'][
            'min_adjustment_step'] = 2
        stack = utils.parse_stack(t)
        ex = self.assertRaises(exception.ResourcePropertyValueDependency,
                               stack.validate)
        self.assertIn('min_adjustment_step property should only '
                      'be specified for adjustment_type with '
                      'value percent_change_in_capacity.', str(ex))

    def test_scaling_policy_bad_group(self):
        t = template_format.parse(inline_templates.as_heat_template_bad_group)
        stack = utils.parse_stack(t)
        up_policy = self.create_scaling_policy(t, stack, 'my-policy')

        ex = self.assertRaises(exception.ResourceFailure, up_policy.signal)
        self.assertIn('Alarm my-policy could '
                      'not find scaling group', str(ex))

    def test_scaling_policy_adjust_no_action(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        up_policy = self.create_scaling_policy(t, stack, 'my-policy')
        group = stack['my-group']
        self.patchobject(group, 'adjust',
                         side_effect=resource.NoActionRequired())
        self.assertRaises(resource.NoActionRequired,
                          up_policy.handle_signal)

    def test_scaling_policy_adjust_size_changed(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        up_policy = self.create_scaling_policy(t, stack, 'my-policy')
        group = stack['my-group']
        self.patchobject(group, 'resize')
        self.patchobject(group, '_lb_reload')
        mock_fin_scaling = self.patchobject(group, '_finished_scaling')

        with mock.patch.object(group, '_check_scaling_allowed') as mock_isa:
            self.assertIsNone(up_policy.handle_signal())
            mock_isa.assert_called_once_with(60)
            mock_fin_scaling.assert_called_once_with(60,
                                                     'change_in_capacity : 1',
                                                     size_changed=True)

    def test_scaling_policy_cooldown_toosoon(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')
        group = stack['my-group']
        test = {'current': 'alarm'}

        with mock.patch.object(
                group, '_check_scaling_allowed',
                side_effect=resource.NoActionRequired) as mock_cip:
            self.assertRaises(resource.NoActionRequired,
                              pol.handle_signal, details=test)
            mock_cip.assert_called_once_with(60)

    def test_scaling_policy_cooldown_ok(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')
        group = stack['my-group']
        test = {'current': 'alarm'}

        self.patchobject(group, '_finished_scaling')
        self.patchobject(group, '_lb_reload')
        mock_resize = self.patchobject(group, 'resize')

        with mock.patch.object(group, '_check_scaling_allowed') as mock_isa:
            pol.handle_signal(details=test)
            mock_isa.assert_called_once_with(60)
        mock_resize.assert_called_once_with(1)

    def test_scaling_policy_refid(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t)
        rsrc = stack['my-policy']
        rsrc.resource_id = 'xyz'
        self.assertEqual('xyz', rsrc.FnGetRefId())

    def test_scaling_policy_refid_convg_cache_data(self):
        t = template_format.parse(as_template)
        cache_data = {'my-policy': node_data.NodeData.from_dict({
            'uuid': mock.ANY,
            'id': mock.ANY,
            'action': 'CREATE',
            'status': 'COMPLETE',
            'reference_id': 'convg_xyz'
        })}
        stack = utils.parse_stack(t, cache_data=cache_data)
        rsrc = stack.defn['my-policy']
        self.assertEqual('convg_xyz', rsrc.FnGetRefId())


class ScalingPolicyAttrTest(common.HeatTestCase):
    def setUp(self):
        super(ScalingPolicyAttrTest, self).setUp()
        t = template_format.parse(as_template)
        self.stack = utils.parse_stack(t, params=as_params)
        self.stack_name = self.stack.name
        self.policy = self.stack['my-policy']
        self.assertIsNone(self.policy.validate())
        scheduler.TaskRunner(self.policy.create)()
        self.assertEqual((self.policy.CREATE, self.policy.COMPLETE),
                         self.policy.state)

    def test_alarm_attribute(self):
        heat_plugin = self.stack.clients.client_plugin('heat')
        heat_plugin.get_heat_cfn_url = mock.Mock(
            return_value='http://server.test:8000/v1')
        alarm_url = self.policy.FnGetAtt('alarm_url')

        base = alarm_url.split('?')[0].split('%3A')
        self.assertEqual('http://server.test:8000/v1/signal/arn', base[0])
        self.assertEqual('openstack', base[1])
        self.assertEqual('heat', base[2])
        self.assertEqual('test_tenant_id', base[4])

        res = base[5].split('/')
        self.assertEqual('stacks', res[0])
        self.assertEqual(self.stack_name, res[1])
        self.assertEqual('resources', res[3])
        self.assertEqual('my-policy', res[4])

        args = sorted(alarm_url.split('?')[1].split('&'))
        self.assertEqual('AWSAccessKeyId', args[0].split('=')[0])
        self.assertEqual('Signature', args[1].split('=')[0])
        self.assertEqual('SignatureMethod', args[2].split('=')[0])
        self.assertEqual('SignatureVersion', args[3].split('=')[0])

    def test_signal_attribute(self):
        heat_plugin = self.stack.clients.client_plugin('heat')
        heat_plugin.get_heat_url = mock.Mock(
            return_value='http://server.test:8000/v1/')
        self.assertEqual(
            'http://server.test:8000/v1/test_tenant_id/stacks/'
            '%s/%s/resources/my-policy/signal' % (
                self.stack.name, self.stack.id),
            self.policy.FnGetAtt('signal_url'))

    def test_signal_attribute_with_prefix(self):
        heat_plugin = self.stack.clients.client_plugin('heat')
        heat_plugin.get_heat_url = mock.Mock(
            return_value='http://server.test/heat-api/v1/1234')
        self.assertEqual(
            'http://server.test/heat-api/v1/test_tenant_id/stacks/'
            '%s/%s/resources/my-policy/signal' % (
                self.stack.name, self.stack.id),
            self.policy.FnGetAtt('signal_url'))

license: apache-2.0
hash: 6,347,586,116,568,195,000
line_mean: 41.921569
line_max: 78
alpha_frac: 0.613522
autogenerated: false
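
These tests lean on one pattern throughout: patch a collaborator with mock.patch.object, optionally give it a side_effect exception, then assert on the recorded call. A self-contained illustration of just that pattern (not from the dataset; the Group class here is a stand-in):

from unittest import mock


class Group(object):
    def _check_scaling_allowed(self, cooldown):
        """Stand-in for the real cooldown check."""


group = Group()
with mock.patch.object(group, '_check_scaling_allowed',
                       side_effect=RuntimeError) as mocked:
    try:
        group._check_scaling_allowed(60)  # raises via side_effect
    except RuntimeError:
        pass
mocked.assert_called_once_with(60)  # the call and its argument were recorded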
raymondgom/pmip6ns3.13new
src/network/bindings/modulegen__gcc_ILP32.py
32
498757
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.network', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## packetbb.h (module 'network'): ns3::PbbAddressLength [enumeration] module.add_enum('PbbAddressLength', ['IPV4', 'IPV6']) ## ethernet-header.h (module 'network'): ns3::ethernet_header_t [enumeration] module.add_enum('ethernet_header_t', ['LENGTH', 'VLAN', 'QINQ']) ## error-model.h (module 'network'): ns3::ErrorUnit [enumeration] module.add_enum('ErrorUnit', ['EU_BIT', 'EU_BYTE', 'EU_PKT']) ## address.h (module 'network'): ns3::Address [class] module.add_class('Address') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address']) ## application-container.h (module 'network'): ns3::ApplicationContainer [class] module.add_class('ApplicationContainer') ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class] module.add_class('AsciiTraceHelper') ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class] module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## channel-list.h (module 'network'): ns3::ChannelList [class] module.add_class('ChannelList') ## data-rate.h (module 'network'): ns3::DataRate [class] module.add_class('DataRate') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] module.add_class('Inet6SocketAddress') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] 
module.add_class('InetSocketAddress') ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## mac64-address.h (module 'network'): ns3::Mac64Address [class] module.add_class('Mac64Address') ## mac64-address.h (module 'network'): ns3::Mac64Address [class] root_module['ns3::Mac64Address'].implicitly_converts_to(root_module['ns3::Address']) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class] module.add_class('NetDeviceContainer') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer') ## node-list.h (module 'network'): ns3::NodeList [class] module.add_class('NodeList') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', outer_class=root_module['ns3::PacketMetadata']) ## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress [class] module.add_class('PacketSocketAddress') ## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress [class] root_module['ns3::PacketSocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## packet-socket-helper.h (module 'network'): ns3::PacketSocketHelper [class] module.add_class('PacketSocketHelper') ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', 
outer_class=root_module['ns3::PacketTagList']) ## packetbb.h (module 'network'): ns3::PbbAddressTlvBlock [class] module.add_class('PbbAddressTlvBlock') ## packetbb.h (module 'network'): ns3::PbbTlvBlock [class] module.add_class('PbbTlvBlock') ## pcap-file.h (module 'network'): ns3::PcapFile [class] module.add_class('PcapFile') ## trace-helper.h (module 'network'): ns3::PcapHelper [class] module.add_class('PcapHelper') ## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration] module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper']) ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class] module.add_class('PcapHelperForDevice', allow_subclassing=True) ## random-variable.h (module 'core'): ns3::RandomVariable [class] module.add_class('RandomVariable', import_from_module='ns.core') ## random-variable.h (module 'core'): ns3::SeedManager [class] module.add_class('SeedManager', import_from_module='ns.core') ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned int, int> [class] module.add_class('SequenceNumber32') ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned short, short> [class] module.add_class('SequenceNumber16') ## random-variable.h (module 'core'): ns3::SequentialVariable [class] module.add_class('SequentialVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs [class] module.add_class('SystemWallClockMs', import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer') ## random-variable.h (module 'core'): ns3::TriangularVariable [class] module.add_class('TriangularVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## random-variable.h (module 'core'): ns3::UniformVariable [class] module.add_class('UniformVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): 
ns3::WeibullVariable [class] module.add_class('WeibullVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::ZetaVariable [class] module.add_class('ZetaVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::ZipfVariable [class] module.add_class('ZipfVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', parent=root_module['ns3::ObjectBase']) ## random-variable.h (module 'core'): ns3::ConstantVariable [class] module.add_class('ConstantVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::DeterministicVariable [class] module.add_class('DeterministicVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::EmpiricalVariable [class] module.add_class('EmpiricalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::ErlangVariable [class] module.add_class('ErlangVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::ExponentialVariable [class] module.add_class('ExponentialVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## flow-id-tag.h (module 'network'): ns3::FlowIdTag [class] module.add_class('FlowIdTag', parent=root_module['ns3::Tag']) ## random-variable.h (module 'core'): ns3::GammaVariable [class] module.add_class('GammaVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', parent=root_module['ns3::Chunk']) ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable [class] module.add_class('IntEmpiricalVariable', import_from_module='ns.core', parent=root_module['ns3::EmpiricalVariable']) ## llc-snap-header.h (module 'network'): ns3::LlcSnapHeader [class] module.add_class('LlcSnapHeader', parent=root_module['ns3::Header']) ## random-variable.h (module 'core'): ns3::LogNormalVariable [class] module.add_class('LogNormalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::NormalVariable [class] module.add_class('NormalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## packet-burst.h (module 'network'): ns3::PacketBurst [class] module.add_class('PacketBurst', parent=root_module['ns3::Object']) ## random-variable.h (module 'core'): ns3::ParetoVariable [class] module.add_class('ParetoVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper 
[class] module.add_class('PcapFileWrapper', parent=root_module['ns3::Object']) ## queue.h (module 'network'): ns3::Queue [class] module.add_class('Queue', parent=root_module['ns3::Object']) ## radiotap-header.h (module 'network'): ns3::RadiotapHeader [class] module.add_class('RadiotapHeader', parent=root_module['ns3::Header']) ## radiotap-header.h (module 'network'): ns3::RadiotapHeader [enumeration] module.add_enum('', ['FRAME_FLAG_NONE', 'FRAME_FLAG_CFP', 'FRAME_FLAG_SHORT_PREAMBLE', 'FRAME_FLAG_WEP', 'FRAME_FLAG_FRAGMENTED', 'FRAME_FLAG_FCS_INCLUDED', 'FRAME_FLAG_DATA_PADDING', 'FRAME_FLAG_BAD_FCS', 'FRAME_FLAG_SHORT_GUARD'], outer_class=root_module['ns3::RadiotapHeader']) ## radiotap-header.h (module 'network'): ns3::RadiotapHeader [enumeration] module.add_enum('', ['CHANNEL_FLAG_NONE', 'CHANNEL_FLAG_TURBO', 'CHANNEL_FLAG_CCK', 'CHANNEL_FLAG_OFDM', 'CHANNEL_FLAG_SPECTRUM_2GHZ', 'CHANNEL_FLAG_SPECTRUM_5GHZ', 'CHANNEL_FLAG_PASSIVE', 'CHANNEL_FLAG_DYNAMIC', 'CHANNEL_FLAG_GFSK'], outer_class=root_module['ns3::RadiotapHeader']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', 
decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::PbbAddressBlock', 'ns3::empty', 'ns3::DefaultDeleter<ns3::PbbAddressBlock>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::PbbMessage', 'ns3::empty', 'ns3::DefaultDeleter<ns3::PbbMessage>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::PbbPacket', 'ns3::Header', 'ns3::DefaultDeleter<ns3::PbbPacket>'], parent=root_module['ns3::Header'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::PbbTlv', 'ns3::empty', 'ns3::DefaultDeleter<ns3::PbbTlv>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, 
import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## socket.h (module 'network'): ns3::Socket [class] module.add_class('Socket', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration] module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket']) ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration] module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket']) ## socket.h (module 'network'): ns3::SocketAddressTag [class] module.add_class('SocketAddressTag', parent=root_module['ns3::Tag']) ## socket-factory.h (module 'network'): ns3::SocketFactory [class] module.add_class('SocketFactory', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::SocketIpTtlTag [class] module.add_class('SocketIpTtlTag', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class] module.add_class('SocketSetDontFragmentTag', parent=root_module['ns3::Tag']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', parent=root_module['ns3::Chunk']) ## application.h (module 'network'): ns3::Application [class] module.add_class('Application', parent=root_module['ns3::Object']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', 
parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## channel.h (module 'network'): ns3::Channel [class] module.add_class('Channel', parent=root_module['ns3::Object']) ## data-rate.h (module 'network'): ns3::DataRateChecker [class] module.add_class('DataRateChecker', parent=root_module['ns3::AttributeChecker']) ## data-rate.h (module 'network'): ns3::DataRateValue [class] module.add_class('DataRateValue', parent=root_module['ns3::AttributeValue']) ## drop-tail-queue.h (module 'network'): ns3::DropTailQueue [class] module.add_class('DropTailQueue', parent=root_module['ns3::Queue']) ## drop-tail-queue.h (module 'network'): ns3::DropTailQueue::Mode [enumeration] module.add_enum('Mode', ['ILLEGAL', 'PACKETS', 'BYTES'], outer_class=root_module['ns3::DropTailQueue']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## error-model.h (module 'network'): ns3::ErrorModel [class] module.add_class('ErrorModel', parent=root_module['ns3::Object']) ## ethernet-header.h (module 'network'): ns3::EthernetHeader [class] module.add_class('EthernetHeader', parent=root_module['ns3::Header']) ## ethernet-trailer.h (module 'network'): ns3::EthernetTrailer [class] module.add_class('EthernetTrailer', parent=root_module['ns3::Trailer']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', parent=root_module['ns3::AttributeValue']) ## error-model.h (module 'network'): ns3::ListErrorModel [class] module.add_class('ListErrorModel', parent=root_module['ns3::ErrorModel']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] 
module.add_class('Mac48AddressValue', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice']) ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', parent=root_module['ns3::Object']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] module.add_class('OutputStreamWrapper', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## packet-socket.h (module 'network'): ns3::PacketSocket [class] module.add_class('PacketSocket', parent=root_module['ns3::Socket']) ## packet-socket-factory.h (module 'network'): ns3::PacketSocketFactory [class] module.add_class('PacketSocketFactory', parent=root_module['ns3::SocketFactory']) ## packetbb.h (module 'network'): ns3::PbbAddressBlock [class] module.add_class('PbbAddressBlock', parent=root_module['ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >']) ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv4 [class] module.add_class('PbbAddressBlockIpv4', parent=root_module['ns3::PbbAddressBlock']) ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv6 [class] module.add_class('PbbAddressBlockIpv6', parent=root_module['ns3::PbbAddressBlock']) ## packetbb.h (module 'network'): ns3::PbbMessage [class] module.add_class('PbbMessage', parent=root_module['ns3::SimpleRefCount< ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >']) ## packetbb.h (module 'network'): ns3::PbbMessageIpv4 [class] module.add_class('PbbMessageIpv4', parent=root_module['ns3::PbbMessage']) ## packetbb.h (module 'network'): ns3::PbbMessageIpv6 [class] module.add_class('PbbMessageIpv6', parent=root_module['ns3::PbbMessage']) ## packetbb.h (module 'network'): ns3::PbbPacket [class] module.add_class('PbbPacket', parent=root_module['ns3::SimpleRefCount< ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >']) ## packetbb.h (module 'network'): ns3::PbbTlv [class] module.add_class('PbbTlv', parent=root_module['ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >']) ## random-variable.h (module 'core'): ns3::RandomVariableChecker [class] module.add_class('RandomVariableChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## random-variable.h (module 'core'): 
ns3::RandomVariableValue [class] module.add_class('RandomVariableValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## error-model.h (module 'network'): ns3::RateErrorModel [class] module.add_class('RateErrorModel', parent=root_module['ns3::ErrorModel']) ## error-model.h (module 'network'): ns3::ReceiveListErrorModel [class] module.add_class('ReceiveListErrorModel', parent=root_module['ns3::ErrorModel']) ## simple-channel.h (module 'network'): ns3::SimpleChannel [class] module.add_class('SimpleChannel', parent=root_module['ns3::Channel']) ## simple-net-device.h (module 'network'): ns3::SimpleNetDevice [class] module.add_class('SimpleNetDevice', parent=root_module['ns3::NetDevice']) ## nstime.h (module 'core'): ns3::TimeChecker [class] module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', parent=root_module['ns3::AttributeValue']) ## packetbb.h (module 'network'): ns3::PbbAddressTlv [class] module.add_class('PbbAddressTlv', parent=root_module['ns3::PbbTlv']) module.add_container('std::list< ns3::Ptr< ns3::Packet > >', 'ns3::Ptr< ns3::Packet >', container_type='list') module.add_container('std::list< unsigned int >', 'unsigned int', container_type='list') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyRxEndOkCallback') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyRxEndOkCallback*') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyRxEndOkCallback&') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned short, short >', 'ns3::SequenceNumber16') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned short, short >*', 'ns3::SequenceNumber16*') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned short, short >&', 'ns3::SequenceNumber16&') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >', 'ns3::SequenceNumber32') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >*', 'ns3::SequenceNumber32*') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >&', 'ns3::SequenceNumber32&') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyRxStartCallback') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty 
>*', 'ns3::GenericPhyRxStartCallback*') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyRxStartCallback&') typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyTxStartCallback') typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyTxStartCallback*') typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyTxStartCallback&') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyRxEndErrorCallback') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyRxEndErrorCallback*') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyRxEndErrorCallback&') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyTxEndCallback') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyTxEndCallback*') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyTxEndCallback&') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace addressUtils nested_module = module.add_cpp_namespace('addressUtils') register_types_ns3_addressUtils(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_addressUtils(module): root_module = module.get_root() def register_methods(root_module): register_Ns3Address_methods(root_module, root_module['ns3::Address']) register_Ns3ApplicationContainer_methods(root_module, root_module['ns3::ApplicationContainer']) register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper']) register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer']) register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator']) register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator']) register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item']) 
def register_methods(root_module):
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3ApplicationContainer_methods(root_module, root_module['ns3::ApplicationContainer'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3ChannelList_methods(root_module, root_module['ns3::ChannelList'])
    register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
    register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3Mac64Address_methods(root_module, root_module['ns3::Mac64Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3NodeList_methods(root_module, root_module['ns3::NodeList'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketSocketAddress_methods(root_module, root_module['ns3::PacketSocketAddress'])
    register_Ns3PacketSocketHelper_methods(root_module, root_module['ns3::PacketSocketHelper'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PbbAddressTlvBlock_methods(root_module, root_module['ns3::PbbAddressTlvBlock'])
    register_Ns3PbbTlvBlock_methods(root_module, root_module['ns3::PbbTlvBlock'])
    register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
    register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
    register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
    register_Ns3RandomVariable_methods(root_module, root_module['ns3::RandomVariable'])
    register_Ns3SeedManager_methods(root_module, root_module['ns3::SeedManager'])
    register_Ns3SequenceNumber32_methods(root_module, root_module['ns3::SequenceNumber32'])
    register_Ns3SequenceNumber16_methods(root_module, root_module['ns3::SequenceNumber16'])
    register_Ns3SequentialVariable_methods(root_module, root_module['ns3::SequentialVariable'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TriangularVariable_methods(root_module, root_module['ns3::TriangularVariable'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3UniformVariable_methods(root_module, root_module['ns3::UniformVariable'])
    register_Ns3WeibullVariable_methods(root_module, root_module['ns3::WeibullVariable'])
    register_Ns3ZetaVariable_methods(root_module, root_module['ns3::ZetaVariable'])
    register_Ns3ZipfVariable_methods(root_module, root_module['ns3::ZipfVariable'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3ConstantVariable_methods(root_module, root_module['ns3::ConstantVariable'])
    register_Ns3DeterministicVariable_methods(root_module, root_module['ns3::DeterministicVariable'])
    register_Ns3EmpiricalVariable_methods(root_module, root_module['ns3::EmpiricalVariable'])
    register_Ns3ErlangVariable_methods(root_module, root_module['ns3::ErlangVariable'])
    register_Ns3ExponentialVariable_methods(root_module, root_module['ns3::ExponentialVariable'])
    register_Ns3FlowIdTag_methods(root_module, root_module['ns3::FlowIdTag'])
    register_Ns3GammaVariable_methods(root_module, root_module['ns3::GammaVariable'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3IntEmpiricalVariable_methods(root_module, root_module['ns3::IntEmpiricalVariable'])
    register_Ns3LlcSnapHeader_methods(root_module, root_module['ns3::LlcSnapHeader'])
    register_Ns3LogNormalVariable_methods(root_module, root_module['ns3::LogNormalVariable'])
    register_Ns3NormalVariable_methods(root_module, root_module['ns3::NormalVariable'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PacketBurst_methods(root_module, root_module['ns3::PacketBurst'])
    register_Ns3ParetoVariable_methods(root_module, root_module['ns3::ParetoVariable'])
    register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
    register_Ns3Queue_methods(root_module, root_module['ns3::Queue'])
    register_Ns3RadiotapHeader_methods(root_module, root_module['ns3::RadiotapHeader'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3PbbAddressBlock_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbAddressBlock__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >'])
    register_Ns3SimpleRefCount__Ns3PbbMessage_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbMessage__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >'])
    register_Ns3SimpleRefCount__Ns3PbbPacket_Ns3Header_Ns3DefaultDeleter__lt__ns3PbbPacket__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >'])
    register_Ns3SimpleRefCount__Ns3PbbTlv_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbTlv__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketFactory_methods(root_module, root_module['ns3::SocketFactory'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3Application_methods(root_module, root_module['ns3::Application'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
    register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
    register_Ns3DropTailQueue_methods(root_module, root_module['ns3::DropTailQueue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErrorModel_methods(root_module, root_module['ns3::ErrorModel'])
    register_Ns3EthernetHeader_methods(root_module, root_module['ns3::EthernetHeader'])
    register_Ns3EthernetTrailer_methods(root_module, root_module['ns3::EthernetTrailer'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3ListErrorModel_methods(root_module, root_module['ns3::ListErrorModel'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3PacketSocket_methods(root_module, root_module['ns3::PacketSocket'])
    register_Ns3PacketSocketFactory_methods(root_module, root_module['ns3::PacketSocketFactory'])
    register_Ns3PbbAddressBlock_methods(root_module, root_module['ns3::PbbAddressBlock'])
    register_Ns3PbbAddressBlockIpv4_methods(root_module, root_module['ns3::PbbAddressBlockIpv4'])
    register_Ns3PbbAddressBlockIpv6_methods(root_module, root_module['ns3::PbbAddressBlockIpv6'])
    register_Ns3PbbMessage_methods(root_module, root_module['ns3::PbbMessage'])
    register_Ns3PbbMessageIpv4_methods(root_module, root_module['ns3::PbbMessageIpv4'])
    register_Ns3PbbMessageIpv6_methods(root_module, root_module['ns3::PbbMessageIpv6'])
    register_Ns3PbbPacket_methods(root_module, root_module['ns3::PbbPacket'])
    register_Ns3PbbTlv_methods(root_module, root_module['ns3::PbbTlv'])
    register_Ns3RandomVariableChecker_methods(root_module, root_module['ns3::RandomVariableChecker'])
    register_Ns3RandomVariableValue_methods(root_module, root_module['ns3::RandomVariableValue'])
    register_Ns3RateErrorModel_methods(root_module, root_module['ns3::RateErrorModel'])
    register_Ns3ReceiveListErrorModel_methods(root_module, root_module['ns3::ReceiveListErrorModel'])
    register_Ns3SimpleChannel_methods(root_module, root_module['ns3::SimpleChannel'])
    register_Ns3SimpleNetDevice_methods(root_module, root_module['ns3::SimpleNetDevice'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3PbbAddressTlv_methods(root_module, root_module['ns3::PbbAddressTlv'])
    return
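## Each register_Ns3X_methods function below receives the class wrapper
## created during type registration and declares that class's C++
## constructors and member functions.  The sketch below shows how the
## recurring keywords are used: param() describes one C++ argument,
## default_value carries a C++ default-argument expression, and
## is_const/is_static mirror the C++ qualifiers.  It is illustrative
## only and never invoked; 'Lookup' and 'ns3::ExampleTag' are assumed
## names, not part of the generated API scan.
def _example_method_registration_sketch(root_module, cls):
    ## A copy constructor takes the wrapped type by const reference.
    cls.add_constructor([param('ns3::ExampleTag const &', 'arg0')])
    ## A const member function with a defaulted second argument.
    cls.add_method('Lookup', 'uint32_t', [param('std::string', 'name'), param('bool', 'exact', default_value='true')], is_const=True)
    ## A static member function taking no arguments.
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    return
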
def register_Ns3Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return

def register_Ns3ApplicationContainer_methods(root_module, cls):
    ## application-container.h (module 'network'): ns3::ApplicationContainer::ApplicationContainer(ns3::ApplicationContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ApplicationContainer const &', 'arg0')])
    ## application-container.h (module 'network'): ns3::ApplicationContainer::ApplicationContainer() [constructor]
    cls.add_constructor([])
    ## application-container.h (module 'network'): ns3::ApplicationContainer::ApplicationContainer(ns3::Ptr<ns3::Application> application) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Application >', 'application')])
    ## application-container.h (module 'network'): ns3::ApplicationContainer::ApplicationContainer(std::string name) [constructor]
    cls.add_constructor([param('std::string', 'name')])
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Add(ns3::ApplicationContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::ApplicationContainer', 'other')])
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Add(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Add(std::string name) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name')])
    ## application-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Application>*,std::vector<ns3::Ptr<ns3::Application>, std::allocator<ns3::Ptr<ns3::Application> > > > ns3::ApplicationContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Application > const, std::vector< ns3::Ptr< ns3::Application > > >', [], is_const=True)
    ## application-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Application>*,std::vector<ns3::Ptr<ns3::Application>, std::allocator<ns3::Ptr<ns3::Application> > > > ns3::ApplicationContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Application > const, std::vector< ns3::Ptr< ns3::Application > > >', [], is_const=True)
    ## application-container.h (module 'network'): ns3::Ptr<ns3::Application> ns3::ApplicationContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'i')], is_const=True)
    ## application-container.h (module 'network'): uint32_t ns3::ApplicationContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Start(ns3::Time start) [member function]
    cls.add_method('Start', 'void', [param('ns3::Time', 'start')])
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Stop(ns3::Time stop) [member function]
    cls.add_method('Stop', 'void', [param('ns3::Time', 'stop')])
    return

def register_Ns3AsciiTraceHelper_methods(root_module, cls):
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
    cls.add_method('CreateFileStream', 'ns3::Ptr< ns3::OutputStreamWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return

def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
    cls.add_method('EnableAsciiAll', 'void', [param('std::string', 'prefix')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
    cls.add_method('EnableAsciiAll', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
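## Nested POD-style structs such as AttributeConstructionList::Item above
## are exported by mapping each public C++ data member with
## add_instance_attribute rather than add_method, which gives the member
## plain Python attribute semantics.  The sketch below is illustrative
## only and never invoked; the member names are assumptions, not part of
## the generated API scan.
def _example_instance_attribute_sketch(root_module, cls):
    ## is_const=False leaves the attribute writable from Python.
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
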
def register_Ns3Buffer_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return

def register_Ns3ByteTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3ByteTagList_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return

def register_Ns3ByteTagListIterator_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return

def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3CallbackBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return

def register_Ns3ChannelList_methods(root_module, cls):
    ## channel-list.h (module 'network'): ns3::ChannelList::ChannelList() [constructor]
    cls.add_constructor([])
    ## channel-list.h (module 'network'): ns3::ChannelList::ChannelList(ns3::ChannelList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ChannelList const &', 'arg0')])
    ## channel-list.h (module 'network'): static uint32_t ns3::ChannelList::Add(ns3::Ptr<ns3::Channel> channel) [member function]
    cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Channel >', 'channel')], is_static=True)
    ## channel-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Channel>*,std::vector<ns3::Ptr<ns3::Channel>, std::allocator<ns3::Ptr<ns3::Channel> > > > ns3::ChannelList::Begin() [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Channel > const, std::vector< ns3::Ptr< ns3::Channel > > >', [], is_static=True)
    ## channel-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Channel>*,std::vector<ns3::Ptr<ns3::Channel>, std::allocator<ns3::Ptr<ns3::Channel> > > > ns3::ChannelList::End() [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Channel > const, std::vector< ns3::Ptr< ns3::Channel > > >', [], is_static=True)
    ## channel-list.h (module 'network'): static ns3::Ptr<ns3::Channel> ns3::ChannelList::GetChannel(uint32_t n) [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [param('uint32_t', 'n')], is_static=True)
    ## channel-list.h (module 'network'): static uint32_t ns3::ChannelList::GetNChannels() [member function]
    cls.add_method('GetNChannels', 'uint32_t', [], is_static=True)
    return

def register_Ns3DataRate_methods(root_module, cls):
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('>=')
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRate const &', 'arg0')])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor]
    cls.add_constructor([param('uint64_t', 'bps')])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor]
    cls.add_constructor([param('std::string', 'rate')])
    ## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function]
    cls.add_method('CalculateTxTime', 'double', [param('uint32_t', 'bytes')], is_const=True)
    ## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function]
    cls.add_method('GetBitRate', 'uint64_t', [], is_const=True)
    return
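## Value types such as DataRate above export their C++ operators before
## any constructors or methods: add_binary_comparison_operator() maps a
## C++ comparison operator onto Python's rich-comparison protocol, and
## add_output_stream_operator() reuses operator<<(std::ostream&, T) for
## Python's str()/print.  The sketch below is illustrative only and is
## never invoked.
def _example_operator_registration_sketch(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    ## Enables printing the wrapped value directly from Python.
    cls.add_output_stream_operator()
    return
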
ns3::EventId::GetContext() const [member function] cls.add_method('GetContext', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3Inet6SocketAddress_methods(root_module, cls): ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Inet6SocketAddress const & arg0) [copy constructor] cls.add_constructor([param('ns3::Inet6SocketAddress const &', 'arg0')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6) [constructor] cls.add_constructor([param('char const *', 'ipv6')]) ## inet6-socket-address.h (module 'network'): static ns3::Inet6SocketAddress ns3::Inet6SocketAddress::ConvertFrom(ns3::Address const & addr) [member function] cls.add_method('ConvertFrom', 'ns3::Inet6SocketAddress', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): ns3::Ipv6Address ns3::Inet6SocketAddress::GetIpv6() const [member function] cls.add_method('GetIpv6', 'ns3::Ipv6Address', [], is_const=True) ## inet6-socket-address.h (module 'network'): uint16_t ns3::Inet6SocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet6-socket-address.h (module 'network'): static bool ns3::Inet6SocketAddress::IsMatchingType(ns3::Address const & addr) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetIpv6(ns3::Ipv6Address ipv6) [member function] cls.add_method('SetIpv6', 'void', [param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) return def register_Ns3InetSocketAddress_methods(root_module, cls): ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::InetSocketAddress const & arg0) 
[copy constructor] cls.add_constructor([param('ns3::InetSocketAddress const &', 'arg0')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4) [constructor] cls.add_constructor([param('char const *', 'ipv4')]) ## inet-socket-address.h (module 'network'): static ns3::InetSocketAddress ns3::InetSocketAddress::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::InetSocketAddress', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): ns3::Ipv4Address ns3::InetSocketAddress::GetIpv4() const [member function] cls.add_method('GetIpv4', 'ns3::Ipv4Address', [], is_const=True) ## inet-socket-address.h (module 'network'): uint16_t ns3::InetSocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet-socket-address.h (module 'network'): static bool ns3::InetSocketAddress::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetIpv4(ns3::Ipv4Address address) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ipv4Address', 'address')]) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', 

def register_Ns3Ipv4Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
    cls.add_constructor([param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return
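
# Illustrative sketch (assumed `ns.network` import path): the mask-related
# helpers bound above combine an address with an Ipv4Mask.
#
#   import ns.network
#   a = ns.network.Ipv4Address("10.1.1.1")
#   m = ns.network.Ipv4Mask("255.255.255.0")
#   a.CombineMask(m)                  # network part: 10.1.1.0
#   a.GetSubnetDirectedBroadcast(m)   # 10.1.1.255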

def register_Ns3Ipv4Mask_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return
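
# Illustrative sketch (assumptions: `ns.network` import path; the char*
# constructor accepting "/NN" prefix notation as in the C++ API):
#
#   m = ns.network.Ipv4Mask("/24")
#   m.GetPrefixLength()  # 24
#   m.IsMatch(ns.network.Ipv4Address("10.1.1.1"),
#             ns.network.Ipv4Address("10.1.1.42"))  # True: same /24 subnet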

def register_Ns3Ipv6Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return
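
# Illustrative sketch (assumed `ns.network` import path): the static
# autoconfiguration helpers bound above derive IPv6 addresses from a MAC.
#
#   mac = ns.network.Mac48Address("00:00:00:00:00:01")
#   ll = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
#   ll.IsLinkLocal()  # True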

def register_Ns3Ipv6Prefix_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return

def register_Ns3Mac48Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return

def register_Ns3Mac64Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac64-address.h (module 'network'): ns3::Mac64Address::Mac64Address(ns3::Mac64Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac64Address const &', 'arg0')])
    ## mac64-address.h (module 'network'): ns3::Mac64Address::Mac64Address() [constructor]
    cls.add_constructor([])
    ## mac64-address.h (module 'network'): ns3::Mac64Address::Mac64Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac64-address.h (module 'network'): static ns3::Mac64Address ns3::Mac64Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac64Address', [], is_static=True)
    ## mac64-address.h (module 'network'): static ns3::Mac64Address ns3::Mac64Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac64Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## mac64-address.h (module 'network'): void ns3::Mac64Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    ## mac64-address.h (module 'network'): void ns3::Mac64Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    ## mac64-address.h (module 'network'): static bool ns3::Mac64Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return
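
# Illustrative sketch (assumed `ns.network` import path): GetMulticast maps an
# IPv4 multicast group to its Ethernet group address (lower 23 bits under the
# 01:00:5e prefix).
#
#   group = ns.network.Ipv4Address("224.0.0.251")
#   ns.network.Mac48Address.GetMulticast(group)  # 01:00:5e:00:00:fb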

def register_Ns3NetDeviceContainer_methods(root_module, cls):
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

def register_Ns3NodeContainer_methods(root_module, cls):
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
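
# Illustrative sketch (assumed `ns.network` import path): the container API
# bound above is the usual entry point for building topologies.
#
#   nodes = ns.network.NodeContainer()
#   nodes.Create(2)        # instantiate two Nodes
#   first = nodes.Get(0)   # Ptr<Node> for the first node
#   nodes.GetN()           # 2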

def register_Ns3NodeList_methods(root_module, cls):
    ## node-list.h (module 'network'): ns3::NodeList::NodeList() [constructor]
    cls.add_constructor([])
    ## node-list.h (module 'network'): ns3::NodeList::NodeList(ns3::NodeList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeList const &', 'arg0')])
    ## node-list.h (module 'network'): static uint32_t ns3::NodeList::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Node >', 'node')], is_static=True)
    ## node-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeList::Begin() [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_static=True)
    ## node-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeList::End() [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_static=True)
    ## node-list.h (module 'network'): static uint32_t ns3::NodeList::GetNNodes() [member function]
    cls.add_method('GetNNodes', 'uint32_t', [], is_static=True)
    ## node-list.h (module 'network'): static ns3::Ptr<ns3::Node> ns3::NodeList::GetNode(uint32_t n) [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'n')], is_static=True)
    return

def register_Ns3ObjectBase_methods(root_module, cls):
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectDeleter_methods(root_module, cls):
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

def register_Ns3ObjectFactory_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return
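
# Illustrative sketch (assumed `ns.core` import path): ObjectFactory creates
# objects by TypeId name, with attributes set before construction.
#
#   factory = ns.core.ObjectFactory()
#   factory.SetTypeId("ns3::Node")
#   obj = factory.Create()  # Ptr<Object>, here an ns3::Node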

def register_Ns3PacketMetadata_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3PacketMetadataItem_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return
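
# Illustrative sketch (assumed `ns.network` import path): metadata recording
# is off by default and, in the C++ API, must be enabled before the first
# packet is created for the header/trailer history to be tracked.
#
#   ns.network.PacketMetadata.Enable()
#   # ...create packets, then walk their items via BeginItem()/HasNext()/Next()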

def register_Ns3PacketSocketAddress_methods(root_module, cls):
    ## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress::PacketSocketAddress(ns3::PacketSocketAddress const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSocketAddress const &', 'arg0')])
    ## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress::PacketSocketAddress() [constructor]
    cls.add_constructor([])
    ## packet-socket-address.h (module 'network'): static ns3::PacketSocketAddress ns3::PacketSocketAddress::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::PacketSocketAddress', [param('ns3::Address const &', 'address')], is_static=True)
    ## packet-socket-address.h (module 'network'): ns3::Address ns3::PacketSocketAddress::GetPhysicalAddress() const [member function]
    cls.add_method('GetPhysicalAddress', 'ns3::Address', [], is_const=True)
    ## packet-socket-address.h (module 'network'): uint16_t ns3::PacketSocketAddress::GetProtocol() const [member function]
    cls.add_method('GetProtocol', 'uint16_t', [], is_const=True)
    ## packet-socket-address.h (module 'network'): uint32_t ns3::PacketSocketAddress::GetSingleDevice() const [member function]
    cls.add_method('GetSingleDevice', 'uint32_t', [], is_const=True)
    ## packet-socket-address.h (module 'network'): static bool ns3::PacketSocketAddress::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## packet-socket-address.h (module 'network'): bool ns3::PacketSocketAddress::IsSingleDevice() const [member function]
    cls.add_method('IsSingleDevice', 'bool', [], is_const=True)
    ## packet-socket-address.h (module 'network'): void ns3::PacketSocketAddress::SetAllDevices() [member function]
    cls.add_method('SetAllDevices', 'void', [])
    ## packet-socket-address.h (module 'network'): void ns3::PacketSocketAddress::SetPhysicalAddress(ns3::Address const address) [member function]
    cls.add_method('SetPhysicalAddress', 'void', [param('ns3::Address const', 'address')])
    ## packet-socket-address.h (module 'network'): void ns3::PacketSocketAddress::SetProtocol(uint16_t protocol) [member function]
    cls.add_method('SetProtocol', 'void', [param('uint16_t', 'protocol')])
    ## packet-socket-address.h (module 'network'): void ns3::PacketSocketAddress::SetSingleDevice(uint32_t device) [member function]
    cls.add_method('SetSingleDevice', 'void', [param('uint32_t', 'device')])
    return

def register_Ns3PacketSocketHelper_methods(root_module, cls):
    ## packet-socket-helper.h (module 'network'): ns3::PacketSocketHelper::PacketSocketHelper() [constructor]
    cls.add_constructor([])
    ## packet-socket-helper.h (module 'network'): ns3::PacketSocketHelper::PacketSocketHelper(ns3::PacketSocketHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSocketHelper const &', 'arg0')])
    ## packet-socket-helper.h (module 'network'): void ns3::PacketSocketHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True)
    ## packet-socket-helper.h (module 'network'): void ns3::PacketSocketHelper::Install(std::string nodeName) const [member function]
    cls.add_method('Install', 'void', [param('std::string', 'nodeName')], is_const=True)
    ## packet-socket-helper.h (module 'network'): void ns3::PacketSocketHelper::Install(ns3::NodeContainer c) const [member function]
    cls.add_method('Install', 'void', [param('ns3::NodeContainer', 'c')], is_const=True)
    return

def register_Ns3PacketTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return

def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
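
# Illustrative sketch (assumed `ns.network` import path): the helper bound
# above aggregates a packet-socket factory onto each node so link-level
# sockets can be created there.
#
#   helper = ns.network.PacketSocketHelper()
#   helper.Install(nodes)  # `nodes` is a NodeContainer from earlier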

def register_Ns3PacketTagList_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return

def register_Ns3PacketTagListTagData_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3PbbAddressTlvBlock_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h (module 'network'): ns3::PbbAddressTlvBlock::PbbAddressTlvBlock(ns3::PbbAddressTlvBlock const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbAddressTlvBlock const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbAddressTlvBlock::PbbAddressTlvBlock() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> ns3::PbbAddressTlvBlock::Back() const [member function]
    cls.add_method('Back', 'ns3::Ptr< ns3::PbbAddressTlv >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Begin() [member function]
    cls.add_method('Begin', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Clear() [member function]
    cls.add_method('Clear', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Deserialize(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')])
    ## packetbb.h (module 'network'): bool ns3::PbbAddressTlvBlock::Empty() const [member function]
    cls.add_method('Empty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::End() [member function]
    cls.add_method('End', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > position) [member function]
    cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > last) [member function]
    cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'last')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> ns3::PbbAddressTlvBlock::Front() const [member function]
    cls.add_method('Front', 'ns3::Ptr< ns3::PbbAddressTlv >', [], is_const=True)
    ## packetbb.h (module 'network'): uint32_t ns3::PbbAddressTlvBlock::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Insert(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > position, ns3::Ptr<ns3::PbbAddressTlv> const tlv) [member function]
    cls.add_method('Insert', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'position'), param('ns3::Ptr< ns3::PbbAddressTlv > const', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::PopBack() [member function]
    cls.add_method('PopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::PopFront() [member function]
    cls.add_method('PopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Print(std::ostream & os, int level) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::PushBack(ns3::Ptr<ns3::PbbAddressTlv> tlv) [member function]
    cls.add_method('PushBack', 'void', [param('ns3::Ptr< ns3::PbbAddressTlv >', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::PushFront(ns3::Ptr<ns3::PbbAddressTlv> tlv) [member function]
    cls.add_method('PushFront', 'void', [param('ns3::Ptr< ns3::PbbAddressTlv >', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Serialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True)
    ## packetbb.h (module 'network'): int ns3::PbbAddressTlvBlock::Size() const [member function]
    cls.add_method('Size', 'int', [], is_const=True)
    return

def register_Ns3PbbTlvBlock_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h (module 'network'): ns3::PbbTlvBlock::PbbTlvBlock(ns3::PbbTlvBlock const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbTlvBlock const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbTlvBlock::PbbTlvBlock() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbTlvBlock::Back() const [member function]
    cls.add_method('Back', 'ns3::Ptr< ns3::PbbTlv >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Begin() [member function]
    cls.add_method('Begin', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Clear() [member function]
    cls.add_method('Clear', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Deserialize(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')])
    ## packetbb.h (module 'network'): bool ns3::PbbTlvBlock::Empty() const [member function]
    cls.add_method('Empty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::End() [member function]
    cls.add_method('End', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > position) [member function]
    cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > last) [member function]
    cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'last')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbTlvBlock::Front() const [member function]
    cls.add_method('Front', 'ns3::Ptr< ns3::PbbTlv >', [], is_const=True)
    ## packetbb.h (module 'network'): uint32_t ns3::PbbTlvBlock::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Insert(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > position, ns3::Ptr<ns3::PbbTlv> const tlv) [member function]
    cls.add_method('Insert', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'position'), param('ns3::Ptr< ns3::PbbTlv > const', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::PopBack() [member function]
    cls.add_method('PopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::PopFront() [member function]
    cls.add_method('PopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Print(std::ostream & os, int level) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::PushBack(ns3::Ptr<ns3::PbbTlv> tlv) [member function]
    cls.add_method('PushBack', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::PushFront(ns3::Ptr<ns3::PbbTlv> tlv) [member function]
    cls.add_method('PushFront', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Serialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True)
    ## packetbb.h (module 'network'): int ns3::PbbTlvBlock::Size() const [member function]
    cls.add_method('Size', 'int', [], is_const=True)
    return
'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function] cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function] cls.add_method('Read', 'void', [param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable] cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True) ## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable] cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True) return def register_Ns3PcapHelper_methods(root_module, cls): ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')]) ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor] cls.add_constructor([]) ## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=65535, int32_t tzCorrection=0) [member function] cls.add_method('CreateFile', 'ns3::Ptr< ns3::PcapFileWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='65535'), param('int32_t', 'tzCorrection', default_value='0')]) ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function] cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')]) ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function] 
cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')]) return def register_Ns3PcapHelperForDevice_methods(root_module, cls): ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')]) ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor] cls.add_constructor([]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function] cls.add_method('EnablePcapAll', 'void', [param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function] cls.add_method('EnablePcapInternal', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True) return def register_Ns3RandomVariable_methods(root_module, cls): cls.add_output_stream_operator() ## random-variable.h (module 'core'): ns3::RandomVariable::RandomVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::RandomVariable::RandomVariable(ns3::RandomVariable const & o) [copy constructor] 
cls.add_constructor([param('ns3::RandomVariable const &', 'o')]) ## random-variable.h (module 'core'): uint32_t ns3::RandomVariable::GetInteger() const [member function] cls.add_method('GetInteger', 'uint32_t', [], is_const=True) ## random-variable.h (module 'core'): double ns3::RandomVariable::GetValue() const [member function] cls.add_method('GetValue', 'double', [], is_const=True) return def register_Ns3SeedManager_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::SeedManager::SeedManager() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::SeedManager::SeedManager(ns3::SeedManager const & arg0) [copy constructor] cls.add_constructor([param('ns3::SeedManager const &', 'arg0')]) ## random-variable.h (module 'core'): static bool ns3::SeedManager::CheckSeed(uint32_t seed) [member function] cls.add_method('CheckSeed', 'bool', [param('uint32_t', 'seed')], is_static=True) ## random-variable.h (module 'core'): static uint32_t ns3::SeedManager::GetRun() [member function] cls.add_method('GetRun', 'uint32_t', [], is_static=True) ## random-variable.h (module 'core'): static uint32_t ns3::SeedManager::GetSeed() [member function] cls.add_method('GetSeed', 'uint32_t', [], is_static=True) ## random-variable.h (module 'core'): static void ns3::SeedManager::SetRun(uint32_t run) [member function] cls.add_method('SetRun', 'void', [param('uint32_t', 'run')], is_static=True) ## random-variable.h (module 'core'): static void ns3::SeedManager::SetSeed(uint32_t seed) [member function] cls.add_method('SetSeed', 'void', [param('uint32_t', 'seed')], is_static=True) return def register_Ns3SequenceNumber32_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('ns3::SequenceNumber< unsigned int, int > const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('int', 'right')) cls.add_inplace_numeric_operator('+=', param('int', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('int', 'right')) cls.add_inplace_numeric_operator('-=', param('int', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('>=') ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned int, int>::SequenceNumber() [constructor] cls.add_constructor([]) ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned int, int>::SequenceNumber(unsigned int value) [constructor] cls.add_constructor([param('unsigned int', 'value')]) ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned int, int>::SequenceNumber(ns3::SequenceNumber<unsigned int, int> const & value) [copy constructor] cls.add_constructor([param('ns3::SequenceNumber< unsigned int, int > const &', 'value')]) ## sequence-number.h (module 'network'): unsigned int ns3::SequenceNumber<unsigned int, int>::GetValue() const [member function] cls.add_method('GetValue', 'unsigned int', [], is_const=True) return def register_Ns3SequenceNumber16_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber16'], root_module['ns3::SequenceNumber16'], param('ns3::SequenceNumber< unsigned short, short > const 
&', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber16'], root_module['ns3::SequenceNumber16'], param('short int', 'right')) cls.add_inplace_numeric_operator('+=', param('short int', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::SequenceNumber16'], root_module['ns3::SequenceNumber16'], param('short int', 'right')) cls.add_inplace_numeric_operator('-=', param('short int', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('>=') ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned short, short>::SequenceNumber() [constructor] cls.add_constructor([]) ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned short, short>::SequenceNumber(short unsigned int value) [constructor] cls.add_constructor([param('short unsigned int', 'value')]) ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned short, short>::SequenceNumber(ns3::SequenceNumber<unsigned short, short> const & value) [copy constructor] cls.add_constructor([param('ns3::SequenceNumber< unsigned short, short > const &', 'value')]) ## sequence-number.h (module 'network'): short unsigned int ns3::SequenceNumber<unsigned short, short>::GetValue() const [member function] cls.add_method('GetValue', 'short unsigned int', [], is_const=True) return def register_Ns3SequentialVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(ns3::SequentialVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::SequentialVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(double f, double l, double i=1, uint32_t c=1) [constructor] cls.add_constructor([param('double', 'f'), param('double', 'l'), param('double', 'i', default_value='1'), param('uint32_t', 'c', default_value='1')]) ## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(double f, double l, ns3::RandomVariable const & i, uint32_t c=1) [constructor] cls.add_constructor([param('double', 'f'), param('double', 'l'), param('ns3::RandomVariable const &', 'i'), param('uint32_t', 'c', default_value='1')]) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId 
const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Next() [member function] cls.add_method('Next', 'ns3::Time', [], is_static=True, deprecated=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::RunOneEvent() [member function] cls.add_method('RunOneEvent', 'void', [], is_static=True, deprecated=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True) return def register_Ns3SystemWallClockMs_methods(root_module, cls): ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [copy constructor] cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')]) ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs() [constructor] cls.add_constructor([]) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::End() [member function] cls.add_method('End', 'int64_t', []) ## system-wall-clock-ms.h (module 'core'): int64_t 
ns3::SystemWallClockMs::GetElapsedReal() const [member function] cls.add_method('GetElapsedReal', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function] cls.add_method('GetElapsedSystem', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function] cls.add_method('GetElapsedUser', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): void ns3::SystemWallClockMs::Start() [member function] cls.add_method('Start', 'void', []) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void 
ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TriangularVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable(ns3::TriangularVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::TriangularVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable(double s, double l, double mean) [constructor] cls.add_constructor([param('double', 's'), param('double', 'l'), param('double', 'mean')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', 
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info')], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] 
cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] 
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3UniformVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(ns3::UniformVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::UniformVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(double s, double l) [constructor] cls.add_constructor([param('double', 's'), param('double', 'l')]) ## random-variable.h (module 'core'): uint32_t ns3::UniformVariable::GetInteger(uint32_t s, uint32_t l) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 's'), param('uint32_t', 'l')]) ## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue() const [member function] cls.add_method('GetValue', 'double', [], is_const=True) ## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue(double s, double l) [member function] cls.add_method('GetValue', 'double', [param('double', 's'), param('double', 'l')]) return def register_Ns3WeibullVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(ns3::WeibullVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::WeibullVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m) [constructor] cls.add_constructor([param('double', 'm')]) ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s) [constructor] cls.add_constructor([param('double', 'm'), param('double', 's')]) ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s, double b) [constructor] cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')]) return def register_Ns3ZetaVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(ns3::ZetaVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::ZetaVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(double alpha) [constructor] cls.add_constructor([param('double', 'alpha')]) ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable() [constructor] cls.add_constructor([]) return def register_Ns3ZipfVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(ns3::ZipfVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::ZipfVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(long int N, double alpha) [constructor] cls.add_constructor([param('long int', 'N'), param('double', 'alpha')]) ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable() [constructor] cls.add_constructor([]) return def register_Ns3Empty_methods(root_module, cls): ## empty.h 
(module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('+', 
root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_comparison_operator('<') 
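    # Hedged illustration (not part of the generated scan): each operator
    # registered in this function maps one C++ int64x64_t overload onto the
    # matching Python special method (__mul__, __add__, __lt__, and so on) in
    # the pybindgen-generated wrapper, so the wrapped type behaves like a
    # number from Python.  Assuming the built extension is importable as
    # 'ns.core', usage would look roughly like:
    #
    #   import ns.core
    #   a = ns.core.int64x64_t(1.5)      # double constructor (registered below)
    #   b = ns.core.int64x64_t(3, 0)     # (hi, lo) constructor (registered below)
    #   c = (a + b) * a / b              # '+', '*', '/' registered above
    #   print(c.GetDouble(), a < b)      # '<' registered above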
cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) return def register_Ns3Chunk_methods(root_module, cls): ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor] cls.add_constructor([]) ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor] cls.add_constructor([param('ns3::Chunk const &', 'arg0')]) ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function] cls.add_method('GetTypeId', 
'ns3::TypeId', [], is_static=True) ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3ConstantVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(ns3::ConstantVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::ConstantVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(double c) [constructor] cls.add_constructor([param('double', 'c')]) ## random-variable.h (module 'core'): void ns3::ConstantVariable::SetConstant(double c) [member function] cls.add_method('SetConstant', 'void', [param('double', 'c')]) return def register_Ns3DeterministicVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(ns3::DeterministicVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::DeterministicVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(double * d, uint32_t c) [constructor] cls.add_constructor([param('double *', 'd'), param('uint32_t', 'c')]) return def register_Ns3EmpiricalVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable(ns3::EmpiricalVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmpiricalVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): void ns3::EmpiricalVariable::CDF(double v, double c) [member function] cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')]) return def register_Ns3ErlangVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(ns3::ErlangVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::ErlangVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(unsigned int k, double lambda) [constructor] cls.add_constructor([param('unsigned int', 'k'), param('double', 'lambda')]) ## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue() const [member function] cls.add_method('GetValue', 'double', [], is_const=True) ## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue(unsigned int k, double lambda) const [member function] cls.add_method('GetValue', 'double', [param('unsigned int', 'k'), param('double', 'lambda')], is_const=True) return def register_Ns3ExponentialVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(ns3::ExponentialVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::ExponentialVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m) [constructor] cls.add_constructor([param('double', 'm')]) ## 
random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m, double b) [constructor] cls.add_constructor([param('double', 'm'), param('double', 'b')]) return def register_Ns3FlowIdTag_methods(root_module, cls): ## flow-id-tag.h (module 'network'): ns3::FlowIdTag::FlowIdTag(ns3::FlowIdTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::FlowIdTag const &', 'arg0')]) ## flow-id-tag.h (module 'network'): ns3::FlowIdTag::FlowIdTag() [constructor] cls.add_constructor([]) ## flow-id-tag.h (module 'network'): ns3::FlowIdTag::FlowIdTag(uint32_t flowId) [constructor] cls.add_constructor([param('uint32_t', 'flowId')]) ## flow-id-tag.h (module 'network'): static uint32_t ns3::FlowIdTag::AllocateFlowId() [member function] cls.add_method('AllocateFlowId', 'uint32_t', [], is_static=True) ## flow-id-tag.h (module 'network'): void ns3::FlowIdTag::Deserialize(ns3::TagBuffer buf) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buf')], is_virtual=True) ## flow-id-tag.h (module 'network'): uint32_t ns3::FlowIdTag::GetFlowId() const [member function] cls.add_method('GetFlowId', 'uint32_t', [], is_const=True) ## flow-id-tag.h (module 'network'): ns3::TypeId ns3::FlowIdTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## flow-id-tag.h (module 'network'): uint32_t ns3::FlowIdTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## flow-id-tag.h (module 'network'): static ns3::TypeId ns3::FlowIdTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## flow-id-tag.h (module 'network'): void ns3::FlowIdTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## flow-id-tag.h (module 'network'): void ns3::FlowIdTag::Serialize(ns3::TagBuffer buf) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buf')], is_const=True, is_virtual=True) ## flow-id-tag.h (module 'network'): void ns3::FlowIdTag::SetFlowId(uint32_t flowId) [member function] cls.add_method('SetFlowId', 'void', [param('uint32_t', 'flowId')]) return def register_Ns3GammaVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(ns3::GammaVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::GammaVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(double alpha, double beta) [constructor] cls.add_constructor([param('double', 'alpha'), param('double', 'beta')]) ## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue() const [member function] cls.add_method('GetValue', 'double', [], is_const=True) ## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue(double alpha, double beta) const [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')], is_const=True) return def register_Ns3Header_methods(root_module, cls): cls.add_output_stream_operator() ## header.h (module 'network'): ns3::Header::Header() [constructor] cls.add_constructor([]) ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::Header const &', 'arg0')]) ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3IntEmpiricalVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable(ns3::IntEmpiricalVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntEmpiricalVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable() [constructor] cls.add_constructor([]) return def register_Ns3LlcSnapHeader_methods(root_module, cls): ## llc-snap-header.h (module 'network'): ns3::LlcSnapHeader::LlcSnapHeader(ns3::LlcSnapHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::LlcSnapHeader const &', 'arg0')]) ## llc-snap-header.h (module 'network'): ns3::LlcSnapHeader::LlcSnapHeader() [constructor] cls.add_constructor([]) ## llc-snap-header.h (module 'network'): uint32_t ns3::LlcSnapHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## llc-snap-header.h (module 'network'): ns3::TypeId ns3::LlcSnapHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## llc-snap-header.h (module 'network'): uint32_t ns3::LlcSnapHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## llc-snap-header.h (module 'network'): uint16_t ns3::LlcSnapHeader::GetType() [member function] cls.add_method('GetType', 'uint16_t', []) ## llc-snap-header.h (module 'network'): static ns3::TypeId ns3::LlcSnapHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## llc-snap-header.h (module 'network'): void ns3::LlcSnapHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## llc-snap-header.h (module 'network'): void ns3::LlcSnapHeader::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## llc-snap-header.h (module 'network'): void ns3::LlcSnapHeader::SetType(uint16_t type) [member function] cls.add_method('SetType', 'void', [param('uint16_t', 'type')]) return def register_Ns3LogNormalVariable_methods(root_module, cls): ## 
random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(ns3::LogNormalVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::LogNormalVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(double mu, double sigma) [constructor] cls.add_constructor([param('double', 'mu'), param('double', 'sigma')]) return def register_Ns3NormalVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(ns3::NormalVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::NormalVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v) [constructor] cls.add_constructor([param('double', 'm'), param('double', 'v')]) ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v, double b) [constructor] cls.add_constructor([param('double', 'm'), param('double', 'v'), param('double', 'b')]) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Start() [member function] cls.add_method('Start', 'void', []) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoStart() [member function] cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> 
ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3PacketBurst_methods(root_module, cls): ## packet-burst.h (module 'network'): ns3::PacketBurst::PacketBurst(ns3::PacketBurst const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketBurst const &', 'arg0')]) ## packet-burst.h (module 'network'): ns3::PacketBurst::PacketBurst() [constructor] cls.add_constructor([]) ## packet-burst.h (module 'network'): void ns3::PacketBurst::AddPacket(ns3::Ptr<ns3::Packet> packet) [member function] cls.add_method('AddPacket', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet')]) ## packet-burst.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::Packet> > ns3::PacketBurst::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::Ptr< ns3::Packet > >', [], is_const=True) ## packet-burst.h (module 'network'): ns3::Ptr<ns3::PacketBurst> ns3::PacketBurst::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::PacketBurst >', [], is_const=True) ## packet-burst.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::Packet> > ns3::PacketBurst::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::Ptr< ns3::Packet > >', [], is_const=True) ## packet-burst.h (module 'network'): uint32_t ns3::PacketBurst::GetNPackets() const [member function] cls.add_method('GetNPackets', 'uint32_t', [], is_const=True) ## packet-burst.h (module 'network'): std::list<ns3::Ptr<ns3::Packet>, std::allocator<ns3::Ptr<ns3::Packet> > > ns3::PacketBurst::GetPackets() const [member function] cls.add_method('GetPackets', 'std::list< ns3::Ptr< ns3::Packet > >', [], is_const=True) ## packet-burst.h (module 'network'): uint32_t ns3::PacketBurst::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet-burst.h (module 'network'): static ns3::TypeId ns3::PacketBurst::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## packet-burst.h (module 'network'): void ns3::PacketBurst::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True) return def register_Ns3ParetoVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(ns3::ParetoVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::ParetoVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m) [constructor] cls.add_constructor([param('double', 'm')]) ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s) [constructor] cls.add_constructor([param('double', 'm'), param('double', 's')]) ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s, double b) [constructor] cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')]) ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params) [constructor] cls.add_constructor([param('std::pair< double, double >', 'params')]) ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params, double b) [constructor] cls.add_constructor([param('std::pair< double, double >', 'params'), param('double', 'b')]) 
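    # NOTE (hedged sketch, not generated output): the constructors above mirror
    # the C++ ParetoVariable overloads one-to-one, so once the extension is
    # built each registered signature is callable directly from Python.
    # Assuming the modular 'ns.core' import path of this era's bindings:
    #   import ns.core
    #   v = ns.core.ParetoVariable(1.0, 1.5)   # mean, shape
    #   sample = v.GetValue()                  # classic RandomVariable API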
return def register_Ns3PcapFileWrapper_methods(root_module, cls): ## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor] cls.add_constructor([]) ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function] cls.add_method('Fail', 'bool', [], is_const=True) ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function] cls.add_method('Eof', 'bool', [], is_const=True) ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function] cls.add_method('Clear', 'void', []) ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function] cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')]) ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function] cls.add_method('Close', 'void', []) ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function] cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')]) ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function] cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')]) ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function] cls.add_method('GetMagic', 'uint32_t', []) ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function] cls.add_method('GetVersionMajor', 'uint16_t', []) ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function] cls.add_method('GetVersionMinor', 'uint16_t', []) ## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function] cls.add_method('GetTimeZoneOffset', 'int32_t', []) ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function] cls.add_method('GetSigFigs', 'uint32_t', []) ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function] cls.add_method('GetSnapLen', 'uint32_t', []) ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function] cls.add_method('GetDataLinkType', 'uint32_t', []) return 
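# NOTE (hedged sketch, not generated output): each register_*_methods function
# in this file maps one C++ class onto pybindgen declarations; nothing here
# executes at simulation time. As an illustration only -- the 'ns.network'
# import path and the DropTailQueue subclass are assumptions about the
# surrounding ns-3 build, not part of this file -- the ns3::Queue methods
# registered below would typically be exercised from Python like so:
#   import ns.network
#   q = ns.network.DropTailQueue()            # concrete Queue subclass
#   ok = q.Enqueue(ns.network.Packet(100))    # bool, dispatched to DoEnqueue
#   print(q.GetNPackets(), q.GetNBytes())     # queue statistics accessors
#   p = q.Dequeue()                           # Ptr<Packet>, null when empty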
def register_Ns3Queue_methods(root_module, cls): ## queue.h (module 'network'): ns3::Queue::Queue(ns3::Queue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Queue const &', 'arg0')]) ## queue.h (module 'network'): ns3::Queue::Queue() [constructor] cls.add_constructor([]) ## queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Queue::Dequeue() [member function] cls.add_method('Dequeue', 'ns3::Ptr< ns3::Packet >', []) ## queue.h (module 'network'): void ns3::Queue::DequeueAll() [member function] cls.add_method('DequeueAll', 'void', []) ## queue.h (module 'network'): bool ns3::Queue::Enqueue(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')]) ## queue.h (module 'network'): uint32_t ns3::Queue::GetNBytes() const [member function] cls.add_method('GetNBytes', 'uint32_t', [], is_const=True) ## queue.h (module 'network'): uint32_t ns3::Queue::GetNPackets() const [member function] cls.add_method('GetNPackets', 'uint32_t', [], is_const=True) ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalDroppedBytes() const [member function] cls.add_method('GetTotalDroppedBytes', 'uint32_t', [], is_const=True) ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalDroppedPackets() const [member function] cls.add_method('GetTotalDroppedPackets', 'uint32_t', [], is_const=True) ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalReceivedBytes() const [member function] cls.add_method('GetTotalReceivedBytes', 'uint32_t', [], is_const=True) ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalReceivedPackets() const [member function] cls.add_method('GetTotalReceivedPackets', 'uint32_t', [], is_const=True) ## queue.h (module 'network'): static ns3::TypeId ns3::Queue::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## queue.h (module 'network'): bool ns3::Queue::IsEmpty() const [member function] cls.add_method('IsEmpty', 'bool', [], is_const=True) ## queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::Queue::Peek() const [member function] cls.add_method('Peek', 'ns3::Ptr< ns3::Packet const >', [], is_const=True) ## queue.h (module 'network'): void ns3::Queue::ResetStatistics() [member function] cls.add_method('ResetStatistics', 'void', []) ## queue.h (module 'network'): void ns3::Queue::Drop(ns3::Ptr<ns3::Packet> packet) [member function] cls.add_method('Drop', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet')], visibility='protected') ## queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Queue::DoDequeue() [member function] cls.add_method('DoDequeue', 'ns3::Ptr< ns3::Packet >', [], is_pure_virtual=True, visibility='private', is_virtual=True) ## queue.h (module 'network'): bool ns3::Queue::DoEnqueue(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], is_pure_virtual=True, visibility='private', is_virtual=True) ## queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::Queue::DoPeek() const [member function] cls.add_method('DoPeek', 'ns3::Ptr< ns3::Packet const >', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) return def register_Ns3RadiotapHeader_methods(root_module, cls): ## radiotap-header.h (module 'network'): ns3::RadiotapHeader::RadiotapHeader(ns3::RadiotapHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::RadiotapHeader const &', 'arg0')]) ## radiotap-header.h (module 'network'): 
ns3::RadiotapHeader::RadiotapHeader() [constructor] cls.add_constructor([]) ## radiotap-header.h (module 'network'): uint32_t ns3::RadiotapHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## radiotap-header.h (module 'network'): uint8_t ns3::RadiotapHeader::GetAntennaNoisePower() const [member function] cls.add_method('GetAntennaNoisePower', 'uint8_t', [], is_const=True) ## radiotap-header.h (module 'network'): uint8_t ns3::RadiotapHeader::GetAntennaSignalPower() const [member function] cls.add_method('GetAntennaSignalPower', 'uint8_t', [], is_const=True) ## radiotap-header.h (module 'network'): uint16_t ns3::RadiotapHeader::GetChannelFlags() const [member function] cls.add_method('GetChannelFlags', 'uint16_t', [], is_const=True) ## radiotap-header.h (module 'network'): uint16_t ns3::RadiotapHeader::GetChannelFrequency() const [member function] cls.add_method('GetChannelFrequency', 'uint16_t', [], is_const=True) ## radiotap-header.h (module 'network'): uint8_t ns3::RadiotapHeader::GetFrameFlags() const [member function] cls.add_method('GetFrameFlags', 'uint8_t', [], is_const=True) ## radiotap-header.h (module 'network'): ns3::TypeId ns3::RadiotapHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## radiotap-header.h (module 'network'): uint8_t ns3::RadiotapHeader::GetRate() const [member function] cls.add_method('GetRate', 'uint8_t', [], is_const=True) ## radiotap-header.h (module 'network'): uint32_t ns3::RadiotapHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## radiotap-header.h (module 'network'): uint64_t ns3::RadiotapHeader::GetTsft() const [member function] cls.add_method('GetTsft', 'uint64_t', [], is_const=True) ## radiotap-header.h (module 'network'): static ns3::TypeId ns3::RadiotapHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetAntennaNoisePower(double noise) [member function] cls.add_method('SetAntennaNoisePower', 'void', [param('double', 'noise')]) ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetAntennaSignalPower(double signal) [member function] cls.add_method('SetAntennaSignalPower', 'void', [param('double', 'signal')]) ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetChannelFrequencyAndFlags(uint16_t frequency, uint16_t flags) [member function] cls.add_method('SetChannelFrequencyAndFlags', 'void', [param('uint16_t', 'frequency'), param('uint16_t', 'flags')]) ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetFrameFlags(uint8_t flags) [member function] cls.add_method('SetFrameFlags', 'void', [param('uint8_t', 'flags')]) ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetRate(uint8_t rate) [member function] cls.add_method('SetRate', 'void', 
[param('uint8_t', 'rate')]) ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetTsft(uint64_t tsft) [member function] cls.add_method('SetTsft', 'void', [param('uint64_t', 'tsft')]) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## 
simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) 
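    # NOTE (descriptive comment, not generated output): the
    # register_Ns3SimpleRefCount__* functions in this stretch are all
    # instantiations of one pattern -- default constructor, copy constructor,
    # and the static Cleanup() hook -- with only the <T, PARENT, DELETER>
    # template arguments of ns3::SimpleRefCount varying per wrapped class.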
return def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3PbbAddressBlock_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbAddressBlock__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter< ns3::PbbAddressBlock > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3PbbMessage_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbMessage__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter< ns3::PbbMessage > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3PbbPacket_Ns3Header_Ns3DefaultDeleter__lt__ns3PbbPacket__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter< ns3::PbbPacket > > const &', 'o')]) ## simple-ref-count.h (module 
'core'): static void ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3PbbTlv_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbTlv__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter< ns3::PbbTlv > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Socket_methods(root_module, cls): ## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor] cls.add_constructor([param('ns3::Socket const &', 'arg0')]) ## socket.h (module 'network'): ns3::Socket::Socket() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function] cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind() [member function] cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function] cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Close() [member function] cls.add_method('Close', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function] cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function] 
cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')], is_static=True) ## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function] cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function] cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', []) ## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function] cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function] cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function] cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function] cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function] cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Listen() [member function] cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', []) ## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Recv', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int 
ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')]) ## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function] cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function] cls.add_method('SendTo', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')]) ## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function] cls.add_method('SetAcceptCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')]) ## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function] cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function] cls.add_method('SetCloseCallbacks', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')]) ## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function] cls.add_method('SetConnectCallback', 
'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')]) ## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function] cls.add_method('SetDataSentCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function] cls.add_method('SetRecvCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function] cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')]) ## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function] cls.add_method('SetSendCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')]) ## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function] cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function] cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function] cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function] cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function] cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function] cls.add_method('NotifyDataRecv', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function] cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function] cls.add_method('NotifyErrorClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) 
[member function] cls.add_method('NotifyNewConnectionCreated', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function] cls.add_method('NotifyNormalClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function] cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected') return def register_Ns3SocketAddressTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag(ns3::SocketAddressTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketAddressTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::Address ns3::SocketAddressTag::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketAddressTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketAddressTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketAddressTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::SetAddress(ns3::Address addr) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')]) return def register_Ns3SocketFactory_methods(root_module, cls): ## socket-factory.h (module 'network'): ns3::SocketFactory::SocketFactory(ns3::SocketFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketFactory const &', 'arg0')]) ## socket-factory.h (module 'network'): ns3::SocketFactory::SocketFactory() [constructor] cls.add_constructor([]) ## socket-factory.h (module 'network'): ns3::Ptr<ns3::Socket> ns3::SocketFactory::CreateSocket() [member function] cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [], is_pure_virtual=True, is_virtual=True) ## socket-factory.h (module 'network'): static ns3::TypeId ns3::SocketFactory::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3SocketIpTtlTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag(ns3::SocketIpTtlTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag() [constructor] 
cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTtlTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpTtlTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpTtlTag::GetTtl() const [member function] cls.add_method('GetTtl', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTtlTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::SetTtl(uint8_t ttl) [member function] cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')]) return def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag(ns3::SocketSetDontFragmentTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Disable() [member function] cls.add_method('Disable', 'void', []) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Enable() [member function] cls.add_method('Enable', 'void', []) ## socket.h (module 'network'): ns3::TypeId ns3::SocketSetDontFragmentTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketSetDontFragmentTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketSetDontFragmentTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): bool ns3::SocketSetDontFragmentTag::IsEnabled() const [member function] cls.add_method('IsEnabled', 'bool', [], is_const=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, 
is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'value')]) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] 
cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), 
param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Trailer_methods(root_module, cls): cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Application_methods(root_module, cls): ## application.h (module 'network'): ns3::Application::Application(ns3::Application const & arg0) [copy constructor] cls.add_constructor([param('ns3::Application const &', 'arg0')]) ## application.h (module 'network'): ns3::Application::Application() [constructor] cls.add_constructor([]) ## application.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Application::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True) ## application.h (module 'network'): static ns3::TypeId ns3::Application::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## application.h (module 'network'): void ns3::Application::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< 
ns3::Node >', 'node')]) ## application.h (module 'network'): void ns3::Application::SetStartTime(ns3::Time start) [member function] cls.add_method('SetStartTime', 'void', [param('ns3::Time', 'start')]) ## application.h (module 'network'): void ns3::Application::SetStopTime(ns3::Time stop) [member function] cls.add_method('SetStopTime', 'void', [param('ns3::Time', 'stop')]) ## application.h (module 'network'): void ns3::Application::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## application.h (module 'network'): void ns3::Application::DoStart() [member function] cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True) ## application.h (module 'network'): void ns3::Application::StartApplication() [member function] cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True) ## application.h (module 'network'): void ns3::Application::StopApplication() [member function] cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): 
ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h 
(module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3Channel_methods(root_module, cls): ## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor] cls.add_constructor([param('ns3::Channel const &', 'arg0')]) ## channel.h (module 'network'): ns3::Channel::Channel() [constructor] cls.add_constructor([]) ## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) ## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3DataRateChecker_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')]) return def register_Ns3DataRateValue_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor] cls.add_constructor([param('ns3::DataRate const &', 'value')]) ## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function] 
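    # Editor's note: the registrations below follow the generated
    # AttributeValue pattern (Copy/Get/Set plus string round-tripping) that
    # every ns3::*Value class in this file repeats. A hedged, illustrative
    # sketch of what this enables from Python once the extension is built
    # (the 'ns.core'/'ns.network' module names are an assumption about the
    # packaging, not part of this file):
    #
    #   import ns.core, ns.network
    #   rate = ns.network.DataRateValue(ns.network.DataRate("5Mbps"))
    #   ns.core.Config.SetDefault("ns3::PointToPointNetDevice::DataRate", rate)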
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function]
    cls.add_method('Get', 'ns3::DataRate', [], is_const=True)
    ## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::DataRate const &', 'value')])
    return

def register_Ns3DropTailQueue_methods(root_module, cls):
    ## drop-tail-queue.h (module 'network'): ns3::DropTailQueue::DropTailQueue(ns3::DropTailQueue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DropTailQueue const &', 'arg0')])
    ## drop-tail-queue.h (module 'network'): ns3::DropTailQueue::DropTailQueue() [constructor]
    cls.add_constructor([])
    ## drop-tail-queue.h (module 'network'): ns3::DropTailQueue::Mode ns3::DropTailQueue::GetMode() [member function]
    cls.add_method('GetMode', 'ns3::DropTailQueue::Mode', [])
    ## drop-tail-queue.h (module 'network'): static ns3::TypeId ns3::DropTailQueue::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## drop-tail-queue.h (module 'network'): void ns3::DropTailQueue::SetMode(ns3::DropTailQueue::Mode mode) [member function]
    cls.add_method('SetMode', 'void', [param('ns3::DropTailQueue::Mode', 'mode')])
    ## drop-tail-queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::DropTailQueue::DoDequeue() [member function]
    cls.add_method('DoDequeue', 'ns3::Ptr< ns3::Packet >', [], visibility='private', is_virtual=True)
    ## drop-tail-queue.h (module 'network'): bool ns3::DropTailQueue::DoEnqueue(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True)
    ## drop-tail-queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::DropTailQueue::DoPeek() const [member function]
    cls.add_method('DoPeek', 'ns3::Ptr< ns3::Packet const >', [], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3ErrorModel_methods(root_module, cls):
    ## error-model.h (module 'network'): ns3::ErrorModel::ErrorModel(ns3::ErrorModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ErrorModel const &', 'arg0')])
    ## error-model.h (module 'network'): ns3::ErrorModel::ErrorModel() [constructor]
    cls.add_constructor([])
    ## error-model.h (module 'network'): void ns3::ErrorModel::Disable() [member function]
    cls.add_method('Disable', 'void', [])
    ## error-model.h (module 'network'): void ns3::ErrorModel::Enable() [member function]
    cls.add_method('Enable', 'void', [])
    ## error-model.h (module 'network'): static ns3::TypeId ns3::ErrorModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## error-model.h (module 'network'): bool ns3::ErrorModel::IsCorrupt(ns3::Ptr<ns3::Packet> pkt) [member function]
    cls.add_method('IsCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'pkt')])
    ## error-model.h (module 'network'): bool ns3::ErrorModel::IsEnabled() const [member function]
    cls.add_method('IsEnabled', 'bool', [], is_const=True)
    ## error-model.h (module 'network'): void ns3::ErrorModel::Reset() [member function]
    cls.add_method('Reset', 'void', [])
    ## error-model.h (module 'network'): bool ns3::ErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> arg0) [member function]
    cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'arg0')], is_pure_virtual=True, visibility='private', is_virtual=True)
    ## error-model.h (module 'network'): void ns3::ErrorModel::DoReset() [member function]
    cls.add_method('DoReset', 'void', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    return

def register_Ns3EthernetHeader_methods(root_module, cls):
    ## ethernet-header.h (module 'network'): ns3::EthernetHeader::EthernetHeader(ns3::EthernetHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EthernetHeader const &', 'arg0')])
    ## ethernet-header.h (module 'network'): ns3::EthernetHeader::EthernetHeader(bool hasPreamble) [constructor]
    cls.add_constructor([param('bool', 'hasPreamble')])
    ## ethernet-header.h (module 'network'): ns3::EthernetHeader::EthernetHeader() [constructor]
    cls.add_constructor([])
    ## ethernet-header.h (module 'network'): uint32_t ns3::EthernetHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## ethernet-header.h (module 'network'): ns3::Mac48Address ns3::EthernetHeader::GetDestination() const [member function]
    cls.add_method('GetDestination', 'ns3::Mac48Address', [], is_const=True)
    ## ethernet-header.h (module 'network'): uint32_t ns3::EthernetHeader::GetHeaderSize() const [member function]
    cls.add_method('GetHeaderSize', 'uint32_t', [], is_const=True)
    ## ethernet-header.h (module 'network'): ns3::TypeId ns3::EthernetHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## ethernet-header.h (module 'network'): uint16_t ns3::EthernetHeader::GetLengthType() const [member function]
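    # Editor's note: GetLengthType() below exposes the 802.3 length/EtherType
    # field; together with the Serialize/Deserialize registrations above it
    # lets Python code build and parse Ethernet frames. Illustrative sketch
    # only -- the 'ns.network' module path is an assumption:
    #
    #   import ns.network
    #   hdr = ns.network.EthernetHeader()
    #   hdr.SetSource(ns.network.Mac48Address("00:00:00:00:00:01"))
    #   pkt = ns.network.Packet()
    #   pkt.AddHeader(hdr)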
cls.add_method('GetLengthType', 'uint16_t', [], is_const=True) ## ethernet-header.h (module 'network'): ns3::ethernet_header_t ns3::EthernetHeader::GetPacketType() const [member function] cls.add_method('GetPacketType', 'ns3::ethernet_header_t', [], is_const=True) ## ethernet-header.h (module 'network'): uint64_t ns3::EthernetHeader::GetPreambleSfd() const [member function] cls.add_method('GetPreambleSfd', 'uint64_t', [], is_const=True) ## ethernet-header.h (module 'network'): uint32_t ns3::EthernetHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## ethernet-header.h (module 'network'): ns3::Mac48Address ns3::EthernetHeader::GetSource() const [member function] cls.add_method('GetSource', 'ns3::Mac48Address', [], is_const=True) ## ethernet-header.h (module 'network'): static ns3::TypeId ns3::EthernetHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::SetDestination(ns3::Mac48Address destination) [member function] cls.add_method('SetDestination', 'void', [param('ns3::Mac48Address', 'destination')]) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::SetLengthType(uint16_t size) [member function] cls.add_method('SetLengthType', 'void', [param('uint16_t', 'size')]) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::SetPreambleSfd(uint64_t preambleSfd) [member function] cls.add_method('SetPreambleSfd', 'void', [param('uint64_t', 'preambleSfd')]) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::SetSource(ns3::Mac48Address source) [member function] cls.add_method('SetSource', 'void', [param('ns3::Mac48Address', 'source')]) return def register_Ns3EthernetTrailer_methods(root_module, cls): ## ethernet-trailer.h (module 'network'): ns3::EthernetTrailer::EthernetTrailer(ns3::EthernetTrailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::EthernetTrailer const &', 'arg0')]) ## ethernet-trailer.h (module 'network'): ns3::EthernetTrailer::EthernetTrailer() [constructor] cls.add_constructor([]) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::CalcFcs(ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('CalcFcs', 'void', [param('ns3::Ptr< ns3::Packet const >', 'p')]) ## ethernet-trailer.h (module 'network'): bool ns3::EthernetTrailer::CheckFcs(ns3::Ptr<const ns3::Packet> p) const [member function] cls.add_method('CheckFcs', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p')], is_const=True) ## ethernet-trailer.h (module 'network'): uint32_t ns3::EthernetTrailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_virtual=True) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::EnableFcs(bool enable) [member function] cls.add_method('EnableFcs', 'void', [param('bool', 'enable')]) ## ethernet-trailer.h (module 'network'): uint32_t 
ns3::EthernetTrailer::GetFcs() [member function] cls.add_method('GetFcs', 'uint32_t', []) ## ethernet-trailer.h (module 'network'): ns3::TypeId ns3::EthernetTrailer::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## ethernet-trailer.h (module 'network'): uint32_t ns3::EthernetTrailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## ethernet-trailer.h (module 'network'): uint32_t ns3::EthernetTrailer::GetTrailerSize() const [member function] cls.add_method('GetTrailerSize', 'uint32_t', [], is_const=True) ## ethernet-trailer.h (module 'network'): static ns3::TypeId ns3::EthernetTrailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::Serialize(ns3::Buffer::Iterator end) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'end')], is_const=True, is_virtual=True) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::SetFcs(uint32_t fcs) [member function] cls.add_method('SetFcs', 'void', [param('uint32_t', 'fcs')]) return def register_Ns3EventImpl_methods(root_module, cls): ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventImpl const &', 'arg0')]) ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor] cls.add_constructor([]) ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function] cls.add_method('Invoke', 'void', []) ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function] cls.add_method('IsCancelled', 'bool', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function] cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## 
ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')]) return def 
register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')]) return def register_Ns3Ipv6PrefixValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-address.h (module 'network'): std::string 
ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')]) return def register_Ns3ListErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): ns3::ListErrorModel::ListErrorModel(ns3::ListErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::ListErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::ListErrorModel::ListErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): std::list<unsigned int, std::allocator<unsigned int> > ns3::ListErrorModel::GetList() const [member function] cls.add_method('GetList', 'std::list< unsigned int >', [], is_const=True) ## error-model.h (module 'network'): static ns3::TypeId ns3::ListErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): void ns3::ListErrorModel::SetList(std::list<unsigned int, std::allocator<unsigned int> > const & packetlist) [member function] cls.add_method('SetList', 'void', [param('std::list< unsigned int > const &', 'packetlist')]) ## error-model.h (module 'network'): bool ns3::ListErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): void ns3::ListErrorModel::DoReset() [member function] cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True) return def register_Ns3Mac48AddressChecker_methods(root_module, cls): ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')]) return def register_Ns3Mac48AddressValue_methods(root_module, cls): ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor] cls.add_constructor([param('ns3::Mac48Address const &', 'value')]) ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function] 
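    # Editor's note: the call below is typical of how this whole file is
    # produced -- the ns-3 API scanner emits one pybindgen statement per
    # C++ member, so hand edits are normally lost on regeneration. A minimal
    # sketch of the same pattern written by hand (hypothetical module name,
    # for illustration only):
    #
    #   from pybindgen import Module, param
    #   mod = Module('example')
    #   klass = mod.add_class('Mac48AddressValue')
    #   klass.add_method('Get', 'ns3::Mac48Address', [], is_const=True)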
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return

def register_Ns3NetDevice_methods(root_module, cls):
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool
ns3::NetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, 
ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3NixVector_methods(root_module, cls): cls.add_output_stream_operator() ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor] cls.add_constructor([]) ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor] cls.add_constructor([param('ns3::NixVector const &', 'o')]) ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function] cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function] cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True) ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function] cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function] cls.add_method('GetRemainingBits', 'uint32_t', []) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 
'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')]) ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 'network'): void ns3::Node::DoStart() 
[member function] cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3OutputStreamWrapper_methods(root_module, cls): ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor] cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')]) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor] cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')]) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor] cls.add_constructor([param('std::ostream *', 'os')]) ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function] cls.add_method('GetStream', 'std::ostream *', []) return def register_Ns3Packet_methods(root_module, cls): cls.add_output_stream_operator() ## packet.h (module 'network'): ns3::Packet::Packet() [constructor] cls.add_constructor([]) ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor] cls.add_constructor([param('ns3::Packet 
const &', 'o')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor] cls.add_constructor([param('uint32_t', 'size')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')]) ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function] cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')]) ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function] cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')]) ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], is_static=True) ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): 
ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], deprecated=True, is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h (module 
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'arg0')])
    return

def register_Ns3PacketSocket_methods(root_module, cls):
    ## packet-socket.h (module 'network'): ns3::PacketSocket::PacketSocket(ns3::PacketSocket const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSocket const &', 'arg0')])
    ## packet-socket.h (module 'network'): ns3::PacketSocket::PacketSocket() [constructor]
    cls.add_constructor([])
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Bind() [member function]
    cls.add_method('Bind', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Bind(ns3::Address const & address) [member function]
    cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Close() [member function]
    cls.add_method('Close', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Connect(ns3::Address const & address) [member function]
    cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_virtual=True)
    ## packet-socket.h (module 'network'): bool ns3::PacketSocket::GetAllowBroadcast() const [member function]
    cls.add_method('GetAllowBroadcast', 'bool', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Socket::SocketErrno ns3::PacketSocket::GetErrno() const [member function]
    cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::PacketSocket::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): uint32_t ns3::PacketSocket::GetRxAvailable() const [member function]
    cls.add_method('GetRxAvailable', 'uint32_t', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::GetSockName(ns3::Address & address) const [member function]
    cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Socket::SocketType ns3::PacketSocket::GetSocketType() const [member function]
    cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): uint32_t ns3::PacketSocket::GetTxAvailable() const [member function]
    cls.add_method('GetTxAvailable', 'uint32_t', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): static ns3::TypeId ns3::PacketSocket::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Listen() [member function]
    cls.add_method('Listen', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::PacketSocket::Recv(uint32_t maxSize, uint32_t flags) [member function]
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::PacketSocket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
    cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_virtual=True)
    ## packet-socket.h (module 'network'): bool ns3::PacketSocket::SetAllowBroadcast(bool allowBroadcast) [member function]
    cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_virtual=True)
    ## packet-socket.h (module 'network'): void ns3::PacketSocket::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::ShutdownRecv() [member function]
    cls.add_method('ShutdownRecv', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::ShutdownSend() [member function]
    cls.add_method('ShutdownSend', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): void ns3::PacketSocket::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3PacketSocketFactory_methods(root_module, cls):
    ## packet-socket-factory.h (module 'network'): ns3::PacketSocketFactory::PacketSocketFactory(ns3::PacketSocketFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSocketFactory const &', 'arg0')])
    ## packet-socket-factory.h (module 'network'): ns3::PacketSocketFactory::PacketSocketFactory() [constructor]
    cls.add_constructor([])
    ## packet-socket-factory.h (module 'network'): ns3::Ptr<ns3::Socket> ns3::PacketSocketFactory::CreateSocket() [member function]
    cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [], is_virtual=True)
    ## packet-socket-factory.h (module 'network'): static ns3::TypeId ns3::PacketSocketFactory::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
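
# Illustrative sketch, not scanner output: how the PacketSocket bindings
# registered above are typically driven from Python.  The ns.core/ns.network
# import paths and the availability of PacketSocketHelper are assumptions
# about the surrounding build, not part of this generated file.
def _example_packet_socket():
    import ns.core
    import ns.network
    node = ns.network.Node()
    # Aggregate a PacketSocketFactory onto the node (assumed helper).
    ns.network.PacketSocketHelper().Install(node)
    tid = ns.core.TypeId.LookupByName('ns3::PacketSocketFactory')
    sock = ns.network.Socket.CreateSocket(node, tid)
    sock.Bind()    # 0 on success; Connect()/Send()/Recv() use the same int convention
    sock.Close()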
def register_Ns3PbbAddressBlock_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h (module 'network'): ns3::PbbAddressBlock::PbbAddressBlock(ns3::PbbAddressBlock const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbAddressBlock const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbAddressBlock::PbbAddressBlock() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlock::AddressBack() const [member function]
    cls.add_method('AddressBack', 'ns3::Address', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressBegin() [member function]
    cls.add_method('AddressBegin', 'std::_List_iterator< ns3::Address >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Address> ns3::PbbAddressBlock::AddressBegin() const [member function]
    cls.add_method('AddressBegin', 'std::_List_const_iterator< ns3::Address >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressClear() [member function]
    cls.add_method('AddressClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbAddressBlock::AddressEmpty() const [member function]
    cls.add_method('AddressEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressEnd() [member function]
    cls.add_method('AddressEnd', 'std::_List_iterator< ns3::Address >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Address> ns3::PbbAddressBlock::AddressEnd() const [member function]
    cls.add_method('AddressEnd', 'std::_List_const_iterator< ns3::Address >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressErase(std::_List_iterator<ns3::Address> position) [member function]
    cls.add_method('AddressErase', 'std::_List_iterator< ns3::Address >', [param('std::_List_iterator< ns3::Address >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressErase(std::_List_iterator<ns3::Address> first, std::_List_iterator<ns3::Address> last) [member function]
    cls.add_method('AddressErase', 'std::_List_iterator< ns3::Address >', [param('std::_List_iterator< ns3::Address >', 'first'), param('std::_List_iterator< ns3::Address >', 'last')])
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlock::AddressFront() const [member function]
    cls.add_method('AddressFront', 'ns3::Address', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressInsert(std::_List_iterator<ns3::Address> position, ns3::Address const value) [member function]
    cls.add_method('AddressInsert', 'std::_List_iterator< ns3::Address >', [param('std::_List_iterator< ns3::Address >', 'position'), param('ns3::Address const', 'value')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressPopBack() [member function]
    cls.add_method('AddressPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressPopFront() [member function]
    cls.add_method('AddressPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressPushBack(ns3::Address address) [member function]
    cls.add_method('AddressPushBack', 'void', [param('ns3::Address', 'address')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressPushFront(ns3::Address address) [member function]
    cls.add_method('AddressPushFront', 'void', [param('ns3::Address', 'address')])
    ## packetbb.h (module 'network'): int ns3::PbbAddressBlock::AddressSize() const [member function]
    cls.add_method('AddressSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::Deserialize(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')])
    ## packetbb.h (module 'network'): uint32_t ns3::PbbAddressBlock::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlock::PrefixBack() const [member function]
    cls.add_method('PrefixBack', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixBegin() [member function]
    cls.add_method('PrefixBegin', 'std::_List_iterator< unsigned char >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<unsigned char> ns3::PbbAddressBlock::PrefixBegin() const [member function]
    cls.add_method('PrefixBegin', 'std::_List_const_iterator< unsigned char >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixClear() [member function]
    cls.add_method('PrefixClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbAddressBlock::PrefixEmpty() const [member function]
    cls.add_method('PrefixEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixEnd() [member function]
    cls.add_method('PrefixEnd', 'std::_List_iterator< unsigned char >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<unsigned char> ns3::PbbAddressBlock::PrefixEnd() const [member function]
    cls.add_method('PrefixEnd', 'std::_List_const_iterator< unsigned char >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixErase(std::_List_iterator<unsigned char> position) [member function]
    cls.add_method('PrefixErase', 'std::_List_iterator< unsigned char >', [param('std::_List_iterator< unsigned char >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixErase(std::_List_iterator<unsigned char> first, std::_List_iterator<unsigned char> last) [member function]
    cls.add_method('PrefixErase', 'std::_List_iterator< unsigned char >', [param('std::_List_iterator< unsigned char >', 'first'), param('std::_List_iterator< unsigned char >', 'last')])
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlock::PrefixFront() const [member function]
    cls.add_method('PrefixFront', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixInsert(std::_List_iterator<unsigned char> position, uint8_t const value) [member function]
    cls.add_method('PrefixInsert', 'std::_List_iterator< unsigned char >', [param('std::_List_iterator< unsigned char >', 'position'), param('uint8_t const', 'value')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixPopBack() [member function]
    cls.add_method('PrefixPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixPopFront() [member function]
    cls.add_method('PrefixPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixPushBack(uint8_t prefix) [member function]
    cls.add_method('PrefixPushBack', 'void', [param('uint8_t', 'prefix')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixPushFront(uint8_t prefix) [member function]
    cls.add_method('PrefixPushFront', 'void', [param('uint8_t', 'prefix')])
    ## packetbb.h (module 'network'): int ns3::PbbAddressBlock::PrefixSize() const [member function]
    cls.add_method('PrefixSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::Print(std::ostream & os, int level) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::Serialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True)
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> ns3::PbbAddressBlock::TlvBack() [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbAddressTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> const ns3::PbbAddressBlock::TlvBack() const [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbAddressTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvBegin() [member function]
    cls.add_method('TlvBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvBegin() const [member function]
    cls.add_method('TlvBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvClear() [member function]
    cls.add_method('TlvClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbAddressBlock::TlvEmpty() const [member function]
    cls.add_method('TlvEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvEnd() [member function]
    cls.add_method('TlvEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvEnd() const [member function]
    cls.add_method('TlvEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvErase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > position) [member function]
    cls.add_method('TlvErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvErase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > last) [member function]
    cls.add_method('TlvErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'last')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> ns3::PbbAddressBlock::TlvFront() [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbAddressTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> const ns3::PbbAddressBlock::TlvFront() const [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbAddressTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvInsert(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > position, ns3::Ptr<ns3::PbbTlv> const value) [member function]
    cls.add_method('TlvInsert', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'position'), param('ns3::Ptr< ns3::PbbTlv > const', 'value')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvPopBack() [member function]
    cls.add_method('TlvPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvPopFront() [member function]
    cls.add_method('TlvPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvPushBack(ns3::Ptr<ns3::PbbAddressTlv> address) [member function]
    cls.add_method('TlvPushBack', 'void', [param('ns3::Ptr< ns3::PbbAddressTlv >', 'address')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvPushFront(ns3::Ptr<ns3::PbbAddressTlv> address) [member function]
    cls.add_method('TlvPushFront', 'void', [param('ns3::Ptr< ns3::PbbAddressTlv >', 'address')])
    ## packetbb.h (module 'network'): int ns3::PbbAddressBlock::TlvSize() const [member function]
    cls.add_method('TlvSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlock::DeserializeAddress(uint8_t * buffer) const [member function]
    cls.add_method('DeserializeAddress', 'ns3::Address', [param('uint8_t *', 'buffer')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlock::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'uint8_t', [], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrintAddress(std::ostream & os, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('PrintAddress', 'void', [param('std::ostream &', 'os'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::SerializeAddress(uint8_t * buffer, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('SerializeAddress', 'void', [param('uint8_t *', 'buffer'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbAddressBlockIpv4_methods(root_module, cls):
    ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv4::PbbAddressBlockIpv4(ns3::PbbAddressBlockIpv4 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbAddressBlockIpv4 const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv4::PbbAddressBlockIpv4() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlockIpv4::DeserializeAddress(uint8_t * buffer) const [member function]
    cls.add_method('DeserializeAddress', 'ns3::Address', [param('uint8_t *', 'buffer')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlockIpv4::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'uint8_t', [], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlockIpv4::PrintAddress(std::ostream & os, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('PrintAddress', 'void', [param('std::ostream &', 'os'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlockIpv4::SerializeAddress(uint8_t * buffer, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('SerializeAddress', 'void', [param('uint8_t *', 'buffer'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
    return
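
# Illustrative sketch, not scanner output.  PbbAddressBlock itself is
# abstract (its address hooks above are pure virtual), so the concrete IPv4
# flavour is instantiated; the ns.network module path is an assumption.
def _example_pbb_address_block():
    import ns.network
    block = ns.network.PbbAddressBlockIpv4()
    # Relies on the Ipv4Address -> Address implicit conversion the bindings register.
    block.AddressPushBack(ns.network.Ipv4Address('10.1.1.1'))
    block.PrefixPushBack(24)    # one prefix length per address
    assert block.AddressSize() == 1 and block.PrefixSize() == 1
    return block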
def register_Ns3PbbAddressBlockIpv6_methods(root_module, cls):
    ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv6::PbbAddressBlockIpv6(ns3::PbbAddressBlockIpv6 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbAddressBlockIpv6 const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv6::PbbAddressBlockIpv6() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlockIpv6::DeserializeAddress(uint8_t * buffer) const [member function]
    cls.add_method('DeserializeAddress', 'ns3::Address', [param('uint8_t *', 'buffer')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlockIpv6::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'uint8_t', [], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlockIpv6::PrintAddress(std::ostream & os, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('PrintAddress', 'void', [param('std::ostream &', 'os'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlockIpv6::SerializeAddress(uint8_t * buffer, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('SerializeAddress', 'void', [param('uint8_t *', 'buffer'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbMessage_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h (module 'network'): ns3::PbbMessage::PbbMessage(ns3::PbbMessage const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbMessage const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbMessage::PbbMessage() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessage::AddressBlockBack() [member function]
    cls.add_method('AddressBlockBack', 'ns3::Ptr< ns3::PbbAddressBlock >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> const ns3::PbbMessage::AddressBlockBack() const [member function]
    cls.add_method('AddressBlockBack', 'ns3::Ptr< ns3::PbbAddressBlock > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockBegin() [member function]
    cls.add_method('AddressBlockBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockBegin() const [member function]
    cls.add_method('AddressBlockBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockClear() [member function]
    cls.add_method('AddressBlockClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::AddressBlockEmpty() const [member function]
    cls.add_method('AddressBlockEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockEnd() [member function]
    cls.add_method('AddressBlockEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockEnd() const [member function]
    cls.add_method('AddressBlockEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockErase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > position) [member function]
    cls.add_method('AddressBlockErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockErase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > first, std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > last) [member function]
    cls.add_method('AddressBlockErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', 'last')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessage::AddressBlockFront() [member function]
    cls.add_method('AddressBlockFront', 'ns3::Ptr< ns3::PbbAddressBlock >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> const ns3::PbbMessage::AddressBlockFront() const [member function]
    cls.add_method('AddressBlockFront', 'ns3::Ptr< ns3::PbbAddressBlock > const', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockPopBack() [member function]
    cls.add_method('AddressBlockPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockPopFront() [member function]
    cls.add_method('AddressBlockPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockPushBack(ns3::Ptr<ns3::PbbAddressBlock> block) [member function]
    cls.add_method('AddressBlockPushBack', 'void', [param('ns3::Ptr< ns3::PbbAddressBlock >', 'block')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockPushFront(ns3::Ptr<ns3::PbbAddressBlock> block) [member function]
    cls.add_method('AddressBlockPushFront', 'void', [param('ns3::Ptr< ns3::PbbAddressBlock >', 'block')])
    ## packetbb.h (module 'network'): int ns3::PbbMessage::AddressBlockSize() const [member function]
    cls.add_method('AddressBlockSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::Deserialize(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')])
    ## packetbb.h (module 'network'): static ns3::Ptr<ns3::PbbMessage> ns3::PbbMessage::DeserializeMessage(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('DeserializeMessage', 'ns3::Ptr< ns3::PbbMessage >', [param('ns3::Buffer::Iterator &', 'start')], is_static=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbMessage::GetHopCount() const [member function]
    cls.add_method('GetHopCount', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbMessage::GetHopLimit() const [member function]
    cls.add_method('GetHopLimit', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbMessage::GetOriginatorAddress() const [member function]
    cls.add_method('GetOriginatorAddress', 'ns3::Address', [], is_const=True)
    ## packetbb.h (module 'network'): uint16_t ns3::PbbMessage::GetSequenceNumber() const [member function]
    cls.add_method('GetSequenceNumber', 'uint16_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint32_t ns3::PbbMessage::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbMessage::GetType() const [member function]
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::HasHopCount() const [member function]
    cls.add_method('HasHopCount', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::HasHopLimit() const [member function]
    cls.add_method('HasHopLimit', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::HasOriginatorAddress() const [member function]
    cls.add_method('HasOriginatorAddress', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::HasSequenceNumber() const [member function]
    cls.add_method('HasSequenceNumber', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::Print(std::ostream & os, int level) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::Serialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetHopCount(uint8_t hopcount) [member function]
    cls.add_method('SetHopCount', 'void', [param('uint8_t', 'hopcount')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetHopLimit(uint8_t hoplimit) [member function]
    cls.add_method('SetHopLimit', 'void', [param('uint8_t', 'hoplimit')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetOriginatorAddress(ns3::Address address) [member function]
    cls.add_method('SetOriginatorAddress', 'void', [param('ns3::Address', 'address')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetSequenceNumber(uint16_t seqnum) [member function]
    cls.add_method('SetSequenceNumber', 'void', [param('uint16_t', 'seqnum')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetType(uint8_t type) [member function]
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbMessage::TlvBack() [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> const ns3::PbbMessage::TlvBack() const [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvBegin() [member function]
    cls.add_method('TlvBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvBegin() const [member function]
    cls.add_method('TlvBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvClear() [member function]
    cls.add_method('TlvClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::TlvEmpty() const [member function]
    cls.add_method('TlvEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvEnd() [member function]
    cls.add_method('TlvEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvEnd() const [member function]
    cls.add_method('TlvEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvErase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > position) [member function]
    cls.add_method('TlvErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvErase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > last) [member function]
    cls.add_method('TlvErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'last')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbMessage::TlvFront() [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> const ns3::PbbMessage::TlvFront() const [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvPopBack() [member function]
    cls.add_method('TlvPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvPopFront() [member function]
    cls.add_method('TlvPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvPushBack(ns3::Ptr<ns3::PbbTlv> tlv) [member function]
    cls.add_method('TlvPushBack', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvPushFront(ns3::Ptr<ns3::PbbTlv> tlv) [member function]
    cls.add_method('TlvPushFront', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')])
    ## packetbb.h (module 'network'): int ns3::PbbMessage::TlvSize() const [member function]
    cls.add_method('TlvSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessage::AddressBlockDeserialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('AddressBlockDeserialize', 'ns3::Ptr< ns3::PbbAddressBlock >', [param('ns3::Buffer::Iterator &', 'start')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbMessage::DeserializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('DeserializeOriginatorAddress', 'ns3::Address', [param('ns3::Buffer::Iterator &', 'start')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::PbbAddressLength ns3::PbbMessage::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'ns3::PbbAddressLength', [], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::PrintOriginatorAddress(std::ostream & os) const [member function]
    cls.add_method('PrintOriginatorAddress', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SerializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('SerializeOriginatorAddress', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbMessageIpv4_methods(root_module, cls):
    ## packetbb.h (module 'network'): ns3::PbbMessageIpv4::PbbMessageIpv4(ns3::PbbMessageIpv4 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbMessageIpv4 const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbMessageIpv4::PbbMessageIpv4() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessageIpv4::AddressBlockDeserialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('AddressBlockDeserialize', 'ns3::Ptr< ns3::PbbAddressBlock >', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbMessageIpv4::DeserializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('DeserializeOriginatorAddress', 'ns3::Address', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::PbbAddressLength ns3::PbbMessageIpv4::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'ns3::PbbAddressLength', [], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessageIpv4::PrintOriginatorAddress(std::ostream & os) const [member function]
    cls.add_method('PrintOriginatorAddress', 'void', [param('std::ostream &', 'os')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessageIpv4::SerializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('SerializeOriginatorAddress', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbMessageIpv6_methods(root_module, cls):
    ## packetbb.h (module 'network'): ns3::PbbMessageIpv6::PbbMessageIpv6(ns3::PbbMessageIpv6 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbMessageIpv6 const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbMessageIpv6::PbbMessageIpv6() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessageIpv6::AddressBlockDeserialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('AddressBlockDeserialize', 'ns3::Ptr< ns3::PbbAddressBlock >', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbMessageIpv6::DeserializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('DeserializeOriginatorAddress', 'ns3::Address', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::PbbAddressLength ns3::PbbMessageIpv6::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'ns3::PbbAddressLength', [], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessageIpv6::PrintOriginatorAddress(std::ostream & os) const [member function]
    cls.add_method('PrintOriginatorAddress', 'void', [param('std::ostream &', 'os')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessageIpv6::SerializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('SerializeOriginatorAddress', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    return
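
# Illustrative sketch, not scanner output.  PbbMessage leaves the
# originator-address handling pure virtual above, so the IPv4 subclass is
# used; ns.network and the Ipv4Address -> Address conversion are assumptions.
def _example_pbb_message():
    import ns.network
    msg = ns.network.PbbMessageIpv4()
    msg.SetType(1)                  # RFC 5444 message type, caller-defined
    msg.SetOriginatorAddress(ns.network.Ipv4Address('10.1.1.1'))
    msg.SetHopLimit(255)
    msg.SetSequenceNumber(1)
    block = ns.network.PbbAddressBlockIpv4()
    block.AddressPushBack(ns.network.Ipv4Address('10.1.1.2'))
    msg.AddressBlockPushBack(block)
    return msg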
def register_Ns3PbbPacket_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h (module 'network'): ns3::PbbPacket::PbbPacket(ns3::PbbPacket const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbPacket const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbPacket::PbbPacket() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): uint32_t ns3::PbbPacket::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > position) [member function]
    cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > last) [member function]
    cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'last')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > position) [member function]
    cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > first, std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > last) [member function]
    cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', 'last')])
    ## packetbb.h (module 'network'): ns3::TypeId ns3::PbbPacket::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## packetbb.h (module 'network'): uint16_t ns3::PbbPacket::GetSequenceNumber() const [member function]
    cls.add_method('GetSequenceNumber', 'uint16_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint32_t ns3::PbbPacket::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## packetbb.h (module 'network'): static ns3::TypeId ns3::PbbPacket::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbPacket::GetVersion() const [member function]
    cls.add_method('GetVersion', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbPacket::HasSequenceNumber() const [member function]
    cls.add_method('HasSequenceNumber', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbMessage> ns3::PbbPacket::MessageBack() [member function]
    cls.add_method('MessageBack', 'ns3::Ptr< ns3::PbbMessage >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbMessage> const ns3::PbbPacket::MessageBack() const [member function]
    cls.add_method('MessageBack', 'ns3::Ptr< ns3::PbbMessage > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::MessageBegin() [member function]
    cls.add_method('MessageBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::MessageBegin() const [member function]
    cls.add_method('MessageBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbMessage > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbPacket::MessageClear() [member function]
    cls.add_method('MessageClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbPacket::MessageEmpty() const [member function]
    cls.add_method('MessageEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::MessageEnd() [member function]
    cls.add_method('MessageEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::MessageEnd() const [member function]
    cls.add_method('MessageEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbMessage > >', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbMessage> ns3::PbbPacket::MessageFront() [member function]
    cls.add_method('MessageFront', 'ns3::Ptr< ns3::PbbMessage >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbMessage> const ns3::PbbPacket::MessageFront() const [member function]
    cls.add_method('MessageFront', 'ns3::Ptr< ns3::PbbMessage > const', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbPacket::MessagePopBack() [member function]
    cls.add_method('MessagePopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbPacket::MessagePopFront() [member function]
    cls.add_method('MessagePopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbPacket::MessagePushBack(ns3::Ptr<ns3::PbbMessage> message) [member function]
    cls.add_method('MessagePushBack', 'void', [param('ns3::Ptr< ns3::PbbMessage >', 'message')])
    ## packetbb.h (module 'network'): void ns3::PbbPacket::MessagePushFront(ns3::Ptr<ns3::PbbMessage> message) [member function]
    cls.add_method('MessagePushFront', 'void', [param('ns3::Ptr< ns3::PbbMessage >', 'message')])
    ## packetbb.h (module 'network'): int ns3::PbbPacket::MessageSize() const [member function]
    cls.add_method('MessageSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbPacket::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbPacket::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbPacket::SetSequenceNumber(uint16_t number) [member function]
    cls.add_method('SetSequenceNumber', 'void', [param('uint16_t', 'number')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbPacket::TlvBack() [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> const ns3::PbbPacket::TlvBack() const [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::TlvBegin() [member function]
    cls.add_method('TlvBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::TlvBegin() const [member function]
    cls.add_method('TlvBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvClear() [member function]
    cls.add_method('TlvClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbPacket::TlvEmpty() const [member function]
    cls.add_method('TlvEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::TlvEnd() [member function]
    cls.add_method('TlvEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::TlvEnd() const [member function]
    cls.add_method('TlvEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbPacket::TlvFront() [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> const ns3::PbbPacket::TlvFront() const [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvPopBack() [member function]
    cls.add_method('TlvPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvPopFront() [member function]
    cls.add_method('TlvPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvPushBack(ns3::Ptr<ns3::PbbTlv> tlv) [member function]
    cls.add_method('TlvPushBack', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvPushFront(ns3::Ptr<ns3::PbbTlv> tlv) [member function]
    cls.add_method('TlvPushFront', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')])
    ## packetbb.h (module 'network'): int ns3::PbbPacket::TlvSize() const [member function]
    cls.add_method('TlvSize', 'int', [], is_const=True)
    return
def register_Ns3PbbTlv_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h (module 'network'): ns3::PbbTlv::PbbTlv(ns3::PbbTlv const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbTlv const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbTlv::PbbTlv() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): void ns3::PbbTlv::Deserialize(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')])
    ## packetbb.h (module 'network'): uint32_t ns3::PbbTlv::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbTlv::GetType() const [member function]
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbTlv::GetTypeExt() const [member function]
    cls.add_method('GetTypeExt', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Buffer ns3::PbbTlv::GetValue() const [member function]
    cls.add_method('GetValue', 'ns3::Buffer', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbTlv::HasTypeExt() const [member function]
    cls.add_method('HasTypeExt', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbTlv::HasValue() const [member function]
    cls.add_method('HasValue', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbTlv::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbTlv::Print(std::ostream & os, int level) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbTlv::Serialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbTlv::SetType(uint8_t type) [member function]
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    ## packetbb.h (module 'network'): void ns3::PbbTlv::SetTypeExt(uint8_t type) [member function]
    cls.add_method('SetTypeExt', 'void', [param('uint8_t', 'type')])
    ## packetbb.h (module 'network'): void ns3::PbbTlv::SetValue(ns3::Buffer start) [member function]
    cls.add_method('SetValue', 'void', [param('ns3::Buffer', 'start')])
    ## packetbb.h (module 'network'): void ns3::PbbTlv::SetValue(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('SetValue', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packetbb.h (module 'network'): uint8_t ns3::PbbTlv::GetIndexStart() const [member function]
    cls.add_method('GetIndexStart', 'uint8_t', [], is_const=True, visibility='protected')
    ## packetbb.h (module 'network'): uint8_t ns3::PbbTlv::GetIndexStop() const [member function]
    cls.add_method('GetIndexStop', 'uint8_t', [], is_const=True, visibility='protected')
    ## packetbb.h (module 'network'): bool ns3::PbbTlv::HasIndexStart() const [member function]
    cls.add_method('HasIndexStart', 'bool', [], is_const=True, visibility='protected')
    ## packetbb.h (module 'network'): bool ns3::PbbTlv::HasIndexStop() const [member function]
    cls.add_method('HasIndexStop', 'bool', [], is_const=True, visibility='protected')
    ## packetbb.h (module 'network'): bool ns3::PbbTlv::IsMultivalue() const [member function]
    cls.add_method('IsMultivalue', 'bool', [], is_const=True, visibility='protected')
    ## packetbb.h (module 'network'): void ns3::PbbTlv::SetIndexStart(uint8_t index) [member function]
    cls.add_method('SetIndexStart', 'void', [param('uint8_t', 'index')], visibility='protected')
    ## packetbb.h (module 'network'): void ns3::PbbTlv::SetIndexStop(uint8_t index) [member function]
    cls.add_method('SetIndexStop', 'void', [param('uint8_t', 'index')], visibility='protected')
    ## packetbb.h (module 'network'): void ns3::PbbTlv::SetMultivalue(bool isMultivalue) [member function]
    cls.add_method('SetMultivalue', 'void', [param('bool', 'isMultivalue')], visibility='protected')
    return
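
# Illustrative sketch, not scanner output.  PbbPacket's
# GetInstanceTypeId/GetSerializedSize/Serialize/Deserialize registrations
# above follow the ns3::Header contract, so a packet-level RFC 5444 bundle
# can be attached to an ns3::Packet like any other header; ns.network is an
# assumed import path.
def _example_pbb_packet():
    import ns.network
    tlv = ns.network.PbbTlv()
    tlv.SetType(7)
    pbb = ns.network.PbbPacket()
    pbb.SetSequenceNumber(1)
    pbb.TlvPushBack(tlv)            # packet-level TLV
    msg = ns.network.PbbMessageIpv4()
    msg.SetType(1)
    pbb.MessagePushBack(msg)
    pkt = ns.network.Packet()
    pkt.AddHeader(pbb)              # serialised through the Header hooks
    return pkt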
def register_Ns3RandomVariableChecker_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::RandomVariableChecker::RandomVariableChecker() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::RandomVariableChecker::RandomVariableChecker(ns3::RandomVariableChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RandomVariableChecker const &', 'arg0')])
    return

def register_Ns3RandomVariableValue_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariableValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RandomVariableValue const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariable const & value) [constructor]
    cls.add_constructor([param('ns3::RandomVariable const &', 'value')])
    ## random-variable.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::RandomVariableValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## random-variable.h (module 'core'): bool ns3::RandomVariableValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## random-variable.h (module 'core'): ns3::RandomVariable ns3::RandomVariableValue::Get() const [member function]
    cls.add_method('Get', 'ns3::RandomVariable', [], is_const=True)
    ## random-variable.h (module 'core'): std::string ns3::RandomVariableValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## random-variable.h (module 'core'): void ns3::RandomVariableValue::Set(ns3::RandomVariable const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::RandomVariable const &', 'value')])
    return
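
# Illustrative sketch, not scanner output.  RandomVariableValue is the
# AttributeValue wrapper used when a RandomVariable is pushed through the
# attribute system; ns.core.UniformVariable is assumed to be the classic
# random-variable.h API still exported by the core bindings of this era.
def _example_random_variable_value():
    import ns.core
    value = ns.core.RandomVariableValue(ns.core.UniformVariable(0.0, 1.0))
    ranvar = value.Get()    # unwraps a copy of the ns3::RandomVariable
    return ranvar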
def register_Ns3RateErrorModel_methods(root_module, cls):
    ## error-model.h (module 'network'): ns3::RateErrorModel::RateErrorModel(ns3::RateErrorModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RateErrorModel const &', 'arg0')])
    ## error-model.h (module 'network'): ns3::RateErrorModel::RateErrorModel() [constructor]
    cls.add_constructor([])
    ## error-model.h (module 'network'): double ns3::RateErrorModel::GetRate() const [member function]
    cls.add_method('GetRate', 'double', [], is_const=True)
    ## error-model.h (module 'network'): static ns3::TypeId ns3::RateErrorModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## error-model.h (module 'network'): ns3::ErrorUnit ns3::RateErrorModel::GetUnit() const [member function]
    cls.add_method('GetUnit', 'ns3::ErrorUnit', [], is_const=True)
    ## error-model.h (module 'network'): void ns3::RateErrorModel::SetRandomVariable(ns3::RandomVariable const & ranvar) [member function]
    cls.add_method('SetRandomVariable', 'void', [param('ns3::RandomVariable const &', 'ranvar')])
    ## error-model.h (module 'network'): void ns3::RateErrorModel::SetRate(double rate) [member function]
    cls.add_method('SetRate', 'void', [param('double', 'rate')])
    ## error-model.h (module 'network'): void ns3::RateErrorModel::SetUnit(ns3::ErrorUnit error_unit) [member function]
    cls.add_method('SetUnit', 'void', [param('ns3::ErrorUnit', 'error_unit')])
    ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True)
    ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptBit(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoCorruptBit', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True)
    ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptByte(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoCorruptByte', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True)
    ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptPkt(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoCorruptPkt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True)
    ## error-model.h (module 'network'): void ns3::RateErrorModel::DoReset() [member function]
    cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3ReceiveListErrorModel_methods(root_module, cls):
    ## error-model.h (module 'network'): ns3::ReceiveListErrorModel::ReceiveListErrorModel(ns3::ReceiveListErrorModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ReceiveListErrorModel const &', 'arg0')])
    ## error-model.h (module 'network'): ns3::ReceiveListErrorModel::ReceiveListErrorModel() [constructor]
    cls.add_constructor([])
    ## error-model.h (module 'network'): std::list<unsigned int, std::allocator<unsigned int> > ns3::ReceiveListErrorModel::GetList() const [member function]
    cls.add_method('GetList', 'std::list< unsigned int >', [], is_const=True)
    ## error-model.h (module 'network'): static ns3::TypeId ns3::ReceiveListErrorModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## error-model.h (module 'network'): void ns3::ReceiveListErrorModel::SetList(std::list<unsigned int, std::allocator<unsigned int> > const & packetlist) [member function]
    cls.add_method('SetList', 'void', [param('std::list< unsigned int > const &', 'packetlist')])
    ## error-model.h (module 'network'): bool ns3::ReceiveListErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True)
    ## error-model.h (module 'network'): void ns3::ReceiveListErrorModel::DoReset() [member function]
    cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True)
    return
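
# Illustrative sketch, not scanner output: configuring the two error models
# registered above.  Assumes ns.core/ns.network import paths, the classic
# UniformVariable API, and that the ErrorUnit enum values (EU_BIT/EU_BYTE/
# EU_PKT) are exported at module scope as pybindgen usually does.
def _example_error_models():
    import ns.core
    import ns.network
    rate_em = ns.network.RateErrorModel()
    rate_em.SetRandomVariable(ns.core.UniformVariable(0.0, 1.0))
    rate_em.SetUnit(ns.network.EU_PKT)   # count errors per packet
    rate_em.SetRate(0.01)                # 1% packet error rate
    list_em = ns.network.ReceiveListErrorModel()
    list_em.SetList([2, 5, 7])           # drop the 3rd, 6th and 8th receptions
    return rate_em, list_em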
def register_Ns3SimpleChannel_methods(root_module, cls):
    ## simple-channel.h (module 'network'): ns3::SimpleChannel::SimpleChannel(ns3::SimpleChannel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SimpleChannel const &', 'arg0')])
    ## simple-channel.h (module 'network'): ns3::SimpleChannel::SimpleChannel() [constructor]
    cls.add_constructor([])
    ## simple-channel.h (module 'network'): void ns3::SimpleChannel::Add(ns3::Ptr<ns3::SimpleNetDevice> device) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::SimpleNetDevice >', 'device')])
    ## simple-channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::SimpleChannel::GetDevice(uint32_t i) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
    ## simple-channel.h (module 'network'): uint32_t ns3::SimpleChannel::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, is_virtual=True)
    ## simple-channel.h (module 'network'): static ns3::TypeId ns3::SimpleChannel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## simple-channel.h (module 'network'): void ns3::SimpleChannel::Send(ns3::Ptr<ns3::Packet> p, uint16_t protocol, ns3::Mac48Address to, ns3::Mac48Address from, ns3::Ptr<ns3::SimpleNetDevice> sender) [member function]
    cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'to'), param('ns3::Mac48Address', 'from'), param('ns3::Ptr< ns3::SimpleNetDevice >', 'sender')])
    return

def register_Ns3SimpleNetDevice_methods(root_module, cls):
    ## simple-net-device.h (module 'network'): ns3::SimpleNetDevice::SimpleNetDevice(ns3::SimpleNetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SimpleNetDevice const &', 'arg0')])
    ## simple-net-device.h (module 'network'): ns3::SimpleNetDevice::SimpleNetDevice() [constructor]
    cls.add_constructor([])
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True)
    ## simple-net-device.h (module 'network'): ns3::Address ns3::SimpleNetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): ns3::Address ns3::SimpleNetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::SimpleNetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): uint32_t ns3::SimpleNetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): uint16_t ns3::SimpleNetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): ns3::Address ns3::SimpleNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): ns3::Address ns3::SimpleNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::SimpleNetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): static ns3::TypeId ns3::SimpleNetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::Receive(ns3::Ptr<ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address to, ns3::Mac48Address from) [member function]
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'to'), param('ns3::Mac48Address', 'from')])
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True)
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetChannel(ns3::Ptr<ns3::SimpleChannel> channel) [member function]
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::SimpleChannel >', 'channel')])
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True)
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True)
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True)
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetReceiveErrorModel(ns3::Ptr<ns3::ErrorModel> em) [member function]
    cls.add_method('SetReceiveErrorModel', 'void', [param('ns3::Ptr< ns3::ErrorModel >', 'em')])
    ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetReceiveErrorModel(ns3::Ptr<ns3::ErrorModel> em) [member function] cls.add_method('SetReceiveErrorModel', 'void', [param('ns3::Ptr< ns3::ErrorModel >', 'em')]) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3TimeChecker_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')]) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] 
cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def 
register_Ns3PbbAddressTlv_methods(root_module, cls): ## packetbb.h (module 'network'): ns3::PbbAddressTlv::PbbAddressTlv() [constructor] cls.add_constructor([]) ## packetbb.h (module 'network'): ns3::PbbAddressTlv::PbbAddressTlv(ns3::PbbAddressTlv const & arg0) [copy constructor] cls.add_constructor([param('ns3::PbbAddressTlv const &', 'arg0')]) ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressTlv::GetIndexStart() const [member function] cls.add_method('GetIndexStart', 'uint8_t', [], is_const=True) ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressTlv::GetIndexStop() const [member function] cls.add_method('GetIndexStop', 'uint8_t', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbAddressTlv::HasIndexStart() const [member function] cls.add_method('HasIndexStart', 'bool', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbAddressTlv::HasIndexStop() const [member function] cls.add_method('HasIndexStop', 'bool', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbAddressTlv::IsMultivalue() const [member function] cls.add_method('IsMultivalue', 'bool', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbAddressTlv::SetIndexStart(uint8_t index) [member function] cls.add_method('SetIndexStart', 'void', [param('uint8_t', 'index')]) ## packetbb.h (module 'network'): void ns3::PbbAddressTlv::SetIndexStop(uint8_t index) [member function] cls.add_method('SetIndexStop', 'void', [param('uint8_t', 'index')]) ## packetbb.h (module 'network'): void ns3::PbbAddressTlv::SetMultivalue(bool isMultivalue) [member function] cls.add_method('SetMultivalue', 'void', [param('bool', 'isMultivalue')]) return def register_functions(root_module): module = root_module ## address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeAddressChecker() [free function] module.add_function('MakeAddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## data-rate.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeDataRateChecker() [free function] module.add_function('MakeDataRateChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## ipv4-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeIpv4AddressChecker() [free function] module.add_function('MakeIpv4AddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## ipv4-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeIpv4MaskChecker() [free function] module.add_function('MakeIpv4MaskChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## ipv6-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeIpv6AddressChecker() [free function] module.add_function('MakeIpv6AddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## ipv6-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeIpv6PrefixChecker() [free function] module.add_function('MakeIpv6PrefixChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## mac48-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeMac48AddressChecker() [free function] module.add_function('MakeMac48AddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## address-utils.h (module 'network'): extern void ns3::ReadFrom(ns3::Buffer::Iterator & i, ns3::Address & ad, uint32_t len) [free function] module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Address &', 'ad'), param('uint32_t', 'len')]) ## 
address-utils.h (module 'network'): extern void ns3::ReadFrom(ns3::Buffer::Iterator & i, ns3::Ipv4Address & ad) [free function] module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Ipv4Address &', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::ReadFrom(ns3::Buffer::Iterator & i, ns3::Ipv6Address & ad) [free function] module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Ipv6Address &', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::ReadFrom(ns3::Buffer::Iterator & i, ns3::Mac48Address & ad) [free function] module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Mac48Address &', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::WriteTo(ns3::Buffer::Iterator & i, ns3::Address const & ad) [free function] module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Address const &', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::WriteTo(ns3::Buffer::Iterator & i, ns3::Ipv4Address ad) [free function] module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Ipv4Address', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::WriteTo(ns3::Buffer::Iterator & i, ns3::Ipv6Address ad) [free function] module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Ipv6Address', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::WriteTo(ns3::Buffer::Iterator & i, ns3::Mac48Address ad) [free function] module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Mac48Address', 'ad')]) register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_addressUtils(module, root_module): ## address-utils.h (module 'network'): extern bool ns3::addressUtils::IsMulticast(ns3::Address const & ad) [free function] module.add_function('IsMulticast', 'bool', [param('ns3::Address const &', 'ad')]) return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
-2,329,994,842,073,469,000
62.844982
426
0.604485
false
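The record above is pybindgen registration code generated for ns-3's network module: each register_*_methods function describes one wrapped C++ class. A minimal, hedged sketch of the same registration pattern, using a hypothetical C++ class and header rather than anything from ns-3 (assumes the pybindgen package is installed):

import sys

from pybindgen import FileCodeSink, Module, param

def main():
    # Describe a module and one hypothetical C++ class to wrap.
    mod = Module('example')
    mod.add_include('"example.h"')  # hypothetical header, quotes included
    cls = mod.add_class('Counter')  # hypothetical class, not part of ns-3
    cls.add_constructor([])
    cls.add_method('Increment', 'void', [param('int', 'step')])
    cls.add_method('GetValue', 'int', [], is_const=True)
    # Emit the generated CPython extension source, as the ns-3 script does.
    mod.generate(FileCodeSink(sys.stdout))

if __name__ == '__main__':
    main()

The generated .c file is then compiled against the C++ library; the ns-3 file above is simply this pattern repeated for every exported class, method, and free function.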
maxive/erp
addons/l10n_eu_service/__manifest__.py
26
2279
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'EU Mini One Stop Shop (MOSS)',
    'category': 'Localization',
    'description': """
EU Mini One Stop Shop (MOSS) VAT for telecommunications, broadcasting and electronic services
=============================================================================================

As of January 1st, 2015, telecommunications, broadcasting and electronic services sold within the European Union must always be taxed in the country where the customer belongs. In order to simplify the application of this EU directive, the Mini One Stop Shop (MOSS) registration scheme allows businesses to make a unique tax declaration.

This module makes it possible by helping with the creation of the required EU fiscal positions and taxes in order to automatically apply and record the required taxes.

This module installs a wizard to help set up fiscal positions and taxes for selling electronic services inside the EU.

The wizard lets you select:
 - the EU countries to which you are selling these services
 - your national VAT tax for services, to be mapped to the target country's tax
 - optionally: a template fiscal position, in order to copy the account mapping. Should be your existing B2C Intra-EU fiscal position. (defaults to no account mapping)
 - optionally: an account to use for collecting the tax amounts (defaults to the account used by your national VAT tax for services)

It creates the corresponding fiscal positions and taxes, automatically applicable for EU sales with a customer in the selected countries. The wizard can be run again for adding more countries.

The wizard creates a separate Chart of Taxes for collecting the VAT amounts of the MOSS declaration, so extracting the MOSS data should be easy. Look for a Chart of Taxes named "EU MOSS VAT Chart" in the Taxes Report menu (Generic Accounting Report).

References
++++++++++
- Directive 2008/8/EC
- Council Implementing Regulation (EU) No 1042/2013
""",
    'depends': ['account'],
    'data': [
        'security/ir.model.access.csv',
        'wizard/wizard.xml',
        'data/l10n_eu_service.service_tax_rate.csv',
        'views/res_config_settings_views.xml'
    ],
}
agpl-3.0
2,004,400,643,340,232,200
35.758065
93
0.716981
false
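An Odoo manifest like the one above is just a Python dict literal evaluated by the framework. A minimal, hedged sketch distilling the keys that record uses; the module name and file paths here are hypothetical placeholders, not real Odoo files:

# __manifest__.py -- the whole file is a single dict expression.
{
    'name': 'Example Module',            # human-readable module name
    'category': 'Localization',
    'description': """One-paragraph summary of what the module does.""",
    'depends': ['account'],              # modules that must load first
    'data': [
        'security/ir.model.access.csv',  # access-control rules
        'views/example_views.xml',       # UI and action definitions
    ],
}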
Hawaii-Smart-Energy-Project/Smart-Grid-PV-Inverter
test/test_insert_single_meter_data_file.py
1
5700
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'Daniel Zhang (張道博)' __copyright__ = 'Copyright (c) 2014, University of Hawaii Smart Energy Project' __license__ = 'https://raw.github.com/Hawaii-Smart-Energy-Project/Smart-Grid' \ '-PV-Inverter/master/BSD-LICENSE.txt' import unittest from insertSingleMeterDataFile import SingleFileLoader from si_configer import SIConfiger from sek.logger import SEKLogger from sek.db_util import SEKDBUtil from sek.db_connector import SEKDBConnector from si_data_util import SIDataUtil NUM_DB_COLS = 76 class SingleFileLoaderTester(unittest.TestCase): def setUp(self): self.logger = SEKLogger(__name__,'DEBUG') self.configer = SIConfiger() self.conn = SEKDBConnector( dbName = self.configer.configOptionValue('Database', 'db_name'), dbHost = self.configer.configOptionValue('Database', 'db_host'), dbPort = self.configer.configOptionValue('Database', 'db_port'), dbUsername = self.configer.configOptionValue('Database', 'db_username'), dbPassword = self.configer.configOptionValue('Database', 'db_password')).connectDB() self.cursor = self.conn.cursor() self.dbUtil = SEKDBUtil() self.dataUtil = SIDataUtil() self.inserter = SingleFileLoader('data/test-meter/log.csv') self.data = '"2014-07-12 16:22:30",0,,,1187488464896.00,' \ '2322185846784.00,1134697381888.00,35184644096.00,' \ '290857353216.00,10133100822528.00,367.13,' \ '-17660932096.00,1078.01,17660934144.00,-7.86,1.80,8.06,' \ '-0.97,244.01,122.00,32.93,60.01,-7.09,1.42,7.24,8.06,' \ '3.34,8.35,-40.18,-5.68,40.52,516.72,403.12,0,' \ '8797179904.00,47518.67,0,86.03,50.23,4198.40,' \ '281475022848.00,2251868602368.00,0,6820.01,' \ '8796095488.00,0,178.83,188.30,0,620.07,505.19,' \ '288230389841920.02,12668.18,68729384.00,0,-3.68,-4.18,,' \ '1.00,0.79,,3.81,4.25,,-0.97,-0.98,,244.01,,,121.54,' \ '122.46,,31.28,34.59,' self.testMeterName = 'test-meter' def test_columns(self): self.assertEquals(len(self.dataUtil.dbColumns), NUM_DB_COLS) def test_insert_data(self): self.logger.log('testing data insert') self.assertTrue(self.inserter.insertData(self.data)) self.conn.commit() def test_sql_formatted_values(self): self.logger.log( 'data: {}'.format(self.dataUtil.sqlFormattedValues(self.data))) def test_meter_id(self): self.logger.log('testing meter id') meter_id = self.inserter.meterID(self.testMeterName) self.logger.log('meter id {}'.format(meter_id)) self.assertTrue(isinstance(meter_id, ( int, long ))) self.logger.log('getting meter id') sql = 'SELECT meter_id FROM "Meters" WHERE meter_name = \'{}\''.format( self.testMeterName) success = self.dbUtil.executeSQL(self.cursor, sql, exitOnFail = True) if success: result = self.cursor.fetchall() self.assertEquals(1, len(result)) else: self.logger.log('failed to retrieve meter id', 'error') def test_meter_name(self): """ Test getting the meter name. 
:return: """ self.assertEquals(self.inserter.meterName(), self.testMeterName) def test_insert_data_from_file(self): self.inserter.insertDataFromFile() sql = 'SELECT * FROM "MeterData" WHERE meter_id = {}'.format( self.inserter.meterID(self.testMeterName)) success = self.dbUtil.executeSQL(self.cursor, sql, exitOnFail = True) if success: result = self.cursor.fetchall() self.assertEquals(len(result), 10) self.assertTrue(success) def tearDown(self): self.logger.log('teardown', 'debug') sql = 'SELECT meter_id FROM "Meters" WHERE meter_name = \'{}\''.format( self.testMeterName) success = self.dbUtil.executeSQL(self.cursor, sql, exitOnFail = True) if success: result = self.cursor.fetchall() if len(result) == 1: sql = 'DELETE FROM "Meters" WHERE meter_id = {}'.format( result[0][0]) success = self.dbUtil.executeSQL(self.cursor, sql, exitOnFail = True) if success: self.conn.commit() sql = 'SELECT meter_id FROM "Meters" WHERE meter_name = \'{' \ '}\''.format(self.testMeterName) success = self.dbUtil.executeSQL(self.cursor, sql, exitOnFail = True) result = self.cursor.fetchall() self.assertEquals(0, len(result)) if __name__ == '__main__': RUN_SELECTED_TESTS = True if RUN_SELECTED_TESTS: tests = ['test_insert_data', 'test_meter_id', 'test_meter_name', 'test_insert_data_from_file'] # For testing: selected_tests = [] mySuite = unittest.TestSuite() if len(selected_tests) > 0: for t in selected_tests: mySuite.addTest(SingleFileLoaderTester(t)) else: for t in tests: mySuite.addTest(SingleFileLoaderTester(t)) unittest.TextTestRunner().run(mySuite) else: unittest.main()
bsd-3-clause
-93,411,469,757,494,100
38.818182
102
0.567615
false
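The RUN_SELECTED_TESTS block at the bottom of the test file above builds a suite from test names instead of calling unittest.main(), which lets slow database tests be run selectively. A self-contained, hedged sketch of that pattern; the test class and test names here are made up:

import unittest

class ExampleTester(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(1 + 1, 2)

    def test_membership(self):
        self.assertIn('a', 'abc')

if __name__ == '__main__':
    # Run only the tests named here, in this order.
    suite = unittest.TestSuite()
    for name in ['test_addition', 'test_membership']:
        suite.addTest(ExampleTester(name))
    unittest.TextTestRunner().run(suite)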
imruahmed/microblog
flask/lib/python2.7/site-packages/pip/exceptions.py
280
1257
"""Exceptions used throughout package""" from __future__ import absolute_import class PipError(Exception): """Base pip exception""" class InstallationError(PipError): """General exception during installation""" class UninstallationError(PipError): """General exception during uninstallation""" class DistributionNotFound(InstallationError): """Raised when a distribution cannot be found to satisfy a requirement""" class RequirementsFileParseError(InstallationError): """Raised when a general error occurs parsing a requirements file line.""" class BestVersionAlreadyInstalled(PipError): """Raised when the most up-to-date version of a package is already installed.""" class BadCommand(PipError): """Raised when virtualenv or a command is not found""" class CommandError(PipError): """Raised when there is an error in command-line arguments""" class PreviousBuildDirError(PipError): """Raised when there's a previous conflicting build directory""" class HashMismatch(InstallationError): """Distribution file hash values don't match.""" class InvalidWheelFilename(InstallationError): """Invalid wheel filename.""" class UnsupportedWheel(InstallationError): """Unsupported wheel."""
bsd-3-clause
-8,862,171,454,575,108,000
23.647059
78
0.746221
false
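The pip module above follows the single-base-class pattern: every package error derives from PipError, so callers can catch the whole family with one except clause. A hedged, generic sketch of the same idea; the class names are hypothetical, not pip's:

class AppError(Exception):
    """Base exception for a hypothetical package."""

class ConfigError(AppError):
    """Raised when configuration is invalid."""

class NetworkError(AppError):
    """Raised when a remote call fails."""

try:
    raise ConfigError('missing required setting')
except AppError as exc:
    # One handler covers every error the package defines.
    print('%s: %s' % (type(exc).__name__, exc))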
abdulbaqi/quranf
venv/lib/python2.7/site-packages/werkzeug/exceptions.py
148
18577
# -*- coding: utf-8 -*- """ werkzeug.exceptions ~~~~~~~~~~~~~~~~~~~ This module implements a number of Python exceptions you can raise from within your views to trigger a standard non-200 response. Usage Example ------------- :: from werkzeug.wrappers import BaseRequest from werkzeug.wsgi import responder from werkzeug.exceptions import HTTPException, NotFound def view(request): raise NotFound() @responder def application(environ, start_response): request = BaseRequest(environ) try: return view(request) except HTTPException as e: return e As you can see from this example those exceptions are callable WSGI applications. Because of Python 2.4 compatibility those do not extend from the response objects but only from the python exception class. As a matter of fact they are not Werkzeug response objects. However you can get a response object by calling ``get_response()`` on a HTTP exception. Keep in mind that you have to pass an environment to ``get_response()`` because some errors fetch additional information from the WSGI environment. If you want to hook in a different exception page to say, a 404 status code, you can add a second except for a specific subclass of an error:: @responder def application(environ, start_response): request = BaseRequest(environ) try: return view(request) except NotFound, e: return not_found(request) except HTTPException, e: return e :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import sys # Because of bootstrapping reasons we need to manually patch ourselves # onto our parent module. import werkzeug werkzeug.exceptions = sys.modules[__name__] from werkzeug._internal import _get_environ from werkzeug._compat import iteritems, integer_types, text_type, \ implements_to_string from werkzeug.wrappers import Response @implements_to_string class HTTPException(Exception): """ Baseclass for all HTTP exceptions. This exception can be called as WSGI application to render a default error page or you can catch the subclasses of it independently and render nicer error messages. """ code = None description = None def __init__(self, description=None, response=None): Exception.__init__(self) if description is not None: self.description = description self.response = response @classmethod def wrap(cls, exception, name=None): """This method returns a new subclass of the exception provided that also is a subclass of `BadRequest`. """ class newcls(cls, exception): def __init__(self, arg=None, *args, **kwargs): cls.__init__(self, *args, **kwargs) exception.__init__(self, arg) newcls.__module__ = sys._getframe(1).f_globals.get('__name__') newcls.__name__ = name or cls.__name__ + exception.__name__ return newcls @property def name(self): """The status name.""" return HTTP_STATUS_CODES.get(self.code, 'Unknown Error') def get_description(self, environ=None): """Get the description.""" return u'<p>%s</p>' % escape(self.description) def get_body(self, environ=None): """Get the HTML body.""" return text_type(( u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n' u'<title>%(code)s %(name)s</title>\n' u'<h1>%(name)s</h1>\n' u'%(description)s\n' ) % { 'code': self.code, 'name': escape(self.name), 'description': self.get_description(environ) }) def get_headers(self, environ=None): """Get a list of headers.""" return [('Content-Type', 'text/html')] def get_response(self, environ=None): """Get a response object. If one was passed to the exception it's returned directly. 
:param environ: the optional environ for the request. This can be used to modify the response depending on how the request looked like. :return: a :class:`Response` object or a subclass thereof. """ if self.response is not None: return self.response if environ is not None: environ = _get_environ(environ) headers = self.get_headers(environ) return Response(self.get_body(environ), self.code, headers) def __call__(self, environ, start_response): """Call the exception as WSGI application. :param environ: the WSGI environment. :param start_response: the response callable provided by the WSGI server. """ response = self.get_response(environ) return response(environ, start_response) def __str__(self): return '%d: %s' % (self.code, self.name) def __repr__(self): return '<%s \'%s\'>' % (self.__class__.__name__, self) class BadRequest(HTTPException): """*400* `Bad Request` Raise if the browser sends something to the application the application or server cannot handle. """ code = 400 description = ( 'The browser (or proxy) sent a request that this server could ' 'not understand.' ) class ClientDisconnected(BadRequest): """Internal exception that is raised if Werkzeug detects a disconnected client. Since the client is already gone at that point attempting to send the error message to the client might not work and might ultimately result in another exception in the server. Mainly this is here so that it is silenced by default as far as Werkzeug is concerned. Since disconnections cannot be reliably detected and are unspecified by WSGI to a large extent this might or might not be raised if a client is gone. .. versionadded:: 0.8 """ class SecurityError(BadRequest): """Raised if something triggers a security error. This is otherwise exactly like a bad request error. .. versionadded:: 0.9 """ class Unauthorized(HTTPException): """*401* `Unauthorized` Raise if the user is not authorized. Also used if you want to use HTTP basic auth. """ code = 401 description = ( 'The server could not verify that you are authorized to access ' 'the URL requested. You either supplied the wrong credentials (e.g. ' 'a bad password), or your browser doesn\'t understand how to supply ' 'the credentials required.' ) class Forbidden(HTTPException): """*403* `Forbidden` Raise if the user doesn't have the permission for the requested resource but was authenticated. """ code = 403 description = ( 'You don\'t have the permission to access the requested resource. ' 'It is either read-protected or not readable by the server.' ) class NotFound(HTTPException): """*404* `Not Found` Raise if a resource does not exist and never existed. """ code = 404 description = ( 'The requested URL was not found on the server. ' 'If you entered the URL manually please check your spelling and ' 'try again.' ) class MethodNotAllowed(HTTPException): """*405* `Method Not Allowed` Raise if the server used a method the resource does not handle. For example `POST` if the resource is view only. Especially useful for REST. The first argument for this exception should be a list of allowed methods. Strictly speaking the response would be invalid if you don't provide valid methods in the header which you can do with that list. """ code = 405 description = 'The method is not allowed for the requested URL.' 
def __init__(self, valid_methods=None, description=None): """Takes an optional list of valid http methods starting with werkzeug 0.3 the list will be mandatory.""" HTTPException.__init__(self, description) self.valid_methods = valid_methods def get_headers(self, environ): headers = HTTPException.get_headers(self, environ) if self.valid_methods: headers.append(('Allow', ', '.join(self.valid_methods))) return headers class NotAcceptable(HTTPException): """*406* `Not Acceptable` Raise if the server can't return any content conforming to the `Accept` headers of the client. """ code = 406 description = ( 'The resource identified by the request is only capable of ' 'generating response entities which have content characteristics ' 'not acceptable according to the accept headers sent in the ' 'request.' ) class RequestTimeout(HTTPException): """*408* `Request Timeout` Raise to signalize a timeout. """ code = 408 description = ( 'The server closed the network connection because the browser ' 'didn\'t finish the request within the specified time.' ) class Conflict(HTTPException): """*409* `Conflict` Raise to signal that a request cannot be completed because it conflicts with the current state on the server. .. versionadded:: 0.7 """ code = 409 description = ( 'A conflict happened while processing the request. The resource ' 'might have been modified while the request was being processed.' ) class Gone(HTTPException): """*410* `Gone` Raise if a resource existed previously and went away without new location. """ code = 410 description = ( 'The requested URL is no longer available on this server and there ' 'is no forwarding address. If you followed a link from a foreign ' 'page, please contact the author of this page.' ) class LengthRequired(HTTPException): """*411* `Length Required` Raise if the browser submitted data but no ``Content-Length`` header which is required for the kind of processing the server does. """ code = 411 description = ( 'A request with this method requires a valid <code>Content-' 'Length</code> header.' ) class PreconditionFailed(HTTPException): """*412* `Precondition Failed` Status code used in combination with ``If-Match``, ``If-None-Match``, or ``If-Unmodified-Since``. """ code = 412 description = ( 'The precondition on the request for the URL failed positive ' 'evaluation.' ) class RequestEntityTooLarge(HTTPException): """*413* `Request Entity Too Large` The status code one should return if the data submitted exceeded a given limit. """ code = 413 description = ( 'The data value transmitted exceeds the capacity limit.' ) class RequestURITooLarge(HTTPException): """*414* `Request URI Too Large` Like *413* but for too long URLs. """ code = 414 description = ( 'The length of the requested URL exceeds the capacity limit ' 'for this server. The request cannot be processed.' ) class UnsupportedMediaType(HTTPException): """*415* `Unsupported Media Type` The status code returned if the server is unable to handle the media type the client transmitted. """ code = 415 description = ( 'The server does not support the media type transmitted in ' 'the request.' ) class RequestedRangeNotSatisfiable(HTTPException): """*416* `Requested Range Not Satisfiable` The client asked for a part of the file that lies beyond the end of the file. .. versionadded:: 0.7 """ code = 416 description = ( 'The server cannot provide the requested range.' ) class ExpectationFailed(HTTPException): """*417* `Expectation Failed` The server cannot meet the requirements of the Expect request-header. .. 
versionadded:: 0.7 """ code = 417 description = ( 'The server could not meet the requirements of the Expect header' ) class ImATeapot(HTTPException): """*418* `I'm a teapot` The server should return this if it is a teapot and someone attempted to brew coffee with it. .. versionadded:: 0.7 """ code = 418 description = ( 'This server is a teapot, not a coffee machine' ) class UnprocessableEntity(HTTPException): """*422* `Unprocessable Entity` Used if the request is well formed, but the instructions are otherwise incorrect. """ code = 422 description = ( 'The request was well-formed but was unable to be followed ' 'due to semantic errors.' ) class PreconditionRequired(HTTPException): """*428* `Precondition Required` The server requires this request to be conditional, typically to prevent the lost update problem, which is a race condition between two or more clients attempting to update a resource through PUT or DELETE. By requiring each client to include a conditional header ("If-Match" or "If-Unmodified- Since") with the proper value retained from a recent GET request, the server ensures that each client has at least seen the previous revision of the resource. """ code = 428 description = ( 'This request is required to be conditional; try using "If-Match" ' 'or "If-Unmodified-Since".' ) class TooManyRequests(HTTPException): """*429* `Too Many Requests` The server is limiting the rate at which this user receives responses, and this request exceeds that rate. (The server may use any convenient method to identify users and their request rates). The server may include a "Retry-After" header to indicate how long the user should wait before retrying. """ code = 429 description = ( 'This user has exceeded an allotted request count. Try again later.' ) class RequestHeaderFieldsTooLarge(HTTPException): """*431* `Request Header Fields Too Large` The server refuses to process the request because the header fields are too large. One or more individual fields may be too large, or the set of all headers is too large. """ code = 431 description = ( 'One or more header fields exceeds the maximum size.' ) class InternalServerError(HTTPException): """*500* `Internal Server Error` Raise if an internal server error occurred. This is a good fallback if an unknown error occurred in the dispatcher. """ code = 500 description = ( 'The server encountered an internal error and was unable to ' 'complete your request. Either the server is overloaded or there ' 'is an error in the application.' ) class NotImplemented(HTTPException): """*501* `Not Implemented` Raise if the application does not support the action requested by the browser. """ code = 501 description = ( 'The server does not support the action requested by the ' 'browser.' ) class BadGateway(HTTPException): """*502* `Bad Gateway` If you do proxying in your application you should return this status code if you received an invalid response from the upstream server it accessed in attempting to fulfill the request. """ code = 502 description = ( 'The proxy server received an invalid response from an upstream ' 'server.' ) class ServiceUnavailable(HTTPException): """*503* `Service Unavailable` Status code you should return if a service is temporarily unavailable. """ code = 503 description = ( 'The server is temporarily unable to service your request due to ' 'maintenance downtime or capacity problems. Please try again ' 'later.' 
) class GatewayTimeout(HTTPException): """*504* `Gateway Timeout` Status code you should return if a connection to an upstream server times out. """ code = 504 description = ( 'The connection to an upstream server timed out.' ) class HTTPVersionNotSupported(HTTPException): """*505* `HTTP Version Not Supported` The server does not support the HTTP protocol version used in the request. """ code = 505 description = ( 'The server does not support the HTTP protocol version used in the ' 'request.' ) default_exceptions = {} __all__ = ['HTTPException'] def _find_exceptions(): for name, obj in iteritems(globals()): try: is_http_exception = issubclass(obj, HTTPException) except TypeError: is_http_exception = False if not is_http_exception or obj.code is None: continue __all__.append(obj.__name__) old_obj = default_exceptions.get(obj.code, None) if old_obj is not None and issubclass(obj, old_obj): continue default_exceptions[obj.code] = obj _find_exceptions() del _find_exceptions class Aborter(object): """ When passed a dict of code -> exception items it can be used as callable that raises exceptions. If the first argument to the callable is an integer it will be looked up in the mapping, if it's a WSGI application it will be raised in a proxy exception. The rest of the arguments are forwarded to the exception constructor. """ def __init__(self, mapping=None, extra=None): if mapping is None: mapping = default_exceptions self.mapping = dict(mapping) if extra is not None: self.mapping.update(extra) def __call__(self, code, *args, **kwargs): if not args and not kwargs and not isinstance(code, integer_types): raise HTTPException(response=code) if code not in self.mapping: raise LookupError('no exception for %r' % code) raise self.mapping[code](*args, **kwargs) abort = Aborter() #: an exception that is used internally to signal both a key error and a #: bad request. Used by a lot of the datastructures. BadRequestKeyError = BadRequest.wrap(KeyError) # imported here because of circular dependencies of werkzeug.utils from werkzeug.utils import escape from werkzeug.http import HTTP_STATUS_CODES
mit
-2,466,692,818,210,942,000
29.10859
79
0.646391
false
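As the werkzeug module docstring above notes, each HTTPException subclass is itself a callable WSGI application, so a caught exception can be returned directly as the error response. A hedged sketch of that dispatch pattern in modern except syntax (assumes Werkzeug is installed; the view logic is a hypothetical stand-in):

from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.wrappers import Request, Response

def view(request):
    if request.path != '/':
        raise NotFound()
    return Response('hello')

def application(environ, start_response):
    request = Request(environ)
    try:
        response = view(request)
    except HTTPException as e:
        response = e  # the exception doubles as the error-page WSGI app
    return response(environ, start_response)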
jmouriz/sanaviron
doc/api/source/conf.py
3
9479
# -*- coding: utf-8 -*- # # Sanaviron documentation build configuration file, created by # sphinx-quickstart on Mon Sep 17 17:01:35 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../..')) print os.path.abspath('../../../sanaviron') # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Sanaviron' copyright = u'2012, Denis Ivlev, Juan Manuel Mouriz' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.0.1' # The full version, including alpha/beta/rc tags. release = '0.0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['.static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Sanavirondoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Sanaviron.tex', u'Sanaviron Documentation', u'Denis Ivlev, Juan Manuel Mouriz', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'sanaviron', u'Sanaviron Documentation', [u'Denis Ivlev, Juan Manuel Mouriz'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Sanaviron', u'Sanaviron Documentation', u'Denis Ivlev, Juan Manuel Mouriz', 'Sanaviron', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Sanaviron' epub_author = u'Denis Ivlev, Juan Manuel Mouriz' epub_publisher = u'Denis Ivlev, Juan Manuel Mouriz' epub_copyright = u'2012, Denis Ivlev, Juan Manuel Mouriz' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files that should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None}
apache-2.0
-1,942,385,923,860,571,600
31.686207
193
0.705349
false
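A hedged sketch of invoking Sphinx against a conf.py like the one above; the source and build paths follow the repository layout shown in the record's path field and may differ in practice:

import subprocess

# -b selects the output builder; 'html' is the standard website output.
subprocess.run(
    ['sphinx-build', '-b', 'html', 'doc/api/source', 'doc/api/build/html'],
    check=True,
)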
sambitgaan/nupic
external/linux32/lib/python2.6/site-packages/pytz/__init__.py
69
30648
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:

    ftp://elsie.nci.nih.gov/pub/tz*.tar.gz

See the datetime section of the Python Library Reference for information
on how to use these modules.
'''

# The Olson database has historically been updated about 4 times a year
OLSON_VERSION = '2008c'
VERSION = OLSON_VERSION
#VERSION = OLSON_VERSION + '.2'
__version__ = OLSON_VERSION

OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling

__all__ = [
    'timezone', 'utc', 'country_timezones',
    'AmbiguousTimeError', 'UnknownTimeZoneError',
    'all_timezones', 'all_timezones_set',
    'common_timezones', 'common_timezones_set',
    ]

import sys, datetime, os.path, gettext

try:
    from pkg_resources import resource_stream
except ImportError:
    resource_stream = None

from tzinfo import AmbiguousTimeError, unpickler
from tzfile import build_tzinfo

# Use 2.3 sets module implementation if set builtin is not available
try:
    set
except NameError:
    from sets import Set as set


def open_resource(name):
    """Open a resource from the zoneinfo subdir for reading.

    Uses the pkg_resources module if available.
    """
    if resource_stream is not None:
        return resource_stream(__name__, 'zoneinfo/' + name)
    else:
        name_parts = name.lstrip('/').split('/')
        for part in name_parts:
            if part == os.path.pardir or os.path.sep in part:
                raise ValueError('Bad path segment: %r' % part)
        filename = os.path.join(os.path.dirname(__file__),
                                'zoneinfo', *name_parts)
        return open(filename, 'rb')


# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
#     'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
#     fallback=True
# )
# def _(timezone_name):
#     """Translate a timezone name using the current locale, returning Unicode"""
#     return t.ugettext(timezone_name)


class UnknownTimeZoneError(KeyError):
    '''Exception raised when pytz is passed an unknown timezone.

    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True

    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.

    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    '''
    pass


_tzinfo_cache = {}

def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(u'US/Eastern') is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> timezone('Asia/Shangri-La')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: 'Asia/Shangri-La'

    >>> timezone(u'\N{TRADE MARK SIGN}')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: u'\u2122'
    '''
    if zone.upper() == 'UTC':
        return utc

    try:
        zone = zone.encode('US-ASCII')
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)

    zone = _unmunge_zone(zone)
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:
            _tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone))
        else:
            raise UnknownTimeZoneError(zone)

    return _tzinfo_cache[zone]


def _unmunge_zone(zone):
    """Undo the time zone name munging done by older versions of pytz."""
    return zone.replace('_plus_', '+').replace('_minus_', '-')


ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)


class UTC(datetime.tzinfo):
    """UTC

    Identical to the reference UTC implementation given in Python docs except
    that it unpickles using the single module global instance defined beneath
    this class declaration.

    Also contains extra attributes and methods to match other pytz tzinfo
    instances.
    """
    zone = "UTC"

    def utcoffset(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return ZERO

    def __reduce__(self):
        return _UTC, ()

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError, 'Not naive datetime (tzinfo is already set)'
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError, 'Naive time - no tzinfo set'
        return dt.replace(tzinfo=self)

    def __repr__(self):
        return "<UTC>"

    def __str__(self):
        return "UTC"

UTC = utc = UTC() # UTC is a singleton


def _UTC():
    """Factory function for utc unpickling.

    Makes sure that unpickling a utc instance always returns the same
    module global.

    These examples belong in the UTC class above, but it is obscured; or in
    the README.txt, but we are not depending on Python 2.4 so integrating
    the README.txt examples with the unit tests is not trivial.

    >>> import datetime, pickle
    >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
    >>> naive = dt.replace(tzinfo=None)
    >>> p = pickle.dumps(dt, 1)
    >>> naive_p = pickle.dumps(naive, 1)
    >>> len(p), len(naive_p), len(p) - len(naive_p)
    (60, 43, 17)
    >>> new = pickle.loads(p)
    >>> new == dt
    True
    >>> new is dt
    False
    >>> new.tzinfo is dt.tzinfo
    True
    >>> utc is UTC is timezone('UTC')
    True
    >>> utc is timezone('GMT')
    False
    """
    return utc
_UTC.__safe_for_unpickling__ = True


def _p(*args):
    """Factory function for unpickling pytz tzinfo instances.

    Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
    by shortening the path.
    """
    return unpickler(*args)
_p.__safe_for_unpickling__ = True


_country_timezones_cache = {}

def country_timezones(iso3166_code):
    """Return a list of timezones used in a particular country.

    iso3166_code is the two letter code used to identify the country.

    >>> country_timezones('ch')
    ['Europe/Zurich']
    >>> country_timezones('CH')
    ['Europe/Zurich']
    >>> country_timezones(u'ch')
    ['Europe/Zurich']
    >>> country_timezones('XXX')
    Traceback (most recent call last):
    ...
    KeyError: 'XXX'
    """
    iso3166_code = iso3166_code.upper()
    if not _country_timezones_cache:
        zone_tab = open_resource('zone.tab')
        for line in zone_tab:
            if line.startswith('#'):
                continue
            code, coordinates, zone = line.split(None, 4)[:3]
            try:
                _country_timezones_cache[code].append(zone)
            except KeyError:
                _country_timezones_cache[code] = [zone]
    return _country_timezones_cache[iso3166_code]


# Time-zone info based solely on fixed offsets

class _FixedOffset(datetime.tzinfo):

    zone = None # to match the standard pytz API

    def __init__(self, minutes):
        if abs(minutes) >= 1440:
            raise ValueError("absolute offset is too large", minutes)
        self._minutes = minutes
        self._offset = datetime.timedelta(minutes=minutes)

    def utcoffset(self, dt):
        return self._offset

    def __reduce__(self):
        return FixedOffset, (self._minutes, )

    def dst(self, dt):
        return None

    def tzname(self, dt):
        return None

    def __repr__(self):
        return 'pytz.FixedOffset(%d)' % self._minutes

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError, 'Not naive datetime (tzinfo is already set)'
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError, 'Naive time - no tzinfo set'
        return dt.replace(tzinfo=self)


def FixedOffset(offset, _tzinfos = {}):
    """return a fixed-offset timezone based off a number of minutes.

    >>> one = FixedOffset(-330)
    >>> one
    pytz.FixedOffset(-330)
    >>> one.utcoffset(datetime.datetime.now())
    datetime.timedelta(-1, 66600)

    >>> two = FixedOffset(1380)
    >>> two
    pytz.FixedOffset(1380)
    >>> two.utcoffset(datetime.datetime.now())
    datetime.timedelta(0, 82800)

    The datetime.timedelta must be strictly between -1 and 1 day (exclusive).

    >>> FixedOffset(1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', 1440)

    >>> FixedOffset(-1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', -1440)

    An offset of 0 is special-cased to return UTC.

    >>> FixedOffset(0) is UTC
    True

    There should always be only one instance of a FixedOffset per timedelta.
    This should be true for multiple creation calls.

    >>> FixedOffset(-330) is one
    True
    >>> FixedOffset(1380) is two
    True

    It should also be true for pickling.

    >>> import pickle
    >>> pickle.loads(pickle.dumps(one)) is one
    True
    >>> pickle.loads(pickle.dumps(two)) is two
    True
    """

    if offset == 0:
        return UTC

    info = _tzinfos.get(offset)
    if info is None:
        # We haven't seen this one before. We need to save it.
        # Use setdefault to avoid a race condition and make sure we have
        # only one
        info = _tzinfos.setdefault(offset, _FixedOffset(offset))

    return info

FixedOffset.__safe_for_unpickling__ = True


def _test():
    import doctest, os, sys
    sys.path.insert(0, os.pardir)
    import pytz
    return doctest.testmod(pytz)

if __name__ == '__main__':
    _test()

common_timezones = \
['Africa/Abidjan', 'Africa/Accra', 'Africa/Addis_Ababa', 'Africa/Algiers', 'Africa/Asmara', 'Africa/Asmera', 'Africa/Bamako', 'Africa/Bangui', 'Africa/Banjul', 'Africa/Bissau', 'Africa/Blantyre', 'Africa/Brazzaville', 'Africa/Bujumbura', 'Africa/Cairo', 'Africa/Casablanca', 'Africa/Ceuta', 'Africa/Conakry', 'Africa/Dakar', 'Africa/Dar_es_Salaam', 'Africa/Djibouti', 'Africa/Douala', 'Africa/El_Aaiun', 'Africa/Freetown', 'Africa/Gaborone', 'Africa/Harare', 'Africa/Johannesburg', 'Africa/Kampala', 'Africa/Khartoum', 'Africa/Kigali', 'Africa/Kinshasa', 'Africa/Lagos', 'Africa/Libreville', 'Africa/Lome', 'Africa/Luanda', 'Africa/Lubumbashi', 'Africa/Lusaka', 'Africa/Malabo', 'Africa/Maputo', 'Africa/Maseru', 'Africa/Mbabane', 'Africa/Mogadishu', 'Africa/Monrovia', 'Africa/Nairobi', 'Africa/Ndjamena', 'Africa/Niamey', 'Africa/Nouakchott', 'Africa/Ouagadougou', 'Africa/Porto-Novo', 'Africa/Sao_Tome', 'Africa/Timbuktu', 'Africa/Tripoli', 'Africa/Tunis', 'Africa/Windhoek', 'America/Adak', 'America/Anchorage', 'America/Anguilla', 'America/Antigua', 'America/Araguaina', 'America/Aruba', 'America/Asuncion', 'America/Atikokan', 'America/Atka', 'America/Bahia', 'America/Barbados', 'America/Belem', 'America/Belize', 'America/Blanc-Sablon', 'America/Boa_Vista', 'America/Bogota', 'America/Boise', 'America/Buenos_Aires', 'America/Cambridge_Bay', 'America/Campo_Grande', 'America/Cancun', 'America/Caracas', 'America/Catamarca', 'America/Cayenne', 'America/Cayman', 'America/Chicago', 'America/Chihuahua', 'America/Coral_Harbour', 'America/Cordoba', 'America/Costa_Rica', 'America/Cuiaba', 'America/Curacao', 'America/Danmarkshavn', 'America/Dawson', 'America/Dawson_Creek', 'America/Denver', 'America/Detroit', 'America/Dominica', 'America/Edmonton', 'America/Eirunepe', 'America/El_Salvador', 'America/Ensenada', 'America/Fort_Wayne', 'America/Fortaleza', 'America/Glace_Bay', 'America/Godthab', 'America/Goose_Bay', 'America/Grand_Turk', 'America/Grenada', 'America/Guadeloupe', 'America/Guatemala', 'America/Guayaquil', 'America/Guyana', 'America/Halifax', 'America/Havana', 'America/Hermosillo', 'America/Indianapolis', 'America/Inuvik', 'America/Iqaluit', 'America/Jamaica', 'America/Jujuy', 'America/Juneau', 'America/Knox_IN', 'America/La_Paz', 'America/Lima', 'America/Los_Angeles', 'America/Louisville', 'America/Maceio', 'America/Managua', 'America/Manaus', 'America/Marigot', 'America/Martinique', 'America/Mazatlan', 'America/Mendoza', 'America/Menominee', 'America/Merida', 'America/Mexico_City', 'America/Miquelon', 'America/Moncton', 'America/Monterrey', 'America/Montevideo', 'America/Montreal', 'America/Montserrat', 'America/Nassau', 'America/New_York', 'America/Nipigon', 'America/Nome', 'America/Noronha', 'America/Panama', 'America/Pangnirtung', 'America/Paramaribo', 'America/Phoenix', 'America/Port-au-Prince', 'America/Port_of_Spain', 'America/Porto_Acre', 'America/Porto_Velho', 'America/Puerto_Rico', 'America/Rainy_River', 'America/Rankin_Inlet', 'America/Recife', 'America/Regina', 'America/Resolute', 'America/Rio_Branco', 'America/Rosario', 'America/Santiago', 'America/Santo_Domingo', 'America/Sao_Paulo', 'America/Scoresbysund', 'America/Shiprock',
'America/St_Barthelemy', 'America/St_Johns', 'America/St_Kitts', 'America/St_Lucia', 'America/St_Thomas', 'America/St_Vincent', 'America/Swift_Current', 'America/Tegucigalpa', 'America/Thule', 'America/Thunder_Bay', 'America/Tijuana', 'America/Toronto', 'America/Tortola', 'America/Vancouver', 'America/Virgin', 'America/Whitehorse', 'America/Winnipeg', 'America/Yakutat', 'America/Yellowknife', 'Antarctica/Casey', 'Antarctica/Davis', 'Antarctica/DumontDUrville', 'Antarctica/Mawson', 'Antarctica/McMurdo', 'Antarctica/Palmer', 'Antarctica/Rothera', 'Antarctica/South_Pole', 'Antarctica/Syowa', 'Antarctica/Vostok', 'Arctic/Longyearbyen', 'Asia/Aden', 'Asia/Almaty', 'Asia/Amman', 'Asia/Anadyr', 'Asia/Aqtau', 'Asia/Aqtobe', 'Asia/Ashgabat', 'Asia/Ashkhabad', 'Asia/Baghdad', 'Asia/Bahrain', 'Asia/Baku', 'Asia/Bangkok', 'Asia/Beirut', 'Asia/Bishkek', 'Asia/Brunei', 'Asia/Calcutta', 'Asia/Choibalsan', 'Asia/Chongqing', 'Asia/Chungking', 'Asia/Colombo', 'Asia/Dacca', 'Asia/Damascus', 'Asia/Dhaka', 'Asia/Dili', 'Asia/Dubai', 'Asia/Dushanbe', 'Asia/Gaza', 'Asia/Harbin', 'Asia/Ho_Chi_Minh', 'Asia/Hong_Kong', 'Asia/Hovd', 'Asia/Irkutsk', 'Asia/Istanbul', 'Asia/Jakarta', 'Asia/Jayapura', 'Asia/Jerusalem', 'Asia/Kabul', 'Asia/Kamchatka', 'Asia/Karachi', 'Asia/Kashgar', 'Asia/Katmandu', 'Asia/Kolkata', 'Asia/Krasnoyarsk', 'Asia/Kuala_Lumpur', 'Asia/Kuching', 'Asia/Kuwait', 'Asia/Macao', 'Asia/Macau', 'Asia/Magadan', 'Asia/Makassar', 'Asia/Manila', 'Asia/Muscat', 'Asia/Nicosia', 'Asia/Novosibirsk', 'Asia/Omsk', 'Asia/Oral', 'Asia/Phnom_Penh', 'Asia/Pontianak', 'Asia/Pyongyang', 'Asia/Qatar', 'Asia/Qyzylorda', 'Asia/Rangoon', 'Asia/Riyadh', 'Asia/Saigon', 'Asia/Sakhalin', 'Asia/Samarkand', 'Asia/Seoul', 'Asia/Shanghai', 'Asia/Singapore', 'Asia/Taipei', 'Asia/Tashkent', 'Asia/Tbilisi', 'Asia/Tehran', 'Asia/Tel_Aviv', 'Asia/Thimbu', 'Asia/Thimphu', 'Asia/Tokyo', 'Asia/Ujung_Pandang', 'Asia/Ulaanbaatar', 'Asia/Ulan_Bator', 'Asia/Urumqi', 'Asia/Vientiane', 'Asia/Vladivostok', 'Asia/Yakutsk', 'Asia/Yekaterinburg', 'Asia/Yerevan', 'Atlantic/Azores', 'Atlantic/Bermuda', 'Atlantic/Canary', 'Atlantic/Cape_Verde', 'Atlantic/Faeroe', 'Atlantic/Faroe', 'Atlantic/Jan_Mayen', 'Atlantic/Madeira', 'Atlantic/Reykjavik', 'Atlantic/South_Georgia', 'Atlantic/St_Helena', 'Atlantic/Stanley', 'Australia/ACT', 'Australia/Adelaide', 'Australia/Brisbane', 'Australia/Broken_Hill', 'Australia/Canberra', 'Australia/Currie', 'Australia/Darwin', 'Australia/Eucla', 'Australia/Hobart', 'Australia/LHI', 'Australia/Lindeman', 'Australia/Lord_Howe', 'Australia/Melbourne', 'Australia/NSW', 'Australia/North', 'Australia/Perth', 'Australia/Queensland', 'Australia/South', 'Australia/Sydney', 'Australia/Tasmania', 'Australia/Victoria', 'Australia/West', 'Australia/Yancowinna', 'Brazil/Acre', 'Brazil/DeNoronha', 'Brazil/East', 'Brazil/West', 'Canada/Atlantic', 'Canada/Central', 'Canada/East-Saskatchewan', 'Canada/Eastern', 'Canada/Mountain', 'Canada/Newfoundland', 'Canada/Pacific', 'Canada/Saskatchewan', 'Canada/Yukon', 'Chile/Continental', 'Chile/EasterIsland', 'Europe/Amsterdam', 'Europe/Andorra', 'Europe/Athens', 'Europe/Belfast', 'Europe/Belgrade', 'Europe/Berlin', 'Europe/Bratislava', 'Europe/Brussels', 'Europe/Bucharest', 'Europe/Budapest', 'Europe/Chisinau', 'Europe/Copenhagen', 'Europe/Dublin', 'Europe/Gibraltar', 'Europe/Guernsey', 'Europe/Helsinki', 'Europe/Isle_of_Man', 'Europe/Istanbul', 'Europe/Jersey', 'Europe/Kaliningrad', 'Europe/Kiev', 'Europe/Lisbon', 'Europe/Ljubljana', 'Europe/London', 'Europe/Luxembourg', 'Europe/Madrid', 
'Europe/Malta', 'Europe/Mariehamn', 'Europe/Minsk', 'Europe/Monaco', 'Europe/Moscow', 'Europe/Nicosia', 'Europe/Oslo', 'Europe/Paris', 'Europe/Podgorica', 'Europe/Prague', 'Europe/Riga', 'Europe/Rome', 'Europe/Samara', 'Europe/San_Marino', 'Europe/Sarajevo', 'Europe/Simferopol', 'Europe/Skopje', 'Europe/Sofia', 'Europe/Stockholm', 'Europe/Tallinn', 'Europe/Tirane', 'Europe/Tiraspol', 'Europe/Uzhgorod', 'Europe/Vaduz', 'Europe/Vatican', 'Europe/Vienna', 'Europe/Vilnius', 'Europe/Volgograd', 'Europe/Warsaw', 'Europe/Zagreb', 'Europe/Zaporozhye', 'Europe/Zurich', 'GMT', 'Indian/Antananarivo', 'Indian/Chagos', 'Indian/Christmas', 'Indian/Cocos', 'Indian/Comoro', 'Indian/Kerguelen', 'Indian/Mahe', 'Indian/Maldives', 'Indian/Mauritius', 'Indian/Mayotte', 'Indian/Reunion', 'Mexico/BajaNorte', 'Mexico/BajaSur', 'Mexico/General', 'Pacific/Apia', 'Pacific/Auckland', 'Pacific/Chatham', 'Pacific/Easter', 'Pacific/Efate', 'Pacific/Enderbury', 'Pacific/Fakaofo', 'Pacific/Fiji', 'Pacific/Funafuti', 'Pacific/Galapagos', 'Pacific/Gambier', 'Pacific/Guadalcanal', 'Pacific/Guam', 'Pacific/Honolulu', 'Pacific/Johnston', 'Pacific/Kiritimati', 'Pacific/Kosrae', 'Pacific/Kwajalein', 'Pacific/Majuro', 'Pacific/Marquesas', 'Pacific/Midway', 'Pacific/Nauru', 'Pacific/Niue', 'Pacific/Norfolk', 'Pacific/Noumea', 'Pacific/Pago_Pago', 'Pacific/Palau', 'Pacific/Pitcairn', 'Pacific/Ponape', 'Pacific/Port_Moresby', 'Pacific/Rarotonga', 'Pacific/Saipan', 'Pacific/Samoa', 'Pacific/Tahiti', 'Pacific/Tarawa', 'Pacific/Tongatapu', 'Pacific/Truk', 'Pacific/Wake', 'Pacific/Wallis', 'Pacific/Yap', 'US/Alaska', 'US/Aleutian', 'US/Arizona', 'US/Central', 'US/East-Indiana', 'US/Eastern', 'US/Hawaii', 'US/Indiana-Starke', 'US/Michigan', 'US/Mountain', 'US/Pacific', 'US/Pacific-New', 'US/Samoa', 'UTC']

common_timezones_set = set(common_timezones)

all_timezones = \
['Africa/Abidjan', 'Africa/Accra', 'Africa/Addis_Ababa', 'Africa/Algiers', 'Africa/Asmara', 'Africa/Asmera', 'Africa/Bamako', 'Africa/Bangui', 'Africa/Banjul', 'Africa/Bissau', 'Africa/Blantyre', 'Africa/Brazzaville', 'Africa/Bujumbura', 'Africa/Cairo', 'Africa/Casablanca', 'Africa/Ceuta', 'Africa/Conakry', 'Africa/Dakar', 'Africa/Dar_es_Salaam', 'Africa/Djibouti', 'Africa/Douala', 'Africa/El_Aaiun', 'Africa/Freetown', 'Africa/Gaborone', 'Africa/Harare', 'Africa/Johannesburg', 'Africa/Kampala', 'Africa/Khartoum', 'Africa/Kigali', 'Africa/Kinshasa', 'Africa/Lagos', 'Africa/Libreville', 'Africa/Lome', 'Africa/Luanda', 'Africa/Lubumbashi', 'Africa/Lusaka', 'Africa/Malabo', 'Africa/Maputo', 'Africa/Maseru', 'Africa/Mbabane', 'Africa/Mogadishu', 'Africa/Monrovia', 'Africa/Nairobi', 'Africa/Ndjamena', 'Africa/Niamey', 'Africa/Nouakchott', 'Africa/Ouagadougou', 'Africa/Porto-Novo', 'Africa/Sao_Tome', 'Africa/Timbuktu', 'Africa/Tripoli', 'Africa/Tunis', 'Africa/Windhoek', 'America/Adak', 'America/Anchorage', 'America/Anguilla', 'America/Antigua', 'America/Araguaina', 'America/Argentina/Buenos_Aires', 'America/Argentina/Catamarca', 'America/Argentina/ComodRivadavia', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/La_Rioja', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/San_Juan', 'America/Argentina/San_Luis', 'America/Argentina/Tucuman', 'America/Argentina/Ushuaia', 'America/Aruba', 'America/Asuncion', 'America/Atikokan', 'America/Atka', 'America/Bahia', 'America/Barbados', 'America/Belem', 'America/Belize', 'America/Blanc-Sablon', 'America/Boa_Vista', 'America/Bogota', 'America/Boise', 'America/Buenos_Aires',
'America/Cambridge_Bay', 'America/Campo_Grande', 'America/Cancun', 'America/Caracas', 'America/Catamarca', 'America/Cayenne', 'America/Cayman', 'America/Chicago', 'America/Chihuahua', 'America/Coral_Harbour', 'America/Cordoba', 'America/Costa_Rica', 'America/Cuiaba', 'America/Curacao', 'America/Danmarkshavn', 'America/Dawson', 'America/Dawson_Creek', 'America/Denver', 'America/Detroit', 'America/Dominica', 'America/Edmonton', 'America/Eirunepe', 'America/El_Salvador', 'America/Ensenada', 'America/Fort_Wayne', 'America/Fortaleza', 'America/Glace_Bay', 'America/Godthab', 'America/Goose_Bay', 'America/Grand_Turk', 'America/Grenada', 'America/Guadeloupe', 'America/Guatemala', 'America/Guayaquil', 'America/Guyana', 'America/Halifax', 'America/Havana', 'America/Hermosillo', 'America/Indiana/Indianapolis', 'America/Indiana/Knox', 'America/Indiana/Marengo', 'America/Indiana/Petersburg', 'America/Indiana/Tell_City', 'America/Indiana/Vevay', 'America/Indiana/Vincennes', 'America/Indiana/Winamac', 'America/Indianapolis', 'America/Inuvik', 'America/Iqaluit', 'America/Jamaica', 'America/Jujuy', 'America/Juneau', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Knox_IN', 'America/La_Paz', 'America/Lima', 'America/Los_Angeles', 'America/Louisville', 'America/Maceio', 'America/Managua', 'America/Manaus', 'America/Marigot', 'America/Martinique', 'America/Mazatlan', 'America/Mendoza', 'America/Menominee', 'America/Merida', 'America/Mexico_City', 'America/Miquelon', 'America/Moncton', 'America/Monterrey', 'America/Montevideo', 'America/Montreal', 'America/Montserrat', 'America/Nassau', 'America/New_York', 'America/Nipigon', 'America/Nome', 'America/Noronha', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Panama', 'America/Pangnirtung', 'America/Paramaribo', 'America/Phoenix', 'America/Port-au-Prince', 'America/Port_of_Spain', 'America/Porto_Acre', 'America/Porto_Velho', 'America/Puerto_Rico', 'America/Rainy_River', 'America/Rankin_Inlet', 'America/Recife', 'America/Regina', 'America/Resolute', 'America/Rio_Branco', 'America/Rosario', 'America/Santiago', 'America/Santo_Domingo', 'America/Sao_Paulo', 'America/Scoresbysund', 'America/Shiprock', 'America/St_Barthelemy', 'America/St_Johns', 'America/St_Kitts', 'America/St_Lucia', 'America/St_Thomas', 'America/St_Vincent', 'America/Swift_Current', 'America/Tegucigalpa', 'America/Thule', 'America/Thunder_Bay', 'America/Tijuana', 'America/Toronto', 'America/Tortola', 'America/Vancouver', 'America/Virgin', 'America/Whitehorse', 'America/Winnipeg', 'America/Yakutat', 'America/Yellowknife', 'Antarctica/Casey', 'Antarctica/Davis', 'Antarctica/DumontDUrville', 'Antarctica/Mawson', 'Antarctica/McMurdo', 'Antarctica/Palmer', 'Antarctica/Rothera', 'Antarctica/South_Pole', 'Antarctica/Syowa', 'Antarctica/Vostok', 'Arctic/Longyearbyen', 'Asia/Aden', 'Asia/Almaty', 'Asia/Amman', 'Asia/Anadyr', 'Asia/Aqtau', 'Asia/Aqtobe', 'Asia/Ashgabat', 'Asia/Ashkhabad', 'Asia/Baghdad', 'Asia/Bahrain', 'Asia/Baku', 'Asia/Bangkok', 'Asia/Beirut', 'Asia/Bishkek', 'Asia/Brunei', 'Asia/Calcutta', 'Asia/Choibalsan', 'Asia/Chongqing', 'Asia/Chungking', 'Asia/Colombo', 'Asia/Dacca', 'Asia/Damascus', 'Asia/Dhaka', 'Asia/Dili', 'Asia/Dubai', 'Asia/Dushanbe', 'Asia/Gaza', 'Asia/Harbin', 'Asia/Ho_Chi_Minh', 'Asia/Hong_Kong', 'Asia/Hovd', 'Asia/Irkutsk', 'Asia/Istanbul', 'Asia/Jakarta', 'Asia/Jayapura', 'Asia/Jerusalem', 'Asia/Kabul', 'Asia/Kamchatka', 'Asia/Karachi', 'Asia/Kashgar', 'Asia/Katmandu', 'Asia/Kolkata', 'Asia/Krasnoyarsk', 
'Asia/Kuala_Lumpur', 'Asia/Kuching', 'Asia/Kuwait', 'Asia/Macao', 'Asia/Macau', 'Asia/Magadan', 'Asia/Makassar', 'Asia/Manila', 'Asia/Muscat', 'Asia/Nicosia', 'Asia/Novosibirsk', 'Asia/Omsk', 'Asia/Oral', 'Asia/Phnom_Penh', 'Asia/Pontianak', 'Asia/Pyongyang', 'Asia/Qatar', 'Asia/Qyzylorda', 'Asia/Rangoon', 'Asia/Riyadh', 'Asia/Saigon', 'Asia/Sakhalin', 'Asia/Samarkand', 'Asia/Seoul', 'Asia/Shanghai', 'Asia/Singapore', 'Asia/Taipei', 'Asia/Tashkent', 'Asia/Tbilisi', 'Asia/Tehran', 'Asia/Tel_Aviv', 'Asia/Thimbu', 'Asia/Thimphu', 'Asia/Tokyo', 'Asia/Ujung_Pandang', 'Asia/Ulaanbaatar', 'Asia/Ulan_Bator', 'Asia/Urumqi', 'Asia/Vientiane', 'Asia/Vladivostok', 'Asia/Yakutsk', 'Asia/Yekaterinburg', 'Asia/Yerevan', 'Atlantic/Azores', 'Atlantic/Bermuda', 'Atlantic/Canary', 'Atlantic/Cape_Verde', 'Atlantic/Faeroe', 'Atlantic/Faroe', 'Atlantic/Jan_Mayen', 'Atlantic/Madeira', 'Atlantic/Reykjavik', 'Atlantic/South_Georgia', 'Atlantic/St_Helena', 'Atlantic/Stanley', 'Australia/ACT', 'Australia/Adelaide', 'Australia/Brisbane', 'Australia/Broken_Hill', 'Australia/Canberra', 'Australia/Currie', 'Australia/Darwin', 'Australia/Eucla', 'Australia/Hobart', 'Australia/LHI', 'Australia/Lindeman', 'Australia/Lord_Howe', 'Australia/Melbourne', 'Australia/NSW', 'Australia/North', 'Australia/Perth', 'Australia/Queensland', 'Australia/South', 'Australia/Sydney', 'Australia/Tasmania', 'Australia/Victoria', 'Australia/West', 'Australia/Yancowinna', 'Brazil/Acre', 'Brazil/DeNoronha', 'Brazil/East', 'Brazil/West', 'CET', 'CST6CDT', 'Canada/Atlantic', 'Canada/Central', 'Canada/East-Saskatchewan', 'Canada/Eastern', 'Canada/Mountain', 'Canada/Newfoundland', 'Canada/Pacific', 'Canada/Saskatchewan', 'Canada/Yukon', 'Chile/Continental', 'Chile/EasterIsland', 'Cuba', 'EET', 'EST', 'EST5EDT', 'Egypt', 'Eire', 'Etc/GMT', 'Etc/GMT+0', 'Etc/GMT+1', 'Etc/GMT+10', 'Etc/GMT+11', 'Etc/GMT+12', 'Etc/GMT+2', 'Etc/GMT+3', 'Etc/GMT+4', 'Etc/GMT+5', 'Etc/GMT+6', 'Etc/GMT+7', 'Etc/GMT+8', 'Etc/GMT+9', 'Etc/GMT-0', 'Etc/GMT-1', 'Etc/GMT-10', 'Etc/GMT-11', 'Etc/GMT-12', 'Etc/GMT-13', 'Etc/GMT-14', 'Etc/GMT-2', 'Etc/GMT-3', 'Etc/GMT-4', 'Etc/GMT-5', 'Etc/GMT-6', 'Etc/GMT-7', 'Etc/GMT-8', 'Etc/GMT-9', 'Etc/GMT0', 'Etc/Greenwich', 'Etc/UCT', 'Etc/UTC', 'Etc/Universal', 'Etc/Zulu', 'Europe/Amsterdam', 'Europe/Andorra', 'Europe/Athens', 'Europe/Belfast', 'Europe/Belgrade', 'Europe/Berlin', 'Europe/Bratislava', 'Europe/Brussels', 'Europe/Bucharest', 'Europe/Budapest', 'Europe/Chisinau', 'Europe/Copenhagen', 'Europe/Dublin', 'Europe/Gibraltar', 'Europe/Guernsey', 'Europe/Helsinki', 'Europe/Isle_of_Man', 'Europe/Istanbul', 'Europe/Jersey', 'Europe/Kaliningrad', 'Europe/Kiev', 'Europe/Lisbon', 'Europe/Ljubljana', 'Europe/London', 'Europe/Luxembourg', 'Europe/Madrid', 'Europe/Malta', 'Europe/Mariehamn', 'Europe/Minsk', 'Europe/Monaco', 'Europe/Moscow', 'Europe/Nicosia', 'Europe/Oslo', 'Europe/Paris', 'Europe/Podgorica', 'Europe/Prague', 'Europe/Riga', 'Europe/Rome', 'Europe/Samara', 'Europe/San_Marino', 'Europe/Sarajevo', 'Europe/Simferopol', 'Europe/Skopje', 'Europe/Sofia', 'Europe/Stockholm', 'Europe/Tallinn', 'Europe/Tirane', 'Europe/Tiraspol', 'Europe/Uzhgorod', 'Europe/Vaduz', 'Europe/Vatican', 'Europe/Vienna', 'Europe/Vilnius', 'Europe/Volgograd', 'Europe/Warsaw', 'Europe/Zagreb', 'Europe/Zaporozhye', 'Europe/Zurich', 'GB', 'GB-Eire', 'GMT', 'GMT+0', 'GMT-0', 'GMT0', 'Greenwich', 'HST', 'Hongkong', 'Iceland', 'Indian/Antananarivo', 'Indian/Chagos', 'Indian/Christmas', 'Indian/Cocos', 'Indian/Comoro', 'Indian/Kerguelen', 'Indian/Mahe', 
'Indian/Maldives', 'Indian/Mauritius', 'Indian/Mayotte', 'Indian/Reunion', 'Iran', 'Israel', 'Jamaica', 'Japan', 'Kwajalein', 'Libya', 'MET', 'MST', 'MST7MDT', 'Mexico/BajaNorte', 'Mexico/BajaSur', 'Mexico/General', 'NZ', 'NZ-CHAT', 'Navajo', 'PRC', 'PST8PDT', 'Pacific/Apia', 'Pacific/Auckland', 'Pacific/Chatham', 'Pacific/Easter', 'Pacific/Efate', 'Pacific/Enderbury', 'Pacific/Fakaofo', 'Pacific/Fiji', 'Pacific/Funafuti', 'Pacific/Galapagos', 'Pacific/Gambier', 'Pacific/Guadalcanal', 'Pacific/Guam', 'Pacific/Honolulu', 'Pacific/Johnston', 'Pacific/Kiritimati', 'Pacific/Kosrae', 'Pacific/Kwajalein', 'Pacific/Majuro', 'Pacific/Marquesas', 'Pacific/Midway', 'Pacific/Nauru', 'Pacific/Niue', 'Pacific/Norfolk', 'Pacific/Noumea', 'Pacific/Pago_Pago', 'Pacific/Palau', 'Pacific/Pitcairn', 'Pacific/Ponape', 'Pacific/Port_Moresby', 'Pacific/Rarotonga', 'Pacific/Saipan', 'Pacific/Samoa', 'Pacific/Tahiti', 'Pacific/Tarawa', 'Pacific/Tongatapu', 'Pacific/Truk', 'Pacific/Wake', 'Pacific/Wallis', 'Pacific/Yap', 'Poland', 'Portugal', 'ROC', 'ROK', 'Singapore', 'Turkey', 'UCT', 'US/Alaska', 'US/Aleutian', 'US/Arizona', 'US/Central', 'US/East-Indiana', 'US/Eastern', 'US/Hawaii', 'US/Indiana-Starke', 'US/Michigan', 'US/Mountain', 'US/Pacific', 'US/Pacific-New', 'US/Samoa', 'UTC', 'Universal', 'W-SU', 'WET', 'Zulu', 'posixrules']

all_timezones_set = set(all_timezones)
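A brief usage sketch of the zoneinfo API defined above, mirroring its doctests (illustrative only; assumes the file is importable as the pytz package):

import datetime
import pytz  # assumed import of the module above

eastern = pytz.timezone('US/Eastern')
utc_dt = datetime.datetime(2002, 10, 27, 6, 0, 0, tzinfo=pytz.utc)
loc_dt = utc_dt.astimezone(eastern)
print(loc_dt.strftime('%Y-%m-%d %H:%M:%S %Z'))   # 2002-10-27 01:00:00 EST
# normalize() repairs the tzinfo after arithmetic crosses the DST boundary
earlier = eastern.normalize(loc_dt - datetime.timedelta(minutes=10))
print(earlier.strftime('%Y-%m-%d %H:%M:%S %Z'))  # 2002-10-27 01:50:00 EDT
print(pytz.country_timezones('ch'))              # ['Europe/Zurich']
print(pytz.FixedOffset(-330).utcoffset(None))    # -1 day, 18:30:00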
agpl-3.0
-3,484,383,658,359,607,300
20.813523
81
0.66112
false
felipenaselva/felipe.repository
script.module.placenta/lib/resources/lib/modules/trakt.py
1
17250
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file.  As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import json
import re
import time
import urllib
import urlparse

from resources.lib.modules import cache
from resources.lib.modules import cleandate
from resources.lib.modules import client
from resources.lib.modules import control
from resources.lib.modules import log_utils
from resources.lib.modules import utils

BASE_URL = 'http://api.trakt.tv'
V2_API_KEY = '42740047aba33b1f04c1ba3893ce805a9ecfebd05de544a30fe0c99fabec972e'
CLIENT_SECRET = 'c7a3e7fdf5c3863872c8f45e1d3f33797b492ed574a00a01a3fadcb3d270f926'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'


def __getTrakt(url, post=None):
    try:
        url = urlparse.urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {'Content-Type': 'application/json', 'trakt-api-key': V2_API_KEY, 'trakt-api-version': 2}

        if getTraktCredentialsInfo():
            headers.update({'Authorization': 'Bearer %s' % control.setting('trakt.token')})

        result = client.request(url, post=post, headers=headers, output='extended', error=True)

        resp_code = result[1]
        resp_header = result[2]
        result = result[0]

        if resp_code in ['500', '502', '503', '504', '520', '521', '522', '524']:
            log_utils.log('Temporary Trakt Error: %s' % resp_code, log_utils.LOGWARNING)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % resp_code, log_utils.LOGWARNING)
            return
#        elif resp_code in ['429']:
#            log_utils.log('Trakt Rate Limit Reached: %s' % resp_code, log_utils.LOGWARNING)
#            return

        if resp_code not in ['401', '405']:
            return result, resp_header

        # 401/405: access token expired, refresh it and retry once
        oauth = urlparse.urljoin(BASE_URL, '/oauth/token')
        opost = {'client_id': V2_API_KEY, 'client_secret': CLIENT_SECRET, 'redirect_uri': REDIRECT_URI, 'grant_type': 'refresh_token', 'refresh_token': control.setting('trakt.refresh')}

        result = client.request(oauth, post=json.dumps(opost), headers=headers)
        result = utils.json_loads_as_str(result)

        token, refresh = result['access_token'], result['refresh_token']

        control.setSetting(id='trakt.token', value=token)
        control.setSetting(id='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        result = client.request(url, post=post, headers=headers, output='extended', error=True)
        return result[0], result[2]
    except Exception as e:
        log_utils.log('Unknown Trakt Error: %s' % e, log_utils.LOGWARNING)
        pass


def getTraktAsJson(url, post=None):
    try:
        r, res_headers = __getTrakt(url, post)
        r = utils.json_loads_as_str(r)
        if 'X-Sort-By' in res_headers and 'X-Sort-How' in res_headers:
            r = sort_list(res_headers['X-Sort-By'], res_headers['X-Sort-How'], r)
        return r
    except:
        pass


def authTrakt():
    try:
        if getTraktCredentialsInfo() == True:
            if control.yesnoDialog(control.lang(32511).encode('utf-8'), control.lang(32512).encode('utf-8'), '', 'Trakt'):
                control.setSetting(id='trakt.user', value='')
                control.setSetting(id='trakt.token', value='')
                control.setSetting(id='trakt.refresh', value='')
            raise Exception()

        result = getTraktAsJson('/oauth/device/code', {'client_id': V2_API_KEY})
        verification_url = (control.lang(32513) % result['verification_url']).encode('utf-8')
        user_code = (control.lang(32514) % result['user_code']).encode('utf-8')
        expires_in = int(result['expires_in'])
        device_code = result['device_code']
        interval = result['interval']

        progressDialog = control.progressDialog
        progressDialog.create('Trakt', verification_url, user_code)

        for i in range(0, expires_in):
            try:
                if progressDialog.iscanceled(): break
                time.sleep(1)
                if not float(i) % interval == 0: raise Exception()
                r = getTraktAsJson('/oauth/device/token', {'client_id': V2_API_KEY, 'client_secret': CLIENT_SECRET, 'code': device_code})
                if 'access_token' in r: break
            except:
                pass

        try: progressDialog.close()
        except: pass

        token, refresh = r['access_token'], r['refresh_token']

        headers = {'Content-Type': 'application/json', 'trakt-api-key': V2_API_KEY, 'trakt-api-version': 2, 'Authorization': 'Bearer %s' % token}

        result = client.request(urlparse.urljoin(BASE_URL, '/users/me'), headers=headers)
        result = utils.json_loads_as_str(result)

        user = result['username']

        control.setSetting(id='trakt.user', value=user)
        control.setSetting(id='trakt.token', value=token)
        control.setSetting(id='trakt.refresh', value=refresh)
        raise Exception()
    except:
        control.openSettings('3.1')


def getTraktCredentialsInfo():
    user = control.setting('trakt.user').strip()
    token = control.setting('trakt.token')
    refresh = control.setting('trakt.refresh')
    if (user == '' or token == '' or refresh == ''): return False
    return True


def getTraktIndicatorsInfo():
    indicators = control.setting('indicators') if getTraktCredentialsInfo() == False else control.setting('indicators.alt')
    indicators = True if indicators == '1' else False
    return indicators


def getTraktAddonMovieInfo():
    try: scrobble = control.addon('script.trakt').getSetting('scrobble_movie')
    except: scrobble = ''
    try: ExcludeHTTP = control.addon('script.trakt').getSetting('ExcludeHTTP')
    except: ExcludeHTTP = ''
    try: authorization = control.addon('script.trakt').getSetting('authorization')
    except: authorization = ''
    if scrobble == 'true' and ExcludeHTTP == 'false' and not authorization == '': return True
    else: return False


def getTraktAddonEpisodeInfo():
    try: scrobble = control.addon('script.trakt').getSetting('scrobble_episode')
    except: scrobble = ''
    try: ExcludeHTTP = control.addon('script.trakt').getSetting('ExcludeHTTP')
    except: ExcludeHTTP = ''
    try: authorization = control.addon('script.trakt').getSetting('authorization')
    except: authorization = ''
    if scrobble == 'true' and ExcludeHTTP == 'false' and not authorization == '': return True
    else: return False


def manager(name, imdb, tvdb, content):
    try:
        post = {"movies": [{"ids": {"imdb": imdb}}]} if content == 'movie' else {"shows": [{"ids": {"tvdb": tvdb}}]}

        items = [(control.lang(32516).encode('utf-8'), '/sync/collection')]
        items += [(control.lang(32517).encode('utf-8'), '/sync/collection/remove')]
        items += [(control.lang(32518).encode('utf-8'), '/sync/watchlist')]
        items += [(control.lang(32519).encode('utf-8'), '/sync/watchlist/remove')]
        items += [(control.lang(32520).encode('utf-8'), '/users/me/lists/%s/items')]

        result = getTraktAsJson('/users/me/lists')
        lists = [(i['name'], i['ids']['slug']) for i in result]
        # duplicate every list entry so each user list gets an "add" and a "remove" action
        lists = [lists[i//2] for i in range(len(lists)*2)]
        for i in range(0, len(lists), 2):
            lists[i] = ((control.lang(32521) % lists[i][0]).encode('utf-8'), '/users/me/lists/%s/items' % lists[i][1])
        for i in range(1, len(lists), 2):
            lists[i] = ((control.lang(32522) % lists[i][0]).encode('utf-8'), '/users/me/lists/%s/items/remove' % lists[i][1])
        items += lists

        select = control.selectDialog([i[0] for i in items], control.lang(32515).encode('utf-8'))

        if select == -1:
            return
        elif select == 4:
            t = control.lang(32520).encode('utf-8')
            k = control.keyboard('', t) ; k.doModal()
            new = k.getText() if k.isConfirmed() else None
            if (new == None or new == ''): return
            result = __getTrakt('/users/me/lists', post={"name": new, "privacy": "private"})[0]
            try: slug = utils.json_loads_as_str(result)['ids']['slug']
            except: return control.infoDialog(control.lang(32515).encode('utf-8'), heading=str(name), sound=True, icon='ERROR')
            result = __getTrakt(items[select][1] % slug, post=post)[0]
        else:
            result = __getTrakt(items[select][1], post=post)[0]

        icon = control.infoLabel('ListItem.Icon') if not result == None else 'ERROR'
        control.infoDialog(control.lang(32515).encode('utf-8'), heading=str(name), sound=True, icon=icon)
    except:
        return


def slug(name):
    name = name.strip()
    name = name.lower()
    name = re.sub('[^a-z0-9_]', '-', name)
    name = re.sub('--+', '-', name)
    return name


def sort_list(sort_key, sort_direction, list_data):
    reverse = False if sort_direction == 'asc' else True
    if sort_key == 'rank':
        return sorted(list_data, key=lambda x: x['rank'], reverse=reverse)
    elif sort_key == 'added':
        return sorted(list_data, key=lambda x: x['listed_at'], reverse=reverse)
    elif sort_key == 'title':
        return sorted(list_data, key=lambda x: utils.title_key(x[x['type']].get('title')), reverse=reverse)
    elif sort_key == 'released':
        return sorted(list_data, key=lambda x: _released_key(x[x['type']]), reverse=reverse)
    elif sort_key == 'runtime':
        return sorted(list_data, key=lambda x: x[x['type']].get('runtime', 0), reverse=reverse)
    elif sort_key == 'popularity':
        return sorted(list_data, key=lambda x: x[x['type']].get('votes', 0), reverse=reverse)
    elif sort_key == 'percentage':
        return sorted(list_data, key=lambda x: x[x['type']].get('rating', 0), reverse=reverse)
    elif sort_key == 'votes':
        return sorted(list_data, key=lambda x: x[x['type']].get('votes', 0), reverse=reverse)
    else:
        return list_data


def _released_key(item):
    if 'released' in item:
        return item['released']
    elif 'first_aired' in item:
        return item['first_aired']
    else:
        return 0


def getActivity():
    try:
        i = getTraktAsJson('/sync/last_activities')

        activity = []
        activity.append(i['movies']['collected_at'])
        activity.append(i['episodes']['collected_at'])
        activity.append(i['movies']['watchlisted_at'])
        activity.append(i['shows']['watchlisted_at'])
        activity.append(i['seasons']['watchlisted_at'])
        activity.append(i['episodes']['watchlisted_at'])
        activity.append(i['lists']['updated_at'])
        activity.append(i['lists']['liked_at'])
        activity = [int(cleandate.iso_2_utc(i)) for i in activity]
        activity = sorted(activity, key=int)[-1]

        return activity
    except:
        pass


def getWatchedActivity():
    try:
        i = getTraktAsJson('/sync/last_activities')

        activity = []
        activity.append(i['movies']['watched_at'])
        activity.append(i['episodes']['watched_at'])
        activity = [int(cleandate.iso_2_utc(i)) for i in activity]
        activity = sorted(activity, key=int)[-1]

        return activity
    except:
        pass


def cachesyncMovies(timeout=0):
    indicators = cache.get(syncMovies, timeout, control.setting('trakt.user').strip())
    return indicators


def timeoutsyncMovies():
    timeout = cache.timeout(syncMovies, control.setting('trakt.user').strip())
    return timeout


def syncMovies(user):
    try:
        if getTraktCredentialsInfo() == False: return
        indicators = getTraktAsJson('/users/me/watched/movies')
        indicators = [i['movie']['ids'] for i in indicators]
        indicators = [str(i['imdb']) for i in indicators if 'imdb' in i]
        return indicators
    except:
        pass


def cachesyncTVShows(timeout=0):
    indicators = cache.get(syncTVShows, timeout, control.setting('trakt.user').strip())
    return indicators


def timeoutsyncTVShows():
    timeout = cache.timeout(syncTVShows, control.setting('trakt.user').strip())
    return timeout


def syncTVShows(user):
    try:
        if getTraktCredentialsInfo() == False: return
        indicators = getTraktAsJson('/users/me/watched/shows?extended=full')
        indicators = [(i['show']['ids']['tvdb'], i['show']['aired_episodes'], sum([[(s['number'], e['number']) for e in s['episodes']] for s in i['seasons']], [])) for i in indicators]
        indicators = [(str(i[0]), int(i[1]), i[2]) for i in indicators]
        return indicators
    except:
        pass


def syncSeason(imdb):
    try:
        if getTraktCredentialsInfo() == False: return
        indicators = getTraktAsJson('/shows/%s/progress/watched?specials=false&hidden=false' % imdb)
        indicators = indicators['seasons']
        indicators = [(i['number'], [x['completed'] for x in i['episodes']]) for i in indicators]
        indicators = ['%01d' % int(i[0]) for i in indicators if not False in i[1]]
        return indicators
    except:
        pass


def markMovieAsWatched(imdb):
    if not imdb.startswith('tt'): imdb = 'tt' + imdb
    return __getTrakt('/sync/history', {"movies": [{"ids": {"imdb": imdb}}]})[0]


def markMovieAsNotWatched(imdb):
    if not imdb.startswith('tt'): imdb = 'tt' + imdb
    return __getTrakt('/sync/history/remove', {"movies": [{"ids": {"imdb": imdb}}]})[0]


def markTVShowAsWatched(tvdb):
    return __getTrakt('/sync/history', {"shows": [{"ids": {"tvdb": tvdb}}]})[0]


def markTVShowAsNotWatched(tvdb):
    return __getTrakt('/sync/history/remove', {"shows": [{"ids": {"tvdb": tvdb}}]})[0]


def markEpisodeAsWatched(tvdb, season, episode):
    season, episode = int('%01d' % int(season)), int('%01d' % int(episode))
    return __getTrakt('/sync/history', {"shows": [{"seasons": [{"episodes": [{"number": episode}], "number": season}], "ids": {"tvdb": tvdb}}]})[0]


def markEpisodeAsNotWatched(tvdb, season, episode):
    season, episode = int('%01d' % int(season)), int('%01d' % int(episode))
    return __getTrakt('/sync/history/remove', {"shows": [{"seasons": [{"episodes": [{"number": episode}], "number": season}], "ids": {"tvdb": tvdb}}]})[0]


def getMovieTranslation(id, lang, full=False):
    url = '/movies/%s/translations/%s' % (id, lang)
    try:
        item = getTraktAsJson(url)[0]
        return item if full else item.get('title')
    except:
        pass


def getTVShowTranslation(id, lang, season=None, episode=None, full=False):
    if season and episode:
        url = '/shows/%s/seasons/%s/episodes/%s/translations/%s' % (id, season, episode, lang)
    else:
        url = '/shows/%s/translations/%s' % (id, lang)
    try:
        item = getTraktAsJson(url)[0]
        return item if full else item.get('title')
    except:
        pass


def getMovieAliases(id):
    try: return getTraktAsJson('/movies/%s/aliases' % id)
    except: return []


def getTVShowAliases(id):
    try: return getTraktAsJson('/shows/%s/aliases' % id)
    except: return []


def getMovieSummary(id, full=True):
    try:
        url = '/movies/%s' % id
        if full: url += '?extended=full'
        return getTraktAsJson(url)
    except:
        return


def getTVShowSummary(id, full=True):
    try:
        url = '/shows/%s' % id
        if full: url += '?extended=full'
        return getTraktAsJson(url)
    except:
        return


def getPeople(id, content_type, full=True):
    try:
        url = '/%s/%s/people' % (content_type, id)
        if full: url += '?extended=full'
        return getTraktAsJson(url)
    except:
        return


def SearchAll(title, year, full=True):
    try:
        return SearchMovie(title, year, full) + SearchTVShow(title, year, full)
    except:
        return


def SearchMovie(title, year, full=True):
    try:
        url = '/search/movie?query=%s' % urllib.quote_plus(title)
        if year: url += '&year=%s' % year
        if full: url += '&extended=full'
        return getTraktAsJson(url)
    except:
        return


def SearchTVShow(title, year, full=True):
    try:
        url = '/search/show?query=%s' % urllib.quote_plus(title)
        if year: url += '&year=%s' % year
        if full: url += '&extended=full'
        return getTraktAsJson(url)
    except:
        return


def IdLookup(content, type, type_id):
    try:
        r = getTraktAsJson('/search/%s/%s?type=%s' % (type, type_id, content))
        return r[0].get(content, {}).get('ids', [])
    except:
        return {}


def getGenre(content, type, type_id):
    try:
        r = '/search/%s/%s?type=%s&extended=full' % (type, type_id, content)
        r = getTraktAsJson(r)
        r = r[0].get(content, {}).get('genres', [])
        return r
    except:
        return []
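An illustrative sketch (not part of the addon) exercising the pure helpers above without touching the Trakt API; the payload shape is a hypothetical mirror of what sort_list() receives from a Trakt list response, and the import path is assumed from the module's own imports:

from resources.lib.modules import trakt  # assumed addon import path

print(trakt.slug('My Watch List!'))  # my-watch-list-
rows = [
    {'type': 'movie', 'rank': 2, 'listed_at': '2017-01-02', 'movie': {'title': 'B', 'votes': 10}},
    {'type': 'movie', 'rank': 1, 'listed_at': '2017-01-01', 'movie': {'title': 'A', 'votes': 99}},
]
print([r['rank'] for r in trakt.sort_list('rank', 'asc', rows)])               # [1, 2]
print([r['movie']['title'] for r in trakt.sort_list('votes', 'desc', rows)])   # ['A', 'B']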
gpl-2.0
-689,592,721,035,571,500
35.469345
185
0.601043
false
nside/appcrawl
appcrawl/spiders/playstore.py
1
3616
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from appcrawl.items import AppItem, AppStoreItem
from scrapy.http import Request
import datetime
import re


class PlaystoreSpider(CrawlSpider):
    def gen_urls():
        for c in ('ARCADE', 'BRAIN', 'CARDS', 'CASUAL', 'GAME_WALLPAPER', 'RACING', 'SPORTS_GAMES', 'GAME_WIDGETS',
                  'BOOKS_AND_REFERENCE', 'BUSINESS', 'COMICS', 'COMMUNICATION', 'EDUCATION', 'ENTERTAINMENT',
                  'FINANCE', 'HEALTH', 'LIBRARIES_AND_DEMO', 'LIFESTYLE', 'APP_WALLPAPER', 'MEDIA_AND_VIDEO',
                  'MEDICAL', 'MUSIC_AND_AUDIO', 'NEWS_AND_MAGAZINES', 'PERSONALIZATION', 'PHOTOGRAPHY',
                  'PRODUCTIVITY', 'SHOPPING', 'SOCIAL', 'SPORTS', 'TOOLS', 'TRANSPORTATION', 'TRAVEL_AND_LOCAL',
                  'WEATHER', 'APP_WIDGETS'):
            yield 'https://play.google.com/store/apps/category/%s/collection/topselling_paid' % c
            yield 'https://play.google.com/store/apps/category/%s/collection/topselling_free' % c

    name = 'playstore'
    allowed_domains = ['play.google.com']
    start_urls = gen_urls()  # called at class-creation time, before gen_urls would become a method
    reg_start = re.compile('start=([\d]+)')

    rules = (
        #Rule(SgmlLinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
        Rule(SgmlLinkExtractor(allow=r'category/[A-Z_]+\?', deny=r'/accounts/'), follow=True, callback='parse_app'), #categories
#        Rule(SgmlLinkExtractor(allow=r'start=[\d]+&num=[\d]+', deny=r'/accounts/'), follow=True), #categories
        Rule(SgmlLinkExtractor(allow=r'/collection/', deny=r'editors_choice'), follow=True), #categories
        #parse_app
    )

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        m = PlaystoreSpider.reg_start.search(response.url)
        start = 0
        if m:
            start = int(m.group(1))
        artworks = hxs.select('//div[@class="thumbnail-wrapper goog-inline-block"]/a/img/@src').extract()
        ids = hxs.select('//li[@class="goog-inline-block"]/@data-docid').extract()
        ids += hxs.select('//li[@class="goog-inline-block z-last-child"]/@data-docid').extract() #scary!
        names = hxs.select('//div[@class="details goog-inline-block"]/div/a/text()').extract()
        urls = hxs.select('//div[@class="details goog-inline-block"]/div/a/@href').extract()
        reg_cat = re.compile('/category/([\w_]+)(/|\?)*')
        category = reg_cat.search(response.url).group(1).replace('_', ' ').title()
        sellers = hxs.select('//span[@class="attribution"]/div/a').extract()
        seller_links = hxs.select('//span[@class="attribution"]/div/a/@href').extract()
        assert "We're sorry" not in response.body
        assert len(artworks) == len(ids) == len(names) == len(urls) == len(sellers) == len(seller_links), \
            (len(artworks), len(ids), len(names), len(urls), len(sellers), len(seller_links))

        for artwork, id, name, url, seller, seller_link in zip(artworks, ids, names, urls, sellers, seller_links):
            i = AppStoreItem()
            i['store'] = 'play'
            i['id'] = id
            i['artwork'] = artwork
            i['category'] = category
            i['url'] = 'https://play.google.com' + url
            i['name'] = name
            i['last_update'] = datetime.date.today().isoformat()
            i['seller'] = seller
            i['seller_link'] = 'https://play.google.com' + seller_link
            yield i

        if start == 0:
            prefix = '?'
            if '?' in response.url:
                prefix = '&'
            for i in range(24, 480 + 1, 24):
                yield Request(response.url + prefix + 'start=%d&num=24' % i)
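A small sketch of the URL bookkeeping the spider relies on, with a hypothetical URL (the two regexes are the ones used above):

import re

reg_start = re.compile('start=([\d]+)')
reg_cat = re.compile('/category/([\w_]+)(/|\?)*')
url = 'https://play.google.com/store/apps/category/MUSIC_AND_AUDIO/collection/topselling_free?start=48&num=24'
print(int(reg_start.search(url).group(1)))                     # 48
print(reg_cat.search(url).group(1).replace('_', ' ').title())  # Music And Audio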
bsd-2-clause
2,205,364,849,234,729,200
53.787879
505
0.61781
false
prutseltje/ansible
lib/ansible/plugins/callback/stderr.py
59
3194
# (c) 2017, Frederic Van Espen <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    callback: stderr
    callback_type: stdout
    requirements:
      - set as main display callback
    short_description: Splits output, sending failed tasks to stderr
    version_added: "2.4"
    extends_documentation_fragment:
      - default_callback
    description:
        - This is the stderr callback plugin, it behaves like the default callback plugin but sends error output to stderr.
        - Also it does not output skipped host/task/item status
'''

from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default


class CallbackModule(CallbackModule_default):

    '''
    This is the stderr callback plugin, which reuses the default
    callback plugin but sends error output to stderr.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'stderr'

    def __init__(self):
        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()

    def v2_runner_on_failed(self, result, ignore_errors=False):
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        self._clean_results(result._result, result._task.action)

        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)

        self._handle_exception(result._result, errors_to_stderr=True)
        self._handle_warnings(result._result)

        if result._task.loop and 'results' in result._result:
            self._process_items(result)
        else:
            if delegated_vars:
                self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
                                                                            self._dump_results(result._result)),
                                      color=C.COLOR_ERROR, stderr=True)
            else:
                self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
                                      color=C.COLOR_ERROR, stderr=True)

        if ignore_errors:
            self._display.display("...ignoring", color=C.COLOR_SKIP)

    def _handle_exception(self, result, errors_to_stderr=False):
        if 'exception' in result:
            msg = "An exception occurred during task execution. "
            if self._display.verbosity < 3:
                # extract just the actual error message from the exception text
                error = result['exception'].strip().split('\n')[-1]
                msg += "To see the full traceback, use -vvv. The error was: %s" % error
            else:
                msg = "The full traceback is:\n" + result['exception']
                del result['exception']

            self._display.display(msg, color=C.COLOR_ERROR, stderr=errors_to_stderr)
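For context, a minimal sketch of the traceback trimming done in _handle_exception() above: at verbosity below 3, only the last line of the stored exception text is surfaced (the sample data is fabricated):

result = {'exception': 'Traceback (most recent call last):\n  File "x.py", line 1\nValueError: boom'}
error = result['exception'].strip().split('\n')[-1]
print(error)  # ValueError: boom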
gpl-3.0
-1,762,108,654,163,185,200
38.432099
133
0.61052
false
srvg/ansible-modules-extras
identity/ipa/ipa_sudocmdgroup.py
28
8567
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}

DOCUMENTATION = '''
---
module: ipa_sudocmdgroup
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo command group
description:
- Add, modify or delete sudo command group within IPA server using IPA API.
options:
  cn:
    description:
    - Sudo Command Group.
    aliases: ['name']
    required: true
  description:
    description:
    - Group description.
  state:
    description: State to ensure
    required: false
    default: present
    choices: ['present', 'absent']
  sudocmd:
    description:
    - List of sudo commands to assign to the group.
    - If an empty list is passed all assigned commands will be removed from the group.
    - If option is omitted sudo commands will not be checked or changed.
    required: false
  ipa_port:
    description: Port of IPA server
    required: false
    default: 443
  ipa_host:
    description: IP or hostname of IPA server
    required: false
    default: "ipa.example.com"
  ipa_user:
    description: Administrative account used on IPA server
    required: false
    default: "admin"
  ipa_pass:
    description: Password of administrative user
    required: true
  ipa_prot:
    description: Protocol used by IPA server
    required: false
    default: "https"
    choices: ["http", "https"]
  validate_certs:
    description:
    - This only applies if C(ipa_prot) is I(https).
    - If set to C(no), the SSL certificates will not be validated.
    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
    required: false
    default: true
version_added: "2.3"
'''

EXAMPLES = '''
- name: Ensure sudo command group exists
  ipa_sudocmdgroup:
    name: group01
    description: Group of important commands
    sudocmd:
    - su
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret

- name: Ensure sudo command group does not exist
  ipa_sudocmdgroup:
    name: group01
    state: absent
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret
'''

RETURN = '''
sudocmdgroup:
  description: Sudo command group as returned by IPA API
  returned: always
  type: dict
'''

from ansible.module_utils.ipa import IPAClient


class SudoCmdGroupIPAClient(IPAClient):
    def __init__(self, module, host, port, protocol):
        super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol)

    def sudocmdgroup_find(self, name):
        return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name})

    def sudocmdgroup_add(self, name, item):
        return self._post_json(method='sudocmdgroup_add', name=name, item=item)

    def sudocmdgroup_mod(self, name, item):
        return self._post_json(method='sudocmdgroup_mod', name=name, item=item)

    def sudocmdgroup_del(self, name):
        return self._post_json(method='sudocmdgroup_del', name=name)

    def sudocmdgroup_add_member(self, name, item):
        return self._post_json(method='sudocmdgroup_add_member', name=name, item=item)

    def sudocmdgroup_add_member_sudocmd(self, name, item):
        return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item})

    def sudocmdgroup_remove_member(self, name, item):
        return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item)

    def sudocmdgroup_remove_member_sudocmd(self, name, item):
        return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item})


def get_sudocmdgroup_dict(description=None):
    data = {}
    if description is not None:
        data['description'] = description
    return data


def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method):
    changed = False
    diff = list(set(ipa_list) - set(module_list))
    if len(diff) > 0:
        changed = True
        if not module.check_mode:
            remove_method(name=name, item=diff)

    diff = list(set(module_list) - set(ipa_list))
    if len(diff) > 0:
        changed = True
        if not module.check_mode:
            add_method(name=name, item=diff)

    return changed


def get_sudocmdgroup_diff(ipa_sudocmdgroup, module_sudocmdgroup):
    data = []
    for key in module_sudocmdgroup.keys():
        module_value = module_sudocmdgroup.get(key, None)
        ipa_value = ipa_sudocmdgroup.get(key, None)
        if isinstance(ipa_value, list) and not isinstance(module_value, list):
            module_value = [module_value]
        if isinstance(ipa_value, list) and isinstance(module_value, list):
            ipa_value = sorted(ipa_value)
            module_value = sorted(module_value)
        if ipa_value != module_value:
            data.append(key)
    return data


def ensure(module, client):
    name = module.params['name']
    state = module.params['state']
    sudocmd = module.params['sudocmd']

    module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description'])
    ipa_sudocmdgroup = client.sudocmdgroup_find(name=name)

    changed = False
    if state == 'present':
        if not ipa_sudocmdgroup:
            changed = True
            if not module.check_mode:
                ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup)
        else:
            diff = get_sudocmdgroup_diff(ipa_sudocmdgroup, module_sudocmdgroup)
            if len(diff) > 0:
                changed = True
                if not module.check_mode:
                    data = {}
                    for key in diff:
                        data[key] = module_sudocmdgroup.get(key)
                    client.sudocmdgroup_mod(name=name, item=data)

        if sudocmd is not None:
            # don't let an unchanged member list overwrite an earlier 'changed' result
            changed = modify_if_diff(module, name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd,
                                     client.sudocmdgroup_add_member_sudocmd,
                                     client.sudocmdgroup_remove_member_sudocmd) or changed
    else:
        if ipa_sudocmdgroup:
            changed = True
            if not module.check_mode:
                client.sudocmdgroup_del(name=name)

    return changed, client.sudocmdgroup_find(name=name)


def main():
    module = AnsibleModule(
        argument_spec=dict(
            cn=dict(type='str', required=True, aliases=['name']),
            description=dict(type='str', required=False),
            state=dict(type='str', required=False, default='present',
                       choices=['present', 'absent']),
            sudocmd=dict(type='list', required=False),
            ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
            ipa_host=dict(type='str', required=False, default='ipa.example.com'),
            ipa_port=dict(type='int', required=False, default=443),
            ipa_user=dict(type='str', required=False, default='admin'),
            ipa_pass=dict(type='str', required=True, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
        ),
        supports_check_mode=True,
    )

    client = SudoCmdGroupIPAClient(module=module,
                                   host=module.params['ipa_host'],
                                   port=module.params['ipa_port'],
                                   protocol=module.params['ipa_prot'])
    try:
        client.login(username=module.params['ipa_user'],
                     password=module.params['ipa_pass'])
        changed, sudocmdgroup = ensure(module, client)
        module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))


from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception

if __name__ == '__main__':
    main()
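A minimal sketch of how get_sudocmdgroup_diff() above normalizes values before comparing: IPA returns most attributes as lists, so scalar module values are wrapped first (the sample data is fabricated):

ipa = {'description': ['old text'], 'cn': ['group01']}
module = {'description': 'new text'}
print(get_sudocmdgroup_diff(ipa, module))  # ['description']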
gpl-3.0
6,082,881,859,837,278,000
33.405622
103
0.63908
false
cgwalters/anaconda
pyanaconda/ui/gui/spokes/storage.py
1
41626
# Storage configuration spoke classes
#
# Copyright (C) 2011-2014  Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.  You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <[email protected]>
#                    Chris Lumens <[email protected]>
#

"""
    TODO:

        - add button within sw_needs text in options dialogs 2,3
        - udev data gathering
            - udev fwraid, mpath would sure be nice
        - status/completed
            - what are noteworthy status events?
                - disks selected
                    - exclusiveDisks non-empty
                - sufficient space for software selection
                - autopart selected
                - custom selected
                    - performing custom configuration
                - storage configuration complete
        - spacing and border width always 6

"""

from gi.repository import Gdk, GLib, AnacondaWidgets

from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.lib.disks import getDisks, isLocalDisk, applyDiskSelection
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.spokes.lib.cart import SelectedDisksDialog
from pyanaconda.ui.gui.spokes.lib.passphrase import PassphraseDialog
from pyanaconda.ui.gui.spokes.lib.detailederror import DetailedErrorDialog
from pyanaconda.ui.gui.spokes.lib.resize import ResizeDialog
from pyanaconda.ui.gui.spokes.lib.dasdfmt import DasdFormatDialog
from pyanaconda.ui.categories.system import SystemCategory
from pyanaconda.ui.gui.utils import escape_markup, gtk_action_nowait, ignoreEscape
from pyanaconda.ui.helpers import StorageChecker

from pyanaconda.kickstart import doKickstartStorage, refreshAutoSwapSize, resetCustomStorageData
from blivet import arch
from blivet import autopart
from blivet.size import Size
from blivet.devices import MultipathDevice, ZFCPDiskDevice
from blivet.errors import StorageError, DasdFormatError
from blivet.platform import platform
from blivet.devicelibs.dasd import make_unformatted_dasd_list, format_dasd
from pyanaconda.threads import threadMgr, AnacondaThread
from pyanaconda.product import productName
from pyanaconda.flags import flags
from pyanaconda.i18n import _, C_, CN_, P_
from pyanaconda import constants, iutil, isys
from pyanaconda.bootloader import BootLoaderError

from pykickstart.constants import CLEARPART_TYPE_NONE, AUTOPART_TYPE_LVM
from pykickstart.errors import KickstartValueError

import sys

import logging
log = logging.getLogger("anaconda")

__all__ = ["StorageSpoke"]

# Response ID codes for all the various buttons on all the dialogs.
RESPONSE_CANCEL = 0
RESPONSE_MODIFY_SW = 2
RESPONSE_RECLAIM = 3
RESPONSE_QUIT = 4

class InstallOptionsDialogBase(GUIObject):
    uiFile = "spokes/storage.glade"

    def __init__(self, *args, **kwargs):
        self.payload = kwargs.pop("payload", None)
        GUIObject.__init__(self, *args, **kwargs)
        self._grabObjects()

    def _grabObjects(self):
        pass

    def run(self):
        rc = self.window.run()
        self.window.destroy()
        return rc

    def _modify_sw_link_clicked(self, label, uri):
        if self._software_is_ready():
            self.window.response(RESPONSE_MODIFY_SW)

        return True

    def _get_sw_needs_text(self, required_space, auto_swap):
        tooltip = _("Please wait... software metadata still loading.")

        if flags.livecdInstall:
            sw_text = (_("Your current <b>%(product)s</b> software "
                         "selection requires <b>%(total)s</b> of available "
                         "space, including <b>%(software)s</b> for software and "
                         "<b>%(swap)s</b> for swap space.")
                       % {"product": escape_markup(productName),
                          "total": escape_markup(str(required_space + auto_swap)),
                          "software": escape_markup(str(required_space)),
                          "swap": escape_markup(str(auto_swap))})
        else:
            sw_text = (_("Your current <a href=\"\" title=\"%(tooltip)s\"><b>%(product)s</b> software "
                         "selection</a> requires <b>%(total)s</b> of available "
                         "space, including <b>%(software)s</b> for software and "
                         "<b>%(swap)s</b> for swap space.")
                       % {"tooltip": escape_markup(tooltip),
                          "product": escape_markup(productName),
                          "total": escape_markup(str(required_space + auto_swap)),
                          "software": escape_markup(str(required_space)),
                          "swap": escape_markup(str(auto_swap))})

        return sw_text

    # Methods to handle sensitivity of the modify button.
    def _software_is_ready(self):
        # FIXME: Would be nicer to just ask the spoke if it's ready.
        return (not threadMgr.get(constants.THREAD_PAYLOAD) and
                not threadMgr.get(constants.THREAD_SOFTWARE_WATCHER) and
                not threadMgr.get(constants.THREAD_CHECK_SOFTWARE) and
                self.payload.baseRepo is not None)

    def _check_for_storage_thread(self, button):
        if self._software_is_ready():
            button.set_has_tooltip(False)

            # False means this function should never be called again.
            return False
        else:
            return True

    def _add_modify_watcher(self, widget):
        # If the payload fetching thread is still running, the user can't go to
        # modify the software selection screen.  Thus, we have to set the button
        # insensitive and wait until software selection is ready to go.
        if not self._software_is_ready():
            GLib.timeout_add_seconds(1, self._check_for_storage_thread, widget)

class NeedSpaceDialog(InstallOptionsDialogBase):
    builderObjects = ["need_space_dialog"]
    mainWidgetName = "need_space_dialog"

    def _grabObjects(self):
        self.disk_free_label = self.builder.get_object("need_space_disk_free_label")
        self.fs_free_label = self.builder.get_object("need_space_fs_free_label")

    def _set_free_space_labels(self, disk_free, fs_free):
        self.disk_free_label.set_text(str(disk_free))
        self.fs_free_label.set_text(str(fs_free))

    # pylint: disable=arguments-differ
    def refresh(self, required_space, auto_swap, disk_free, fs_free):
        sw_text = self._get_sw_needs_text(required_space, auto_swap)
        label_text = _("%s The disks you've selected have the following "
                       "amounts of free space:") % sw_text
        label = self.builder.get_object("need_space_desc_label")
        label.set_markup(label_text)

        if not flags.livecdInstall:
            label.connect("activate-link", self._modify_sw_link_clicked)

        self._set_free_space_labels(disk_free, fs_free)

        label_text = _("<b>You don't have enough space available to install "
                       "%s</b>.  You can shrink or remove existing partitions "
                       "via our guided reclaim space tool, or you can adjust your "
                       "partitions on your own in the custom partitioning "
                       "interface.") % escape_markup(productName)
        self.builder.get_object("need_space_options_label").set_markup(label_text)

        self._add_modify_watcher(label)

class NoSpaceDialog(InstallOptionsDialogBase):
    builderObjects = ["no_space_dialog"]
    mainWidgetName = "no_space_dialog"

    def _grabObjects(self):
        self.disk_free_label = self.builder.get_object("no_space_disk_free_label")
        self.fs_free_label = self.builder.get_object("no_space_fs_free_label")

    def _set_free_space_labels(self, disk_free, fs_free):
        self.disk_free_label.set_text(str(disk_free))
        self.fs_free_label.set_text(str(fs_free))

    # pylint: disable=arguments-differ
    def refresh(self, required_space, auto_swap, disk_free, fs_free):
        label_text = self._get_sw_needs_text(required_space, auto_swap)
        label_text += (_(" You don't have enough space available to install "
                         "<b>%(product)s</b>, even if you used all of the free space "
                         "available on the selected disks.")
                       % {"product": escape_markup(productName)})
        label = self.builder.get_object("no_space_desc_label")
        label.set_markup(label_text)

        if not flags.livecdInstall:
            label.connect("activate-link", self._modify_sw_link_clicked)

        self._set_free_space_labels(disk_free, fs_free)

        label_text = _("<b>You don't have enough space available to install "
                       "%(productName)s</b>, even if you used all of the free space "
                       "available on the selected disks.  You could add more "
                       "disks for additional space, "
                       "modify your software selection to install a smaller "
                       "version of <b>%(productName)s</b>, or quit the installer.") % \
                               {"productName": escape_markup(productName)}
        self.builder.get_object("no_space_options_label").set_markup(label_text)

        self._add_modify_watcher(label)

class StorageSpoke(NormalSpoke, StorageChecker):
    builderObjects = ["storageWindow", "addSpecializedImage"]
    mainWidgetName = "storageWindow"
    uiFile = "spokes/storage.glade"
    helpFile = "StorageSpoke.xml"

    category = SystemCategory

    # other candidates: computer-symbolic, folder-symbolic
    icon = "drive-harddisk-symbolic"
    title = CN_("GUI|Spoke", "INSTALLATION _DESTINATION")

    def __init__(self, *args, **kwargs):
        StorageChecker.__init__(self, min_ram=isys.MIN_GUI_RAM)
        NormalSpoke.__init__(self, *args, **kwargs)
        self.applyOnSkip = True

        self._ready = False
        self.autoPartType = None
        self.encrypted = False
        self.passphrase = ""
        self.selected_disks = self.data.ignoredisk.onlyuse[:]

        # This list contains all possible disks that can be included in the install.
        # All types of advanced disks should be set up for us ahead of time, so
        # there should be no need to modify this list.
        self.disks = []

        if not flags.automatedInstall:
            # default to using autopart for interactive installs
            self.data.autopart.autopart = True

        self.autopart = self.data.autopart.autopart
        self.autoPartType = None
        self.clearPartType = CLEARPART_TYPE_NONE

        if self.data.zerombr.zerombr and arch.isS390():
            # run dasdfmt on any unformatted DASDs automatically
            threadMgr.add(AnacondaThread(name=constants.THREAD_DASDFMT,
                                         target=self.run_dasdfmt))

        self._previous_autopart = False

        self._last_clicked_overview = None
        self._cur_clicked_overview = None

        self._grabObjects()

    def _grabObjects(self):
        self._customPart = self.builder.get_object("customRadioButton")
        self._encrypted = self.builder.get_object("encryptionCheckbox")
        self._reclaim = self.builder.get_object("reclaimCheckbox")

    def apply(self):
        applyDiskSelection(self.storage, self.data, self.selected_disks)
        self.data.autopart.autopart = self.autopart
        self.data.autopart.type = self.autoPartType
        self.data.autopart.encrypted = self.encrypted
        self.data.autopart.passphrase = self.passphrase

        self.clearPartType = CLEARPART_TYPE_NONE

        if self.data.bootloader.bootDrive and \
           self.data.bootloader.bootDrive not in self.selected_disks:
            self.data.bootloader.bootDrive = ""
            self.storage.bootloader.reset()

        self.data.clearpart.initAll = True
        self.data.clearpart.type = self.clearPartType
        self.storage.config.update(self.data)
        self.storage.autoPartType = self.data.autopart.type
        self.storage.encryptedAutoPart = self.data.autopart.encrypted
        self.storage.encryptionPassphrase = self.data.autopart.passphrase

        # If autopart is selected we want to remove whatever has been
        # created/scheduled to make room for autopart.
        # If custom is selected, we want to leave alone any storage layout the
        # user may have set up before now.
        self.storage.config.clearNonExistent = self.data.autopart.autopart

    @gtk_action_nowait
    def execute(self):
        # Spawn storage execution as a separate thread so there's no big delay
        # going back from this spoke to the hub while StorageChecker.run runs.
        # Yes, this means there's a thread spawning another thread.  Sorry.
threadMgr.add(AnacondaThread(name=constants.THREAD_EXECUTE_STORAGE, target=self._doExecute)) def _doExecute(self): self._ready = False hubQ.send_not_ready(self.__class__.__name__) # on the off-chance dasdfmt is running, we can't proceed further threadMgr.wait(constants.THREAD_DASDFMT) hubQ.send_message(self.__class__.__name__, _("Saving storage configuration...")) try: doKickstartStorage(self.storage, self.data, self.instclass) except (StorageError, KickstartValueError) as e: log.error("storage configuration failed: %s", e) StorageChecker.errors = str(e).split("\n") hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration...")) self.data.bootloader.bootDrive = "" self.data.ignoredisk.drives = [] self.data.ignoredisk.onlyuse = [] self.storage.config.update(self.data) self.storage.reset() self.disks = getDisks(self.storage.devicetree) # now set ksdata back to the user's specified config applyDiskSelection(self.storage, self.data, self.selected_disks) except BootLoaderError as e: log.error("BootLoader setup failed: %s", e) StorageChecker.errors = str(e).split("\n") hubQ.send_message(self.__class__.__name__, _("Failed to save storage configuration...")) self.data.bootloader.bootDrive = "" else: if self.autopart: self.run() finally: resetCustomStorageData(self.data) self._ready = True hubQ.send_ready(self.__class__.__name__, True) @property def completed(self): retval = (threadMgr.get(constants.THREAD_EXECUTE_STORAGE) is None and threadMgr.get(constants.THREAD_CHECK_STORAGE) is None and self.storage.rootDevice is not None and not self.errors) return retval @property def ready(self): # By default, the storage spoke is not ready. We have to wait until # storageInitialize is done. return self._ready @property def showable(self): return not flags.dirInstall @property def status(self): """ A short string describing the current status of storage setup. """ msg = _("No disks selected") if flags.automatedInstall and not self.storage.rootDevice: msg = _("Kickstart insufficient") elif threadMgr.get(constants.THREAD_DASDFMT): msg = _("Formatting DASDs") elif self.data.ignoredisk.onlyuse: msg = P_(("%d disk selected"), ("%d disks selected"), len(self.data.ignoredisk.onlyuse)) % len(self.data.ignoredisk.onlyuse) if self.errors: msg = _("Error checking storage configuration") elif self.warnings: msg = _("Warning checking storage configuration") elif self.data.autopart.autopart: msg = _("Automatic partitioning selected") else: msg = _("Custom partitioning selected") return msg @property def localOverviews(self): return self.local_disks_box.get_children() @property def advancedOverviews(self): return [child for child in self.specialized_disks_box.get_children() if isinstance(child, AnacondaWidgets.DiskOverview)] def _on_disk_clicked(self, overview, event): # This handler only runs for these two kinds of events, and only for # activate-type keys (space, enter) in the latter event's case. 
if event.type not in [Gdk.EventType.BUTTON_PRESS, Gdk.EventType.KEY_RELEASE]:
            return

        if event.type == Gdk.EventType.KEY_RELEASE and \
           event.keyval not in [Gdk.KEY_space, Gdk.KEY_Return, Gdk.KEY_ISO_Enter, Gdk.KEY_KP_Enter, Gdk.KEY_KP_Space]:
            return

        if event.type == Gdk.EventType.BUTTON_PRESS and \
                event.state & Gdk.ModifierType.SHIFT_MASK:
            # clicked with Shift held down
            if self._last_clicked_overview is None:
                # nothing clicked before, cannot apply Shift-click
                return
            local_overviews = self.localOverviews
            advanced_overviews = self.advancedOverviews

            # find out which list of overviews the clicked one belongs to
            if overview in local_overviews:
                from_overviews = local_overviews
            elif overview in advanced_overviews:
                from_overviews = advanced_overviews
            else:
                # should never happen, but if it does, no other actions should be done
                return

            if self._last_clicked_overview in from_overviews:
                # get index of the last clicked overview
                last_idx = from_overviews.index(self._last_clicked_overview)
            else:
                # overview from the other list clicked before, cannot apply "Shift-click"
                return

            # get index and state of the clicked overview
            cur_idx = from_overviews.index(overview)
            state = self._last_clicked_overview.get_chosen()

            if cur_idx > last_idx:
                copy_to = from_overviews[last_idx:cur_idx+1]
            else:
                copy_to = from_overviews[cur_idx:last_idx]

            # copy the state of the last clicked overview to the ones between it and the
            # one clicked with the Shift held down
            for disk_overview in copy_to:
                disk_overview.set_chosen(state)

        self._update_disk_list()
        self._update_summary()

    def _on_disk_focus_in(self, overview, event):
        self._last_clicked_overview = self._cur_clicked_overview
        self._cur_clicked_overview = overview

    def refresh(self):
        self.disks = getDisks(self.storage.devicetree)

        # synchronize our local data store with the global ksdata
        disk_names = [d.name for d in self.disks]
        # don't put disks with hidden formats in selected_disks
        self.selected_disks = [d for d in self.data.ignoredisk.onlyuse
                               if d in disk_names]
        self.autopart = self.data.autopart.autopart
        self.autoPartType = self.data.autopart.type
        if self.autoPartType is None:
            self.autoPartType = AUTOPART_TYPE_LVM
        self.encrypted = self.data.autopart.encrypted
        self.passphrase = self.data.autopart.passphrase

        self._previous_autopart = self.autopart

        # First, remove all non-button children.
        for child in self.localOverviews + self.advancedOverviews:
            child.destroy()

        # Then deal with local disks, which are really easy.  They need to be
        # handled here instead of refresh to take into account the user pressing
        # the rescan button on custom partitioning.
        for disk in filter(isLocalDisk, self.disks):
            # While technically local disks, zFCP devices are specialized
            # storage and should not be shown here.  Compare by value, not
            # identity: "is not" on a string literal only works by interning.
            if disk.type != "zfcp":
                self._add_disk_overview(disk, self.local_disks_box)

        # Advanced disks are different.  Because there can potentially be a lot
        # of them, we do not display them in the box by default.  Instead, only
        # those selected in the filter UI are displayed.  This means refresh
        # needs to know to create and destroy overviews as appropriate.
for name in self.data.ignoredisk.onlyuse:
            if name not in disk_names:
                continue
            obj = self.storage.devicetree.getDeviceByName(name, hidden=True)
            # since zfcp devices may be detected as local disks when added
            # manually, specifically check the disk type here to make sure
            # we won't accidentally bypass adding zfcp devices to the disk
            # overview (value comparison, not "is not" identity comparison)
            if isLocalDisk(obj) and obj.type != "zfcp":
                continue
            self._add_disk_overview(obj, self.specialized_disks_box)

        # update the selections in the ui
        for overview in self.localOverviews + self.advancedOverviews:
            name = overview.get_property("name")
            overview.set_chosen(name in self.selected_disks)

        self._customPart.set_active(not self.autopart)

        self._update_summary()

        if self.errors:
            self.set_warning(_("Error checking storage configuration.  Click for details."))
        elif self.warnings:
            self.set_warning(_("Warning checking storage configuration.  Click for details."))

    def initialize(self):
        NormalSpoke.initialize(self)

        self.local_disks_box = self.builder.get_object("local_disks_box")
        self.specialized_disks_box = self.builder.get_object("specialized_disks_box")

        threadMgr.add(AnacondaThread(name=constants.THREAD_STORAGE_WATCHER,
                                     target=self._initialize))

    def _add_disk_overview(self, disk, box):
        if disk.removable:
            kind = "drive-removable-media"
        else:
            kind = "drive-harddisk"

        if disk.serial:
            popup_info = "%s" % disk.serial
        else:
            popup_info = None

        # We don't want to display the whole huge WWID for a multipath device.
        # That makes the DO way too wide.
        if isinstance(disk, MultipathDevice):
            desc = disk.wwid.split(":")
            description = ":".join(desc[0:3]) + "..." + ":".join(desc[-4:])
        elif isinstance(disk, ZFCPDiskDevice):
            # manually mangle the desc of a zFCP device to be multi-line since
            # it's so long it makes the disk selection screen look odd
            description = _("FCP device %(hba_id)s\nWWPN %(wwpn)s\nLUN %(lun)s") % \
                          {"hba_id": disk.hba_id, "wwpn": disk.wwpn, "lun": disk.fcp_lun}
        else:
            description = disk.description

        free = self.storage.getFreeSpace(disks=[disk])[disk.name][0]

        overview = AnacondaWidgets.DiskOverview(description,
                                                kind,
                                                str(disk.size),
                                                _("%s free") % free,
                                                disk.name,
                                                popup=popup_info)
        box.pack_start(overview, False, False, 0)

        # FIXME: this will need to get smarter
        #
        # maybe a little function that resolves each item in onlyuse using
        # udev_resolve_devspec and compares that to the DiskDevice?
        overview.set_chosen(disk.name in self.selected_disks)
        overview.connect("button-press-event", self._on_disk_clicked)
        overview.connect("key-release-event", self._on_disk_clicked)
        overview.connect("focus-in-event", self._on_disk_focus_in)
        overview.show_all()

    def _initialize(self):
        hubQ.send_message(self.__class__.__name__, _("Probing storage..."))

        threadMgr.wait(constants.THREAD_STORAGE)
        threadMgr.wait(constants.THREAD_CUSTOM_STORAGE_INIT)

        self.disks = getDisks(self.storage.devicetree)

        # if there's only one disk, select it by default
        if len(self.disks) == 1 and not self.selected_disks:
            applyDiskSelection(self.storage, self.data, [self.disks[0].name])

        self._ready = True
        hubQ.send_ready(self.__class__.__name__, False)

    def _update_summary(self):
        """ Update the summary based on the UI.
""" count = 0 capacity = Size(0) free = Size(0) # pass in our disk list so hidden disks' free space is available free_space = self.storage.getFreeSpace(disks=self.disks) selected = [d for d in self.disks if d.name in self.selected_disks] for disk in selected: capacity += disk.size free += free_space[disk.name][0] count += 1 anySelected = count > 0 summary = (P_("%(count)d disk selected; %(capacity)s capacity; %(free)s free", "%(count)d disks selected; %(capacity)s capacity; %(free)s free", count) % {"count" : count, "capacity" : capacity, "free" : free}) summary_label = self.builder.get_object("summary_label") summary_label.set_text(summary) summary_label.set_sensitive(anySelected) # only show the "we won't touch your other disks" labels and summary button when # some disks are selected self.builder.get_object("summary_button_revealer").set_reveal_child(anySelected) self.builder.get_object("local_untouched_label_revealer").set_reveal_child(anySelected) self.builder.get_object("special_untouched_label_revealer").set_reveal_child(anySelected) self.builder.get_object("other_options_label").set_sensitive(anySelected) self.builder.get_object("other_options_grid").set_sensitive(anySelected) if len(self.disks) == 0: self.set_warning(_("No disks detected. Please shut down the computer, connect at least one disk, and restart to complete installation.")) elif not anySelected: self.set_warning(_("No disks selected; please select at least one disk to install to.")) else: self.clear_info() def _update_disk_list(self): """ Update self.selected_disks based on the UI. """ for overview in self.localOverviews + self.advancedOverviews: selected = overview.get_chosen() name = overview.get_property("name") if selected and name not in self.selected_disks: self.selected_disks.append(name) if not selected and name in self.selected_disks: self.selected_disks.remove(name) def run_dasdfmt(self): """ Though the same function exists in pyanaconda.ui.gui.spokes.lib.dasdfmt, this instance doesn't include any of the UI pieces and should only really be getting called on ks installations with "zerombr". """ # wait for the initial storage thread to complete before taking any new # actions on storage devices threadMgr.wait(constants.THREAD_STORAGE) to_format = make_unformatted_dasd_list(d.name for d in getDisks(self.storage.devicetree)) if not to_format: # nothing to do here; bail return hubQ.send_message(self.__class__.__name__, _("Formatting DASDs")) for disk in to_format: try: format_dasd(disk) except DasdFormatError as err: # Log errors if formatting fails, but don't halt the installer log.error(str(err)) continue # signal handlers def on_summary_clicked(self, button): # show the selected disks dialog # pass in our disk list so hidden disks' free space is available free_space = self.storage.getFreeSpace(disks=self.disks) dialog = SelectedDisksDialog(self.data,) dialog.refresh([d for d in self.disks if d.name in self.selected_disks], free_space) self.run_lightbox_dialog(dialog) # update selected disks since some may have been removed self.selected_disks = [d.name for d in dialog.disks] # update the UI to reflect changes to self.selected_disks for overview in self.localOverviews: name = overview.get_property("name") overview.set_chosen(name in self.selected_disks) self._update_summary() self.data.bootloader.seen = True if self.data.bootloader.location == "none": self.set_warning(_("You have chosen to skip boot loader installation. 
Your system may not be bootable."))
        else:
            self.clear_info()

    def run_lightbox_dialog(self, dialog):
        with self.main_window.enlightbox(dialog.window):
            rc = dialog.run()

        return rc

    def _check_encrypted(self):
        # even if they're not doing autopart, setting autopart.encrypted
        # establishes a default of encrypting new devices
        if not self.encrypted:
            return True

        dialog = PassphraseDialog(self.data)
        rc = self.run_lightbox_dialog(dialog)
        if rc != 1:
            return False

        self.passphrase = dialog.passphrase

        for device in self.storage.devices:
            if device.format.type == "luks" and not device.format.exists:
                if not device.format.hasKey:
                    device.format.passphrase = self.passphrase

        return True

    def on_back_clicked(self, button):
        # We can't exit early if it looks like nothing has changed because the
        # user might want to change settings presented in the dialogs shown from
        # within this method.

        # Remove all non-existing devices if autopart was active when we last
        # refreshed.
        if self._previous_autopart:
            self._previous_autopart = False
            for partition in self.storage.partitions[:]:
                # check if it's been removed in a previous iteration
                if not partition.exists and \
                   partition in self.storage.partitions:
                    self.storage.recursiveRemove(partition)

        # hide/unhide disks as requested
        for disk in self.disks:
            if disk.name not in self.selected_disks and \
               disk in self.storage.devices:
                self.storage.devicetree.hide(disk)
            elif disk.name in self.selected_disks and \
                 disk not in self.storage.devices:
                self.storage.devicetree.unhide(disk)

        # show the installation options dialog
        disks = [d for d in self.disks if d.name in self.selected_disks]
        disks_size = sum((d.size for d in disks), Size(0))

        # No disks selected?  The user wants to back out of the storage spoke.
        if not disks:
            NormalSpoke.on_back_clicked(self, button)
            return

        if arch.isS390():
            # check for unformatted DASDs and launch dasdfmt if any discovered
            dasds = make_unformatted_dasd_list(self.selected_disks)
            if len(dasds) > 0:
                # We want to apply current selection before running dasdfmt to
                # prevent this information from being lost afterward
                applyDiskSelection(self.storage, self.data, self.selected_disks)
                dialog = DasdFormatDialog(self.data, self.storage, dasds)
                ignoreEscape(dialog.window)
                rc = self.run_lightbox_dialog(dialog)
                if rc == 1:
                    # User hit OK on the dialog
                    self.refresh()
                elif rc == 2:
                    # User clicked uri to return to hub.
                    NormalSpoke.on_back_clicked(self, button)
                    return
                else:
                    # User either hit cancel on the dialog or closed it via
                    # escape; there was no formatting done.
                    # NOTE: rc == 2 means the user clicked on the link that
                    # takes them back to the hub.
                    return

        # Figure out if the existing disk labels will work on this platform
        # you need to have at least one of the platform's labels in order for
        # any of the free space to be useful.
disk_labels = set(disk.format.labelType for disk in disks if hasattr(disk.format, "labelType")) platform_labels = set(platform.diskLabelTypes) if disk_labels and platform_labels.isdisjoint(disk_labels): disk_free = 0 fs_free = 0 log.debug("Need disklabel: %s have: %s", ", ".join(platform_labels), ", ".join(disk_labels)) else: free_space = self.storage.getFreeSpace(disks=disks, clearPartType=CLEARPART_TYPE_NONE) disk_free = sum(f[0] for f in free_space.values()) fs_free = sum(f[1] for f in free_space.values()) required_space = self.payload.spaceRequired auto_swap = sum((r.size for r in self.storage.autoPartitionRequests if r.fstype == "swap"), Size(0)) if self.autopart and auto_swap == Size(0): # autopartitioning requested, but not applied yet (=> no auto swap # requests), ask user for enough space to fit in the suggested swap auto_swap = autopart.swapSuggestion() log.debug("disk free: %s fs free: %s sw needs: %s auto swap: %s", disk_free, fs_free, required_space, auto_swap) if disk_free >= required_space + auto_swap: dialog = None elif disks_size >= required_space: if self._customPart.get_active() or self._reclaim.get_active(): dialog = None else: dialog = NeedSpaceDialog(self.data, payload=self.payload) dialog.refresh(required_space, auto_swap, disk_free, fs_free) rc = self.run_lightbox_dialog(dialog) else: dialog = NoSpaceDialog(self.data, payload=self.payload) dialog.refresh(required_space, auto_swap, disk_free, fs_free) rc = self.run_lightbox_dialog(dialog) if not dialog: # Plenty of room - there's no need to pop up a dialog, so just send # the user to wherever they asked to go. That's either the custom # spoke or the hub. # - OR - # Not enough room, but the user checked the reclaim button. self.encrypted = self._encrypted.get_active() if self._customPart.get_active(): self.autopart = False self.skipTo = "CustomPartitioningSpoke" else: self.autopart = True # We might first need to ask about an encryption passphrase. if not self._check_encrypted(): return # Oh and then we might also want to go to the reclaim dialog. if self._reclaim.get_active(): self.apply() if not self._show_resize_dialog(disks): # User pressed cancel on the reclaim dialog, so don't leave # the storage spoke. return elif rc == RESPONSE_CANCEL: # A cancel button was clicked on one of the dialogs. Stay on this # spoke. Generally, this is because the user wants to add more disks. return elif rc == RESPONSE_MODIFY_SW: # The "Fedora software selection" link was clicked on one of the # dialogs. Send the user to the software spoke. self.skipTo = "SoftwareSelectionSpoke" elif rc == RESPONSE_RECLAIM: # Not enough space, but the user can make enough if they do some # work and free up space. self.encrypted = self._encrypted.get_active() if not self._check_encrypted(): return self.apply() if not self._show_resize_dialog(disks): # User pressed cancel on the reclaim dialog, so don't leave # the storage spoke. return # And then go to the custom partitioning spoke if they chose to # do so. if self._customPart.get_active(): self.autopart = False self.skipTo = "CustomPartitioningSpoke" else: self.autopart = True elif rc == RESPONSE_QUIT: # Not enough space, and the user can't do anything about it so # they chose to quit. raise SystemExit("user-selected exit") else: # I don't know how we'd get here, but might as well have a # catch-all. Just stay on this spoke. 
return

        if self.autopart:
            refreshAutoSwapSize(self.storage)
        self.applyOnSkip = True
        NormalSpoke.on_back_clicked(self, button)

    def _show_resize_dialog(self, disks):
        resizeDialog = ResizeDialog(self.data, self.storage, self.payload)
        resizeDialog.refresh(disks)

        rc = self.run_lightbox_dialog(resizeDialog)
        return rc

    def on_custom_toggled(self, button):
        # The custom button won't be active until after this handler is run,
        # so we have to negate everything here.
        self._reclaim.set_sensitive(not button.get_active())

        if self._reclaim.get_sensitive():
            self._reclaim.set_has_tooltip(False)
        else:
            self._reclaim.set_tooltip_text(_("You'll be able to make space available during custom partitioning."))

    def on_specialized_clicked(self, button):
        # Don't want to run apply or execute in this case, since we have to
        # collect some more disks first.  The user will be back to this spoke.
        self.applyOnSkip = False

        # However, we do want to apply current selections so the disk cart off
        # the filter spoke will display the correct information.
        applyDiskSelection(self.storage, self.data, self.selected_disks)

        self.skipTo = "FilterSpoke"
        NormalSpoke.on_back_clicked(self, button)

    def on_info_bar_clicked(self, *args):
        if self.errors:
            label = _("The following errors were encountered when checking your storage "
                      "configuration.  You can modify your storage layout or quit the "
                      "installer.")

            dialog = DetailedErrorDialog(self.data, buttons=[
                    C_("GUI|Storage|Error Dialog", "_Quit"),
                    C_("GUI|Storage|Error Dialog", "_Modify Storage Layout")],
                label=label)
            with self.main_window.enlightbox(dialog.window):
                errors = "\n".join(self.errors)
                dialog.refresh(errors)
                rc = dialog.run()

            dialog.window.destroy()

            if rc == 0:
                # Quit.  Report the abort over IPMI before exiting; the
                # report would be dead code if it came after sys.exit().
                iutil.ipmi_report(constants.IPMI_ABORTED)
                sys.exit(0)
        elif self.warnings:
            label = _("The following warnings were encountered when checking your storage "
                      "configuration.  These are not fatal, but you may wish to make "
                      "changes to your storage layout.")

            dialog = DetailedErrorDialog(self.data, buttons=[_("_OK")], label=label)
            with self.main_window.enlightbox(dialog.window):
                warnings = "\n".join(self.warnings)
                dialog.refresh(warnings)
                rc = dialog.run()

            dialog.window.destroy()

    def on_disks_key_released(self, box, event):
        # we want to react only on Ctrl-A being pressed
        if not bool(event.state & Gdk.ModifierType.CONTROL_MASK) or \
                (event.keyval not in (Gdk.KEY_a, Gdk.KEY_A)):
            return

        # select disks in the right box
        if box is self.local_disks_box:
            overviews = self.localOverviews
        elif box is self.specialized_disks_box:
            overviews = self.advancedOverviews
        else:
            # no other box contains disk overviews
            return

        for overview in overviews:
            overview.set_chosen(True)

        self._update_disk_list()
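# The Shift-click handler in _on_disk_clicked copies the last-clicked
# overview's chosen state across the slice between the two clicks.  A
# minimal, GTK-free sketch of that slice logic (the Overview stand-in
# below is hypothetical and not part of this module):
#
#     class Overview(object):
#         def __init__(self):
#             self.chosen = False
#
#     def shift_click(overviews, last_idx, cur_idx):
#         state = overviews[last_idx].chosen
#         if cur_idx > last_idx:
#             span = overviews[last_idx:cur_idx + 1]
#         else:
#             span = overviews[cur_idx:last_idx]
#         for overview in span:
#             overview.chosen = state
#
#     row = [Overview() for _ in range(5)]
#     row[1].chosen = True
#     shift_click(row, 1, 4)
#     assert all(o.chosen for o in row[1:5])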
gpl-2.0
4,752,424,531,771,857,000
41.913402
150
0.607265
false
pbrunet/pythran
pythran/analyses/range_values.py
3
10642
""" Module Analysing code to extract positive subscripts from code. """ # TODO check bound of while and if for more occurate values. import ast import copy from pythran.analyses import Globals, Aliases from pythran.intrinsic import Intrinsic from pythran.passmanager import FunctionAnalysis from pythran.range import Range, UNKNOWN_RANGE, combine class RangeValues(FunctionAnalysis): """ This analyse extract positive subscripts from code. It is flow insensitif and aliasing is not taken into account as integer doesn't create aliasing in Python. """ def __init__(self): """Initialize instance variable and gather globals name information.""" self.result = dict() super(RangeValues, self).__init__(Globals, Aliases) def add(self, variable, range_): """ Add a new low and high bound for a variable. As it is flow insensitif, it compares it with olds values and update it if needed. """ if variable not in self.result: self.result[variable] = range_ else: self.result[variable].update(range_) def visit_FunctionDef(self, node): """ Set default range value for globals and attributs. >>> import ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(a, b): pass") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Range(low=-inf, high=inf) """ for global_name in self.globals: self.result[global_name] = UNKNOWN_RANGE for attr in node.args.args: self.result[attr.id] = UNKNOWN_RANGE map(self.visit, node.body) def visit_Assign(self, node): """ Set range value for assigned variable. We do not handle container values. >>> import ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): a = b = 2") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Range(low=2, high=2) >>> res['b'] Range(low=2, high=2) """ assigned_range = self.visit(node.value) for target in node.targets: if isinstance(target, ast.Name): # Make sure all Range doesn't alias for multiple variables. self.add(target.id, copy.deepcopy(assigned_range)) def visit_AugAssign(self, node): """ Update range value for augassigned variables. >>> import ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): a = 2; a -= 1") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Range(low=1, high=2) """ if isinstance(node.target, ast.Name): self.result[node.target.id].update( combine(self.result[node.target.id], self.visit(node.value), node.op)) def visit_For(self, node): """ Handle iterate variable in for loops. >>> import ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = b = c = 2 ... for i in __builtin__.range(1): ... a -= 1 ... b += 1''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Range(low=-inf, high=2) >>> res['b'] Range(low=2, high=inf) >>> res['c'] Range(low=2, high=2) """ assert isinstance(node.target, ast.Name), "For apply on variables." if isinstance(node.iter, ast.Call): for alias in self.aliases[node.iter.func].aliases: if isinstance(alias, Intrinsic): self.add(node.target.id, alias.return_range_content(map(self.visit, node.iter.args))) else: self.add(node.target.id, UNKNOWN_RANGE) else: self.add(node.target.id, UNKNOWN_RANGE) self.visit_loop(node) def visit_loop(self, node): """ Handle incremented variables in loop body. >>> import ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = b = c = 2 ... while a > 0: ... 
        ...         a -= 1
        ...         b += 1''')
        >>> pm = passmanager.PassManager("test")
        >>> res = pm.gather(RangeValues, node)
        >>> res['a']
        Range(low=-inf, high=2)
        >>> res['b']
        Range(low=2, high=inf)
        >>> res['c']
        Range(low=2, high=2)
        """
        old_range = copy.deepcopy(self.result)
        map(self.visit, node.body)
        for name, range_ in old_range.iteritems():
            self.result[name].widen(range_)
        map(self.visit, node.orelse)

    visit_While = visit_loop

    def visit_BoolOp(self, node):
        """ Merge right and left operand ranges.

        TODO: we could exclude some operands with this range information...

        >>> import ast
        >>> from pythran import passmanager, backend
        >>> node = ast.parse('''
        ... def foo():
        ...     a = 2
        ...     c = 3
        ...     d = a or c''')
        >>> pm = passmanager.PassManager("test")
        >>> res = pm.gather(RangeValues, node)
        >>> res['d']
        Range(low=2, high=3)
        """
        res = zip(*map(self.visit, node.values))
        return Range(min(res[0]), max(res[1]))

    def visit_BinOp(self, node):
        """ Combine operand ranges for the given operator.

        >>> import ast
        >>> from pythran import passmanager, backend
        >>> node = ast.parse('''
        ... def foo():
        ...     a = 2
        ...     c = 3
        ...     d = a - c''')
        >>> pm = passmanager.PassManager("test")
        >>> res = pm.gather(RangeValues, node)
        >>> res['d']
        Range(low=-1, high=-1)
        """
        return combine(self.visit(node.left), self.visit(node.right), node.op)

    def visit_UnaryOp(self, node):
        """ Update range with given unary operation.

        >>> import ast
        >>> from pythran import passmanager, backend
        >>> node = ast.parse('''
        ... def foo():
        ...     a = 2
        ...     c = -a
        ...     d = ~a
        ...     f = +a
        ...     e = not a''')
        >>> pm = passmanager.PassManager("test")
        >>> res = pm.gather(RangeValues, node)
        >>> res['f']
        Range(low=2, high=2)
        >>> res['c']
        Range(low=-2, high=-2)
        >>> res['d']
        Range(low=-3, high=-3)
        >>> res['e']
        Range(low=0, high=1)
        """
        res = self.visit(node.operand)
        if isinstance(node.op, ast.Not):
            return Range(0, 1)
        elif(isinstance(node.op, ast.Invert) and
             isinstance(res.high, int) and
             isinstance(res.low, int)):
            return Range(~res.high, ~res.low)
        elif isinstance(node.op, ast.UAdd):
            return res
        elif isinstance(node.op, ast.USub):
            return Range(-res.high, -res.low)
        else:
            return UNKNOWN_RANGE

    def visit_IfExp(self, node):
        """ Use worst case for both possible values.

        >>> import ast
        >>> from pythran import passmanager, backend
        >>> node = ast.parse('''
        ... def foo():
        ...     a = 2 or 3
        ...     b = 4 or 5
        ...     c = a if a else b''')
        >>> pm = passmanager.PassManager("test")
        >>> res = pm.gather(RangeValues, node)
        >>> res['c']
        Range(low=2, high=5)
        """
        self.visit(node.test)
        body_res = self.visit(node.body)
        orelse_res = self.visit(node.orelse)
        return Range(min(orelse_res.low, body_res.low),
                     max(orelse_res.high, body_res.high))

    @staticmethod
    def visit_Compare(_):
        """ Booleans are possible indices.

        >>> import ast
        >>> from pythran import passmanager, backend
        >>> node = ast.parse('''
        ... def foo():
        ...     a = 2 or 3
        ...     b = 4 or 5
        ...     c = a < b''')
        >>> pm = passmanager.PassManager("test")
        >>> res = pm.gather(RangeValues, node)
        >>> res['c']
        Range(low=0, high=1)
        """
        return Range(0, 1)

    def visit_Call(self, node):
        """ Function calls are not handled for now.

        >>> import ast
        >>> from pythran import passmanager, backend
        >>> node = ast.parse('''
        ... def foo():
        ...     a = __builtin__.range(10)''')
        >>> pm = passmanager.PassManager("test")
        >>> res = pm.gather(RangeValues, node)
        >>> res['a']
        Range(low=-inf, high=inf)
        """
        result = None
        for alias in self.aliases[node.func].aliases:
            if isinstance(alias, Intrinsic):
                alias_range = alias.return_range(map(self.visit, node.args))
                result = result.update(alias_range) if result else alias_range
            else:
                return UNKNOWN_RANGE
        return result or UNKNOWN_RANGE

    @staticmethod
    def visit_Num(node):
        """ Handle literal integer values. """
        if isinstance(node.n, int):
            return Range(node.n, node.n)
        else:
            return UNKNOWN_RANGE

    def visit_Name(self, node):
        """ Get the stored range for a name, e.g. a function parameter. """
        return self.result[node.id]

    def visit_ExceptHandler(self, node):
        """ Add a range value for exception variable.

        >>> import ast
        >>> from pythran import passmanager, backend
        >>> node = ast.parse('''
        ... def foo():
        ...     try:
        ...         pass
        ...     except __builtin__.RuntimeError as e:
        ...         pass''')
        >>> pm = passmanager.PassManager("test")
        >>> res = pm.gather(RangeValues, node)
        >>> res['e']
        Range(low=-inf, high=inf)
        """
        if node.name:
            self.result[node.name.id] = UNKNOWN_RANGE
        map(self.visit, node.body)

    def generic_visit(self, node):
        """ Other nodes are not handled, so their range is unknown. """
        super(RangeValues, self).generic_visit(node)
        return UNKNOWN_RANGE
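# visit_BinOp above delegates to pythran.range.combine for interval
# arithmetic.  A standalone sketch of what such a combine can look like
# for Add/Sub (a simplified model for illustration, not the actual
# pythran.range implementation):
#
#     import ast
#     from collections import namedtuple
#
#     SimpleRange = namedtuple('SimpleRange', ['low', 'high'])
#
#     def simple_combine(left, right, op):
#         if isinstance(op, ast.Add):
#             return SimpleRange(left.low + right.low, left.high + right.high)
#         if isinstance(op, ast.Sub):
#             # subtracting an interval flips its bounds
#             return SimpleRange(left.low - right.high, left.high - right.low)
#         return SimpleRange(float('-inf'), float('inf'))
#
#     # matches the visit_BinOp doctest: [2, 2] - [3, 3] == [-1, -1]
#     assert simple_combine(SimpleRange(2, 2),
#                           SimpleRange(3, 3), ast.Sub()) == SimpleRange(-1, -1)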
bsd-3-clause
3,094,377,842,183,470,600
31.445122
79
0.511464
false
supertom/ansible
lib/ansible/plugins/callback/context_demo.py
29
2079
# (C) 2012, Michael DeHaan, <[email protected]>

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    """
    This is a very trivial example of how any callback function can get at
    play and task objects.  play will be 'None' for runner invocations, and
    task will be None for 'setup' invocations.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'context_demo'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, *args, **kwargs):
        # Let the base class run first so self._display is set up.
        super(CallbackModule, self).__init__(*args, **kwargs)
        self.task = None
        self.play = None

    def v2_on_any(self, *args, **kwargs):
        i = 0
        # Build the context strings as locals with safe defaults; the
        # previous version referenced undefined self.play_str/self.task_str
        # attributes and left the locals unset when play or task was None.
        play_str = ''
        task_str = ''
        if self.play:
            play_str = 'play: %s' % self.play.name
        if self.task:
            task_str = 'task: %s' % self.task
        self._display.display("--- %s %s ---" % (play_str, task_str))

        self._display.display("     --- ARGS ")
        for a in args:
            self._display.display('     %s: %s' % (i, a))
            i += 1

        self._display.display("     --- KWARGS ")
        for k in kwargs:
            self._display.display('     %s: %s' % (k, kwargs[k]))

    def v2_playbook_on_play_start(self, play):
        self.play = play

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.task = task
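# Since CALLBACK_NEEDS_WHITELIST is True, this plugin only fires once it is
# whitelisted.  For Ansible 2.x that is done in ansible.cfg, roughly like so
# (exact paths and section layout may vary per install):
#
#     [defaults]
#     callback_whitelist = context_demo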
gpl-3.0
1,484,611,275,987,105,000
34.237288
97
0.63925
false
pigeonflight/strider-plone
docker/appengine/lib/django-1.5/django/contrib/staticfiles/management/commands/runserver.py
243
1343
from optparse import make_option from django.conf import settings from django.core.management.commands.runserver import Command as RunserverCommand from django.contrib.staticfiles.handlers import StaticFilesHandler class Command(RunserverCommand): option_list = RunserverCommand.option_list + ( make_option('--nostatic', action="store_false", dest='use_static_handler', default=True, help='Tells Django to NOT automatically serve static files at STATIC_URL.'), make_option('--insecure', action="store_true", dest='insecure_serving', default=False, help='Allows serving static files even if DEBUG is False.'), ) help = "Starts a lightweight Web server for development and also serves static files." def get_handler(self, *args, **options): """ Returns the static files serving handler wrapping the default handler, if static files should be served. Otherwise just returns the default handler. """ handler = super(Command, self).get_handler(*args, **options) use_static_handler = options.get('use_static_handler', True) insecure_serving = options.get('insecure_serving', False) if use_static_handler and (settings.DEBUG or insecure_serving): return StaticFilesHandler(handler) return handler
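# The two options above map onto the following invocations; with DEBUG off,
# static files are only served when --insecure is passed:
#
#     ./manage.py runserver                # serves static files while DEBUG = True
#     ./manage.py runserver --nostatic     # never wrap with StaticFilesHandler
#     ./manage.py runserver --insecure     # serve static files even if DEBUG = False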
mit
5,166,146,093,612,658,000
45.310345
96
0.698436
false
phoebusliang/parallel-lettuce
tests/integration/lib/Django-1.3/tests/regressiontests/urlpatterns_reverse/urls.py
105
3133
from django.conf.urls.defaults import *

from views import empty_view, absolute_kwargs_view


other_patterns = patterns('',
    url(r'non_path_include/$', empty_view, name='non_path_include'),
)

urlpatterns = patterns('',
    url(r'^places/(\d+)/$', empty_view, name='places'),
    url(r'^places?/$', empty_view, name="places?"),
    url(r'^places+/$', empty_view, name="places+"),
    url(r'^places*/$', empty_view, name="places*"),
    url(r'^(?:places/)?$', empty_view, name="places2?"),
    url(r'^(?:places/)+$', empty_view, name="places2+"),
    url(r'^(?:places/)*$', empty_view, name="places2*"),
    url(r'^places/(\d+|[a-z_]+)/', empty_view, name="places3"),
    url(r'^places/(?P<id>\d+)/$', empty_view, name="places4"),
    url(r'^people/(?P<name>\w+)/$', empty_view, name="people"),
    url(r'^people/(?:name/)', empty_view, name="people2"),
    url(r'^people/(?:name/(\w+)/)?', empty_view, name="people2a"),
    url(r'^optional/(?P<name>.*)/(?:.+/)?', empty_view, name="optional"),
    url(r'^hardcoded/$', empty_view, name="hardcoded"),
    url(r'^hardcoded/doc\.pdf$', empty_view, name="hardcoded2"),
    url(r'^people/(?P<state>\w\w)/(?P<name>\w+)/$', empty_view, name="people3"),
    url(r'^people/(?P<state>\w\w)/(?P<name>\d)/$', empty_view, name="people4"),
    url(r'^people/((?P<state>\w\w)/test)?/(\w+)/$', empty_view, name="people6"),
    url(r'^character_set/[abcdef0-9]/$', empty_view, name="range"),
    url(r'^character_set/[\w]/$', empty_view, name="range2"),
    url(r'^price/\$(\d+)/$', empty_view, name="price"),
    url(r'^price/[$](\d+)/$', empty_view, name="price2"),
    url(r'^price/[\$](\d+)/$', empty_view, name="price3"),
    url(r'^product/(?P<product>\w+)\+\(\$(?P<price>\d+(\.\d+)?)\)/$',
            empty_view, name="product"),
    url(r'^headlines/(?P<year>\d+)\.(?P<month>\d+)\.(?P<day>\d+)/$', empty_view,
            name="headlines"),
    url(r'^windows_path/(?P<drive_name>[A-Z]):\\(?P<path>.+)/$', empty_view,
            name="windows"),
    url(r'^special_chars/(.+)/$', empty_view, name="special"),
    url(r'^(?P<name>.+)/\d+/$', empty_view, name="mixed"),
    url(r'^repeats/a{1,2}/$', empty_view, name="repeats"),
    url(r'^repeats/a{2,4}/$', empty_view, name="repeats2"),
    url(r'^repeats/a{2}/$', empty_view, name="repeats3"),
    url(r'^(?i)CaseInsensitive/(\w+)', empty_view, name="insensitive"),
    url(r'^test/1/?', empty_view, name="test"),
    url(r'^(?i)test/2/?$', empty_view, name="test2"),
    url(r'^outer/(?P<outer>\d+)/',
            include('regressiontests.urlpatterns_reverse.included_urls')),
    url('', include('regressiontests.urlpatterns_reverse.extra_urls')),

    # This is non-reversible, but we shouldn't blow up when parsing it.
    url(r'^(?:foo|bar)(\w+)/$', empty_view, name="disjunction"),

    # Regression views for #9038. See tests for more details
    url(r'arg_view/$', 'kwargs_view'),
    url(r'arg_view/(?P<arg1>\d+)/$', 'kwargs_view'),
    url(r'absolute_arg_view/(?P<arg1>\d+)/$', absolute_kwargs_view),
    url(r'absolute_arg_view/$', absolute_kwargs_view),

    url('^includes/', include(other_patterns)),
)
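# Assuming this module is the active ROOT_URLCONF, the named patterns above
# reverse along these lines (illustrative, mirroring what the companion test
# suite exercises):
#
#     from django.core.urlresolvers import reverse
#
#     reverse('places', args=[3])                    # -> '/places/3/'
#     reverse('people', kwargs={'name': 'adrian'})   # -> '/people/adrian/'
#     reverse('headlines',
#             kwargs={'year': 2011, 'month': 2, 'day': 21})
#     # -> '/headlines/2011.2.21/'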
gpl-3.0
4,116,499,614,388,982,000
48.730159
80
0.569741
false
sniperyen/MyDjango
xadmin/plugins/editable.py
9
6739
from django import template from django.core.exceptions import PermissionDenied, ObjectDoesNotExist from django.db import models, transaction from django.forms.models import modelform_factory from django.http import Http404, HttpResponse from django.utils.encoding import force_unicode, smart_unicode from django.utils.html import escape, conditional_escape from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _ from xadmin.plugins.ajax import JsonErrorDict from xadmin.sites import site from xadmin.util import lookup_field, display_for_field, label_for_field, unquote, boolean_icon from xadmin.views import BaseAdminPlugin, ModelFormAdminView, ListAdminView from xadmin.views.base import csrf_protect_m, filter_hook from xadmin.views.edit import ModelFormAdminUtil from xadmin.views.list import EMPTY_CHANGELIST_VALUE from xadmin.layout import FormHelper class EditablePlugin(BaseAdminPlugin): list_editable = [] def __init__(self, admin_view): super(EditablePlugin, self).__init__(admin_view) self.editable_need_fields = {} def init_request(self, *args, **kwargs): active = bool(self.request.method == 'GET' and self.admin_view.has_change_permission() and self.list_editable) if active: self.model_form = self.get_model_view(ModelFormAdminUtil, self.model).form_obj return active def result_item(self, item, obj, field_name, row): if self.list_editable and item.field and item.field.editable and (field_name in self.list_editable): pk = getattr(obj, obj._meta.pk.attname) field_label = label_for_field(field_name, obj, model_admin=self.admin_view, return_attr=False ) item.wraps.insert(0, '<span class="editable-field">%s</span>') item.btns.append(( '<a class="editable-handler" title="%s" data-editable-field="%s" data-editable-loadurl="%s">' + '<i class="fa fa-edit"></i></a>') % (_(u"Enter %s") % field_label, field_name, self.admin_view.model_admin_url('patch', pk) + '?fields=' + field_name)) if field_name not in self.editable_need_fields: self.editable_need_fields[field_name] = item.field return item # Media def get_media(self, media): if self.editable_need_fields: media = media + self.model_form.media + \ self.vendor( 'xadmin.plugin.editable.js', 'xadmin.widget.editable.css') return media class EditPatchView(ModelFormAdminView, ListAdminView): def init_request(self, object_id, *args, **kwargs): self.org_obj = self.get_object(unquote(object_id)) # For list view get new field display html self.pk_attname = self.opts.pk.attname if not self.has_change_permission(self.org_obj): raise PermissionDenied if self.org_obj is None: raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(self.opts.verbose_name), 'key': escape(object_id)}) def get_new_field_html(self, f): result = self.result_item(self.org_obj, f, {'is_display_first': False, 'object': self.org_obj}) return mark_safe(result.text) if result.allow_tags else conditional_escape(result.text) def _get_new_field_html(self, field_name): try: f, attr, value = lookup_field(field_name, self.org_obj, self) except (AttributeError, ObjectDoesNotExist): return EMPTY_CHANGELIST_VALUE else: allow_tags = False if f is None: allow_tags = getattr(attr, 'allow_tags', False) boolean = getattr(attr, 'boolean', False) if boolean: allow_tags = True text = boolean_icon(value) else: text = smart_unicode(value) else: if isinstance(f.rel, models.ManyToOneRel): field_val = getattr(self.org_obj, f.name) if field_val is None: text = EMPTY_CHANGELIST_VALUE else: text = field_val else: text = 
display_for_field(value, f) return mark_safe(text) if allow_tags else conditional_escape(text) @filter_hook def get(self, request, object_id): model_fields = [f.name for f in self.opts.fields] fields = [f for f in request.GET['fields'].split(',') if f in model_fields] defaults = { "form": self.form, "fields": fields, "formfield_callback": self.formfield_for_dbfield, } form_class = modelform_factory(self.model, **defaults) form = form_class(instance=self.org_obj) helper = FormHelper() helper.form_tag = False helper.include_media = False form.helper = helper s = '{% load i18n crispy_forms_tags %}<form method="post" action="{{action_url}}">{% crispy form %}' + \ '<button type="submit" class="btn btn-success btn-block btn-sm">{% trans "Apply" %}</button></form>' t = template.Template(s) c = template.Context({'form': form, 'action_url': self.model_admin_url('patch', self.org_obj.pk)}) return HttpResponse(t.render(c)) @filter_hook @csrf_protect_m @transaction.atomic def post(self, request, object_id): model_fields = [f.name for f in self.opts.fields] fields = [f for f in request.POST.keys() if f in model_fields] defaults = { "form": self.form, "fields": fields, "formfield_callback": self.formfield_for_dbfield, } form_class = modelform_factory(self.model, **defaults) form = form_class( instance=self.org_obj, data=request.POST, files=request.FILES) result = {} if form.is_valid(): form.save(commit=True) result['result'] = 'success' result['new_data'] = form.cleaned_data result['new_html'] = dict( [(f, self.get_new_field_html(f)) for f in fields]) else: result['result'] = 'error' result['errors'] = JsonErrorDict(form.errors, form).as_json() return self.render_response(result) site.register_plugin(EditablePlugin, ListAdminView) site.register_modelview(r'^(.+)/patch/$', EditPatchView, name='%s_%s_patch')
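# EditPatchView builds a throwaway form class limited to the fields being
# edited inline.  Outside xadmin, the same modelform_factory pattern looks
# roughly like this (Book/book are hypothetical stand-ins, not part of this
# plugin):
#
#     from django.forms.models import modelform_factory
#
#     PatchForm = modelform_factory(Book, fields=['title'])
#     form = PatchForm(instance=book, data={'title': 'New title'})
#     if form.is_valid():
#         form.save(commit=True)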
apache-2.0
6,013,316,570,899,070,000
41.11875
131
0.59445
false
elysium001/zamboni
mkt/feed/tests/test_models.py
9
10673
# -*- coding: utf-8 -*- import random import string from itertools import cycle from django.core.exceptions import ValidationError import mock from nose.tools import eq_, ok_ import mkt.site.tests import mkt.feed.constants as feed from mkt.feed.models import (FeedApp, FeedBrand, FeedCollection, FeedItem, FeedShelf) from mkt.operators.models import OperatorPermission from mkt.site.fixtures import fixture from mkt.webapps.models import Webapp class FeedTestMixin(object): fixtures = fixture('webapp_337141') def feed_app_factory(self, app_id=None, app_type=feed.FEEDAPP_ICON, **kwargs): count = FeedApp.objects.count() return FeedApp.objects.create( app_id=app_id or Webapp.objects.get(id=337141).id, slug='feed-app-%s' % count, type=app_type, **kwargs) def feed_brand_factory(self, app_ids=None, layout=feed.BRAND_GRID, brand_type='mystery-app', **kwargs): count = FeedBrand.objects.count() brand = FeedBrand.objects.create(slug='feed-brand-%s' % count, type=brand_type, **kwargs) brand.set_apps(app_ids or [337141]) return brand def feed_collection_factory(self, app_ids=None, name='test-coll', coll_type=feed.COLLECTION_LISTING, grouped=False, **kwargs): count = FeedCollection.objects.count() coll = FeedCollection.objects.create( name=name, slug='feed-coll-%s' % count, type=coll_type, **kwargs) app_ids = app_ids or [337141] coll.set_apps(app_ids) if grouped: for i, mem in enumerate(coll.feedcollectionmembership_set.all()): if i == len(app_ids) - 1 and len(app_ids) > 1: mem.group = 'second-group' else: mem.group = 'first-group' mem.save() return coll def feed_shelf_factory(self, app_ids=None, name='test-shelf', carrier=1, region=1, grouped=False, **kwargs): count = FeedShelf.objects.count() shelf = FeedShelf.objects.create( name=name, slug='feed-shelf-%s' % count, carrier=carrier, region=region, **kwargs) app_ids = app_ids or [337141] shelf.set_apps(app_ids) if grouped: for i, mem in enumerate(shelf.feedshelfmembership_set.all()): if i == len(app_ids) - 1 and len(app_ids) > 1: mem.group = 'second-group' else: mem.group = 'first-group' mem.save() return shelf def feed_shelf_permission_factory(self, user, carrier=1, region=1): return OperatorPermission.objects.create(user=user, carrier=carrier, region=region) def feed_item_factory(self, carrier=1, region=1, item_type=feed.FEED_TYPE_APP, **kw): """Creates a single FeedItem of any feed element type specified.""" feed_item = FeedItem(carrier=carrier, region=region, item_type=item_type, **kw) if item_type == feed.FEED_TYPE_APP: feed_item.app = self.feed_app_factory() elif item_type == feed.FEED_TYPE_BRAND: feed_item.brand = self.feed_brand_factory() elif item_type == feed.FEED_TYPE_COLL: feed_item.collection = self.feed_collection_factory() elif item_type == feed.FEED_TYPE_SHELF: feed_item.shelf = self.feed_shelf_factory(carrier=carrier, region=region) feed_item.save() return feed_item def feed_factory(self, carrier=1, region=1, item_types=None, num_items=None): """ Iterates over a list of feed element types and creates `num_items` FeedItems, cycling over those types. By default, creates one of each type. Returns a list of FeedItems. 
""" item_types = item_types or [feed.FEED_TYPE_APP, feed.FEED_TYPE_BRAND, feed.FEED_TYPE_COLL, feed.FEED_TYPE_SHELF] if not num_items: num_items = len(item_types) item_types = cycle(item_types) feed_items = [] for i in xrange(num_items): feed_items.append( self.feed_item_factory(carrier=carrier, region=region, item_type=item_types.next())) return feed_items class FeedAppMixin(object): fixtures = fixture('webapp_337141') def setUp(self): self.feedapp_data = { 'app': 337141, 'color': 'emerald', 'type': 'icon', 'description': { 'en-US': u'pan-fried potatoes' }, 'slug': self.random_slug() } self.pullquote_data = { 'pullquote_text': {'en-US': u'The bést!'}, 'pullquote_rating': 4, 'pullquote_attribution': u'Jamés Bod' } self.feedapps = [] super(FeedAppMixin, self).setUp() def random_slug(self): return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)).lower() def create_feedapps(self, n=2, **kwargs): data = dict(self.feedapp_data) data.update(kwargs) if not isinstance(data['app'], Webapp): data['app'] = Webapp.objects.get(pk=data['app']) feedapps = [] for idx in xrange(n): data['slug'] = self.random_slug() feedapps.append(FeedApp.objects.create(**data)) self.feedapps.extend(feedapps) return feedapps class TestFeedApp(FeedAppMixin, mkt.site.tests.TestCase): def setUp(self): super(TestFeedApp, self).setUp() self.feedapp_data.update(**self.pullquote_data) self.feedapp_data['app'] = ( Webapp.objects.get(pk=self.feedapp_data['app'])) def test_create(self): feedapp = FeedApp(**self.feedapp_data) ok_(isinstance(feedapp, FeedApp)) feedapp.clean_fields() # Tests validators on fields. feedapp.clean() # Test model validation. feedapp.save() # Tests required fields. def test_missing_pullquote_rating(self): del self.feedapp_data['pullquote_rating'] self.test_create() def test_missing_pullquote_text(self): del self.feedapp_data['pullquote_text'] with self.assertRaises(ValidationError): self.test_create() def test_pullquote_rating_fractional(self): """ This passes because PositiveSmallIntegerField will coerce the float into an int, which effectively returns math.floor(value). 
""" self.feedapp_data['pullquote_rating'] = 4.5 self.test_create() def test_bad_pullquote_rating_low(self): self.feedapp_data['pullquote_rating'] = -1 with self.assertRaises(ValidationError): self.test_create() def test_bad_pullquote_rating_high(self): self.feedapp_data['pullquote_rating'] = 6 with self.assertRaises(ValidationError): self.test_create() class TestFeedBrand(mkt.site.tests.TestCase): def setUp(self): super(TestFeedBrand, self).setUp() self.apps = [mkt.site.tests.app_factory() for i in xrange(3)] self.brand = None self.brand_data = { 'slug': 'potato', 'type': 1, 'layout': 1 } def test_create(self): self.brand = FeedBrand.objects.create(**self.brand_data) ok_(isinstance(self.brand, FeedBrand)) for name, value in self.brand_data.iteritems(): eq_(getattr(self.brand, name), value, name) def test_add_app(self): self.test_create() m = self.brand.add_app(self.apps[0], order=3) ok_(self.brand.apps(), [self.apps[0]]) eq_(m.order, 3) eq_(m.app, self.apps[0]) eq_(m.obj, self.brand) def test_add_app_sort_order_respected(self): self.test_add_app() self.brand.add_app(self.apps[1], order=1) ok_(self.brand.apps(), [self.apps[1], self.apps[0]]) def test_add_app_no_order_passed(self): self.test_add_app() m = self.brand.add_app(self.apps[1]) ok_(m.order, 4) def test_remove_app(self): self.test_add_app() ok_(self.apps[0] in self.brand.apps()) removed = self.brand.remove_app(self.apps[0]) ok_(removed) ok_(self.apps[0] not in self.brand.apps()) def test_remove_app_not_in_brand(self): self.test_remove_app() removed = self.brand.remove_app(self.apps[1]) ok_(not removed) def test_set_apps(self): self.test_add_app_sort_order_respected() new_apps = [app.pk for app in self.apps][::-1] self.brand.set_apps(new_apps) eq_(new_apps, [app.pk for app in self.brand.apps()]) def test_set_apps_nonexistant(self): self.test_add_app_sort_order_respected() with self.assertRaises(Webapp.DoesNotExist): self.brand.set_apps([99999]) class TestESReceivers(FeedTestMixin, mkt.site.tests.TestCase): @mock.patch('mkt.search.indexers.BaseIndexer.index_ids') def test_update_search_index(self, update_mock): feed_items = self.feed_factory() calls = [update_call[0][0][0] for update_call in update_mock.call_args_list] for feed_item in feed_items: assert feed_item.id in calls assert getattr(feed_item, feed_item.item_type).id in calls @mock.patch('mkt.search.indexers.BaseIndexer.unindex') def test_delete_search_index(self, delete_mock): for x in xrange(4): self.feed_item_factory() count = FeedItem.objects.count() FeedItem.objects.all().delete() eq_(delete_mock.call_count, count) class TestFeedShelf(FeedTestMixin, mkt.site.tests.TestCase): def test_is_published(self): shelf = self.feed_shelf_factory() assert not shelf.is_published shelf.feeditem_set.create() assert shelf.is_published class TestFeedCollection(FeedTestMixin, mkt.site.tests.TestCase): def test_update_apps(self): coll = self.feed_collection_factory() eq_(coll.apps().count(), 1) coll.set_apps([337141, mkt.site.tests.app_factory().id, mkt.site.tests.app_factory().id]) eq_(coll.apps().count(), 3)
bsd-3-clause
-7,016,449,288,444,239,000
34.57
78
0.578484
false
leiferikb/bitpop
src/third_party/WebKit/Source/bindings/scripts/idl_validator.py
1
5029
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Validate extended attributes.

Design doc:
http://www.chromium.org/developers/design-documents/idl-compiler#TOC-Extended-attribute-validation
"""

import os.path
import re

module_path = os.path.dirname(__file__)
source_path = os.path.join(module_path, os.pardir, os.pardir)
EXTENDED_ATTRIBUTES_RELATIVE_PATH = os.path.join('bindings',
                                                 'IDLExtendedAttributes.txt')
EXTENDED_ATTRIBUTES_FILENAME = os.path.join(source_path,
                                            EXTENDED_ATTRIBUTES_RELATIVE_PATH)


class IDLInvalidExtendedAttributeError(Exception):
    pass


class IDLExtendedAttributeValidator(object):
    def __init__(self):
        self.valid_extended_attributes = read_extended_attributes_file()

    def validate_extended_attributes(self, definitions):
        # FIXME: this should be done when parsing the file, rather than after.
        for interface in definitions.interfaces.itervalues():
            self.validate_extended_attributes_node(interface)
            for attribute in interface.attributes:
                self.validate_extended_attributes_node(attribute)
            for operation in interface.operations:
                self.validate_extended_attributes_node(operation)
                for argument in operation.arguments:
                    self.validate_extended_attributes_node(argument)

    def validate_extended_attributes_node(self, node):
        for name, values_string in node.extended_attributes.iteritems():
            self.validate_name_values_string(name, values_string)

    def validate_name_values_string(self, name, values_string):
        if name not in self.valid_extended_attributes:
            raise IDLInvalidExtendedAttributeError(
                'Unknown extended attribute [%s]' % name)
        valid_values = self.valid_extended_attributes[name]
        if values_string is None and None not in valid_values:
            raise IDLInvalidExtendedAttributeError(
                'Missing required argument for extended attribute [%s]' % name)
        if '*' in valid_values:  # wildcard, any (non-empty) value ok
            return
        if values_string is None:
            values = set([None])
        else:
            values = set(re.split('[|&]', values_string))
        invalid_values = values - valid_values
        if invalid_values:
            invalid_value = invalid_values.pop()
            raise IDLInvalidExtendedAttributeError(
                'Invalid value "%s" found in extended attribute [%s=%s]' %
                (invalid_value, name, values_string))


def read_extended_attributes_file():
    def extended_attribute_name_values():
        with open(EXTENDED_ATTRIBUTES_FILENAME) as extended_attributes_file:
            for line in extended_attributes_file:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                name, _, values_string = map(str.strip, line.partition('='))
                value_list = [value.strip()
                              for value in values_string.split('|')]
                yield name, value_list

    valid_extended_attributes = {}
    for name, value_list in extended_attribute_name_values():
        if not value_list:
            valid_extended_attributes[name] = set([None])
            continue
        valid_extended_attributes[name] = set([value if value else None
                                               for value in value_list])
    return valid_extended_attributes
gpl-3.0
-6,825,456,615,714,710,000
45.564815
110
0.673891
false
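The parser in read_extended_attributes_file() above accepts lines of the form "Name" or "Name=Value1|Value2", where an empty value in the list means the bare [Name] form is allowed. A small self-contained sketch of the same parsing on inline data (the attribute names here are invented for illustration, not taken from IDLExtendedAttributes.txt):

sample = """
# comments and blank lines are skipped
SampleNoArgs
SampleEnum=ValueA|ValueB
SampleOptional=|ValueA
"""

valid = {}
for line in sample.splitlines():
    line = line.strip()
    if not line or line.startswith('#'):
        continue
    name, _, values_string = [part.strip() for part in line.partition('=')]
    # an empty value stands for "the bare [Name] form is allowed"
    valid[name] = set(value.strip() or None
                      for value in values_string.split('|'))

assert valid == {'SampleNoArgs': set([None]),
                 'SampleEnum': set(['ValueA', 'ValueB']),
                 'SampleOptional': set([None, 'ValueA'])}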
Livit/Livit.Learn.EdX
common/test/acceptance/tests/studio/test_import_export.py
13
16914
""" Acceptance tests for the Import and Export pages """ from nose.plugins.attrib import attr from datetime import datetime from flaky import flaky from abc import abstractmethod from bok_choy.promise import EmptyPromise from .base_studio_test import StudioLibraryTest, StudioCourseTest from ...fixtures.course import XBlockFixtureDesc from ...pages.studio.import_export import ExportLibraryPage, ExportCoursePage, ImportLibraryPage, ImportCoursePage from ...pages.studio.library import LibraryEditPage from ...pages.studio.container import ContainerPage from ...pages.studio.overview import CourseOutlinePage from ...pages.lms.courseware import CoursewarePage from ...pages.lms.staff_view import StaffPage class ExportTestMixin(object): """ Tests to run both for course and library export pages. """ def test_export(self): """ Scenario: I am able to export a course or library Given that I have a course or library And I click the download button The download will succeed And the file will be of the right MIME type. """ good_status, is_tarball_mimetype = self.export_page.download_tarball() self.assertTrue(good_status) self.assertTrue(is_tarball_mimetype) @attr('shard_7') class TestCourseExport(ExportTestMixin, StudioCourseTest): """ Export tests for courses. """ def setUp(self): # pylint: disable=arguments-differ super(TestCourseExport, self).setUp() self.export_page = ExportCoursePage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'], ) self.export_page.visit() def test_header(self): """ Scenario: I should see the correct text when exporting a course. Given that I have a course to export from When I visit the export page The correct header should be shown """ self.assertEqual(self.export_page.header_text, 'Course Export') @attr('shard_7') class TestLibraryExport(ExportTestMixin, StudioLibraryTest): """ Export tests for libraries. """ def setUp(self): """ Ensure a library exists and navigate to the library edit page. """ super(TestLibraryExport, self).setUp() self.export_page = ExportLibraryPage(self.browser, self.library_key) self.export_page.visit() def test_header(self): """ Scenario: I should see the correct text when exporting a library. Given that I have a library to export from When I visit the export page The correct header should be shown """ self.assertEqual(self.export_page.header_text, 'Library Export') class BadExportMixin(object): """ Test mixin for bad exports. """ def test_bad_export(self): """ Scenario: I should receive an error when attempting to export a broken course or library. Given that I have a course or library No error modal should be showing When I click the export button An error modal should be shown When I click the modal's action button I should arrive at the edit page for the broken component """ # No error should be there to start. self.assertFalse(self.export_page.is_error_modal_showing()) self.export_page.click_export() self.export_page.wait_for_error_modal() self.export_page.click_modal_button() EmptyPromise( lambda: self.edit_page.is_browser_on_page, 'Arrived at component edit page', timeout=30 ) @attr('shard_7') class TestLibraryBadExport(BadExportMixin, StudioLibraryTest): """ Verify exporting a bad library causes an error. """ def setUp(self): """ Set up the pages and start the tests. 
""" super(TestLibraryBadExport, self).setUp() self.export_page = ExportLibraryPage(self.browser, self.library_key) self.edit_page = LibraryEditPage(self.browser, self.library_key) self.export_page.visit() def populate_library_fixture(self, library_fixture): """ Create a library with a bad component. """ library_fixture.add_children( XBlockFixtureDesc("problem", "Bad Problem", data='<'), ) @attr('shard_7') class TestCourseBadExport(BadExportMixin, StudioCourseTest): """ Verify exporting a bad course causes an error. """ ready_method = 'wait_for_component_menu' def setUp(self): # pylint: disable=arguments-differ super(TestCourseBadExport, self).setUp() self.export_page = ExportCoursePage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'], ) self.edit_page = ContainerPage(self.browser, self.unit.locator) self.export_page.visit() def populate_course_fixture(self, course_fixture): """ Populate the course with a unit that has a bad problem. """ self.unit = XBlockFixtureDesc('vertical', 'Unit') course_fixture.add_children( XBlockFixtureDesc('chapter', 'Main Section').add_children( XBlockFixtureDesc('sequential', 'Subsection').add_children( self.unit.add_children( XBlockFixtureDesc("problem", "Bad Problem", data='<') ) ) ) ) @attr('shard_7') class ImportTestMixin(object): """ Tests to run for both course and library import pages. """ def setUp(self): super(ImportTestMixin, self).setUp() self.import_page = self.import_page_class(*self.page_args()) self.landing_page = self.landing_page_class(*self.page_args()) self.import_page.visit() @abstractmethod def page_args(self): """ Generates the args for initializing a page object. """ return [] def test_upload(self): """ Scenario: I want to upload a course or library for import. Given that I have a library or course to import into And I have a valid .tar.gz file containing data to replace it with I can select the file and upload it And the page will give me confirmation that it uploaded successfully """ self.import_page.upload_tarball(self.tarball_name) self.import_page.wait_for_upload() def test_import_timestamp(self): """ Scenario: I perform a course / library import On import success, the page displays a UTC timestamp previously not visible And if I refresh the page, the timestamp is still displayed """ self.assertFalse(self.import_page.is_timestamp_visible()) # Get the time when the import has started. # import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to # keep the comparison consistent upload_start_time = datetime.utcnow().replace(microsecond=0, second=0) self.import_page.upload_tarball(self.tarball_name) self.import_page.wait_for_upload() # Get the time when the import has finished. 
# import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to # keep the comparison consistent upload_finish_time = datetime.utcnow().replace(microsecond=0, second=0) import_timestamp = self.import_page.parsed_timestamp self.import_page.wait_for_timestamp_visible() # Verify that 'import_timestamp' is between start and finish upload time self.assertLessEqual( upload_start_time, import_timestamp, "Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time" ) self.assertGreaterEqual( upload_finish_time, import_timestamp, "Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time" ) self.import_page.visit() self.import_page.wait_for_tasks(completed=True) self.import_page.wait_for_timestamp_visible() def test_landing_url(self): """ Scenario: When uploading a library or course, a link appears for me to view the changes. Given that I upload a library or course A button will appear that contains the URL to the library or course's main page """ self.import_page.upload_tarball(self.tarball_name) self.assertEqual(self.import_page.finished_target_url(), self.landing_page.url) def test_bad_filename_error(self): """ Scenario: I should be reprimanded for trying to upload something that isn't a .tar.gz file. Given that I select a file that is an .mp4 for upload An error message will appear """ self.import_page.upload_tarball('funny_cat_video.mp4') self.import_page.wait_for_filename_error() def test_task_list(self): """ Scenario: I should see feedback checkpoints when uploading a course or library Given that I am on an import page No task checkpoint list should be showing When I upload a valid tarball Each task in the checklist should be marked confirmed And the task list should be visible """ # The task list shouldn't be visible to start. self.assertFalse(self.import_page.is_task_list_showing(), "Task list shown too early.") self.import_page.wait_for_tasks() self.import_page.upload_tarball(self.tarball_name) self.import_page.wait_for_tasks(completed=True) self.assertTrue(self.import_page.is_task_list_showing(), "Task list did not display.") def test_bad_import(self): """ Scenario: I should see a failed checklist when uploading an invalid course or library Given that I am on an import page And I upload a tarball with a broken XML file The tasks should be confirmed up until the 'Updating' task And the 'Updating' task should be marked failed And the remaining tasks should not be marked as started """ self.import_page.upload_tarball(self.bad_tarball_name) self.import_page.wait_for_tasks(fail_on='Updating') @attr('shard_7') class TestEntranceExamCourseImport(ImportTestMixin, StudioCourseTest): """ Tests the Course import page """ tarball_name = 'entrance_exam_course.2015.tar.gz' bad_tarball_name = 'bad_course.tar.gz' import_page_class = ImportCoursePage landing_page_class = CourseOutlinePage def page_args(self): return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']] def test_course_updated_with_entrance_exam(self): """ Given that I visit an empty course before import I should not see a section named 'Section' or 'Entrance Exam' When I visit the import page And I upload a course that has an entrance exam section named 'Entrance Exam' And I visit the course outline page again The section named 'Entrance Exam' should now be available. 
And when I switch the view mode to student view and Visit CourseWare Then I see one section in the sidebar that is 'Entrance Exam' """ self.landing_page.visit() # Should not exist yet. self.assertRaises(IndexError, self.landing_page.section, "Section") self.assertRaises(IndexError, self.landing_page.section, "Entrance Exam") self.import_page.visit() self.import_page.upload_tarball(self.tarball_name) self.import_page.wait_for_upload() self.landing_page.visit() # There should be two sections. 'Entrance Exam' and 'Section' on the landing page. self.landing_page.section("Entrance Exam") self.landing_page.section("Section") self.landing_page.view_live() courseware = CoursewarePage(self.browser, self.course_id) courseware.wait_for_page() StaffPage(self.browser, self.course_id).set_staff_view_mode('Student') self.assertEqual(courseware.num_sections, 1) self.assertIn( "To access course materials, you must score", courseware.entrance_exam_message_selector.text[0] ) @attr('shard_7') class TestCourseImport(ImportTestMixin, StudioCourseTest): """ Tests the Course import page """ tarball_name = '2015.lzdwNM.tar.gz' bad_tarball_name = 'bad_course.tar.gz' import_page_class = ImportCoursePage landing_page_class = CourseOutlinePage def page_args(self): return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']] def test_course_updated(self): """ Given that I visit an empty course before import I should not see a section named 'Section' When I visit the import page And I upload a course that has a section named 'Section' And I visit the course outline page again The section named 'Section' should now be available """ self.landing_page.visit() # Should not exist yet. self.assertRaises(IndexError, self.landing_page.section, "Section") self.import_page.visit() self.import_page.upload_tarball(self.tarball_name) self.import_page.wait_for_upload() self.landing_page.visit() # There's a section named 'Section' in the tarball. self.landing_page.section("Section") def test_header(self): """ Scenario: I should see the correct text when importing a course. 
Given that I have a course to import to When I visit the import page The correct header should be shown """ self.assertEqual(self.import_page.header_text, 'Course Import') def test_multiple_course_import_message(self): """ Given that I visit an empty course before import When I visit the import page And I upload a course with file name 2015.lzdwNM.tar.gz Then timestamp is visible after course is updated successfully And then I create a new course When I visit the import page of this new course Then timestamp is not visible """ self.import_page.visit() self.import_page.upload_tarball(self.tarball_name) self.import_page.wait_for_upload() self.assertTrue(self.import_page.is_timestamp_visible()) # Create a new course and visit the import page self.course_info = { 'org': 'orgX', 'number': self.unique_id + '_2', 'run': 'test_run_2', 'display_name': 'Test Course 2' + self.unique_id } self.install_course_fixture() self.import_page = self.import_page_class(*self.page_args()) self.import_page.visit() # As this is new course which is never import so timestamp should not present self.assertFalse(self.import_page.is_timestamp_visible()) @attr('shard_7') class TestLibraryImport(ImportTestMixin, StudioLibraryTest): """ Tests the Library import page """ tarball_name = 'library.HhJfPD.tar.gz' bad_tarball_name = 'bad_library.tar.gz' import_page_class = ImportLibraryPage landing_page_class = LibraryEditPage def page_args(self): return [self.browser, self.library_key] @flaky # TODO: SOL-430 def test_library_updated(self): """ Given that I visit an empty library No XBlocks should be shown When I visit the import page And I upload a library that contains three XBlocks And I visit the library page Three XBlocks should be shown """ self.landing_page.visit() self.landing_page.wait_until_ready() # No items should be in the library to start. self.assertEqual(len(self.landing_page.xblocks), 0) self.import_page.visit() self.import_page.upload_tarball(self.tarball_name) self.import_page.wait_for_upload() self.landing_page.visit() self.landing_page.wait_until_ready() # There are three blocks in the tarball. self.assertEqual(len(self.landing_page.xblocks), 3) def test_header(self): """ Scenario: I should see the correct text when importing a library. Given that I have a library to import to When I visit the import page The correct header should be shown """ self.assertEqual(self.import_page.header_text, 'Library Import')
agpl-3.0
-9,093,546,574,179,088,000
37.440909
114
0.640594
false
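ImportTestMixin above turns a new import target into a small amount of declarative glue: class attributes name the fixture tarballs and page objects, and page_args() supplies constructor arguments for both pages. A hedged sketch of what a further subclass would look like (the tarball names are made up; the page classes are the ones already imported in that module):

class TestAnotherCourseImport(ImportTestMixin, StudioCourseTest):
    tarball_name = 'another_course.tar.gz'       # hypothetical fixture
    bad_tarball_name = 'bad_course.tar.gz'       # fixture with broken XML
    import_page_class = ImportCoursePage
    landing_page_class = CourseOutlinePage

    def page_args(self):
        # Both page objects are built from the same course identifiers.
        return [self.browser, self.course_info['org'],
                self.course_info['number'], self.course_info['run']]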
sfromm/ansible-report
alembic/versions/2f3bd55d88a_base_tables.py
1
1299
"""base tables Revision ID: 2f3bd55d88a Revises: None Create Date: 2013-04-22 15:26:47.296443 """ # revision identifiers, used by Alembic. revision = '2f3bd55d88a' down_revision = None from ansiblereport.model import JSONEncodedDict from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('user', sa.Column('id', sa.Integer(), nullable=False), sa.Column('username', sa.String(), nullable=True), sa.Column('euid', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table('task', sa.Column('id', sa.Integer(), nullable=False), sa.Column('timestamp', sa.DateTime(), nullable=True), sa.Column('hostname', sa.String(), nullable=True), sa.Column('module', sa.String(), nullable=True), sa.Column('result', sa.String(), nullable=True), sa.Column('data', JSONEncodedDict(), nullable=True), sa.Column('user_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), sa.PrimaryKeyConstraint('id') ) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('task') op.drop_table('user') ### end Alembic commands ###
gpl-3.0
7,288,569,300,795,682,000
28.522727
63
0.661278
false
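For context, a revision like the one above is applied and rolled back through Alembic's standard entry points. A minimal sketch using the programmatic equivalent of "alembic upgrade" / "alembic downgrade"; it assumes an alembic.ini in the working directory, as a standard Alembic layout has:

from alembic.config import Config
from alembic import command

cfg = Config("alembic.ini")
command.upgrade(cfg, "2f3bd55d88a")   # create the 'user' and 'task' tables
command.downgrade(cfg, "base")        # drop them again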
vFense/vFenseAgent-nix
agent/deps/rpm6-32/Python-2.7.5/lib/python2.7/lib-tk/test/test_ttk/test_extensions.py
88
9186
import sys
import unittest
import Tkinter
import ttk
from test.test_support import requires, run_unittest

import support

requires('gui')


class LabeledScaleTest(unittest.TestCase):

    def setUp(self):
        support.root_deiconify()

    def tearDown(self):
        support.root_withdraw()

    def test_widget_destroy(self):
        # automatically created variable
        x = ttk.LabeledScale()
        var = x._variable._name
        x.destroy()
        self.assertRaises(Tkinter.TclError, x.tk.globalgetvar, var)

        # manually created variable
        myvar = Tkinter.DoubleVar()
        name = myvar._name
        x = ttk.LabeledScale(variable=myvar)
        x.destroy()
        self.assertEqual(x.tk.globalgetvar(name), myvar.get())
        del myvar
        self.assertRaises(Tkinter.TclError, x.tk.globalgetvar, name)

        # checking that the tracing callback is properly removed
        myvar = Tkinter.IntVar()
        # LabeledScale will start tracing myvar
        x = ttk.LabeledScale(variable=myvar)
        x.destroy()
        # Unless the tracing callback was removed, creating a new
        # LabeledScale with the same var will cause an error now. This
        # happens because the variable will be set to (possibly) a new
        # value which causes the tracing callback to be called and then
        # it tries calling instance attributes not yet defined.
        ttk.LabeledScale(variable=myvar)
        if hasattr(sys, 'last_type'):
            self.assertFalse(sys.last_type == Tkinter.TclError)

    def test_initialization(self):
        # master passing
        x = ttk.LabeledScale()
        self.assertEqual(x.master, Tkinter._default_root)
        x.destroy()
        master = Tkinter.Frame()
        x = ttk.LabeledScale(master)
        self.assertEqual(x.master, master)
        x.destroy()

        # variable initialization/passing
        passed_expected = ((2.5, 2), ('0', 0), (0, 0), (10, 10),
                           (-1, -1), (sys.maxint + 1, sys.maxint + 1))
        for pair in passed_expected:
            x = ttk.LabeledScale(from_=pair[0])
            self.assertEqual(x.value, pair[1])
            x.destroy()
        x = ttk.LabeledScale(from_='2.5')
        self.assertRaises(ValueError, x._variable.get)
        x.destroy()
        x = ttk.LabeledScale(from_=None)
        self.assertRaises(ValueError, x._variable.get)
        x.destroy()

        # variable should have its default value set to the from_ value
        myvar = Tkinter.DoubleVar(value=20)
        x = ttk.LabeledScale(variable=myvar)
        self.assertEqual(x.value, 0)
        x.destroy()
        # check that it is really using a DoubleVar
        x = ttk.LabeledScale(variable=myvar, from_=0.5)
        self.assertEqual(x.value, 0.5)
        self.assertEqual(x._variable._name, myvar._name)
        x.destroy()

        # widget positioning
        def check_positions(scale, scale_pos, label, label_pos):
            self.assertEqual(scale.pack_info()['side'], scale_pos)
            self.assertEqual(label.place_info()['anchor'], label_pos)
        x = ttk.LabeledScale(compound='top')
        check_positions(x.scale, 'bottom', x.label, 'n')
        x.destroy()
        x = ttk.LabeledScale(compound='bottom')
        check_positions(x.scale, 'top', x.label, 's')
        x.destroy()
        x = ttk.LabeledScale(compound='unknown')  # invert default positions
        check_positions(x.scale, 'top', x.label, 's')
        x.destroy()
        x = ttk.LabeledScale()  # take default positions
        check_positions(x.scale, 'bottom', x.label, 'n')
        x.destroy()

        # extra, and invalid, kwargs
        self.assertRaises(Tkinter.TclError, ttk.LabeledScale, a='b')

    def test_horizontal_range(self):
        lscale = ttk.LabeledScale(from_=0, to=10)
        lscale.pack()
        lscale.wait_visibility()
        lscale.update()

        linfo_1 = lscale.label.place_info()
        prev_xcoord = lscale.scale.coords()[0]
        self.assertEqual(prev_xcoord, int(linfo_1['x']))
        # change range to: from -5 to 5. This should change the x coord of
        # the scale widget, since 0 is at the middle of the new
        # range.
        lscale.scale.configure(from_=-5, to=5)
        # The following update is needed since the test doesn't use mainloop,
        # at the same time this shouldn't affect test outcome
        lscale.update()
        curr_xcoord = lscale.scale.coords()[0]
        self.assertTrue(prev_xcoord != curr_xcoord)
        # the label widget should have been repositioned too
        linfo_2 = lscale.label.place_info()
        self.assertEqual(lscale.label['text'], 0)
        self.assertEqual(curr_xcoord, int(linfo_2['x']))
        # change the range back
        lscale.scale.configure(from_=0, to=10)
        self.assertTrue(prev_xcoord != curr_xcoord)
        self.assertEqual(prev_xcoord, int(linfo_1['x']))

        lscale.destroy()

    def test_variable_change(self):
        x = ttk.LabeledScale()
        x.pack()
        x.wait_visibility()
        x.update()

        curr_xcoord = x.scale.coords()[0]
        newval = x.value + 1
        x.value = newval
        # The following update is needed since the test doesn't use mainloop,
        # at the same time this shouldn't affect test outcome
        x.update()
        self.assertEqual(x.label['text'], newval)
        self.assertTrue(x.scale.coords()[0] > curr_xcoord)
        self.assertEqual(x.scale.coords()[0],
                         int(x.label.place_info()['x']))

        # value outside range; no changes should happen
        x.value = x.scale['to'] + 1
        x.update()
        self.assertEqual(x.label['text'], newval)
        self.assertEqual(x.scale.coords()[0],
                         int(x.label.place_info()['x']))

        x.destroy()

    def test_resize(self):
        x = ttk.LabeledScale()
        x.pack(expand=True, fill='both')
        x.wait_visibility()
        x.update()

        width, height = x.master.winfo_width(), x.master.winfo_height()
        width_new, height_new = width * 2, height * 2

        x.value = 3
        x.update()
        x.master.wm_geometry("%dx%d" % (width_new, height_new))
        self.assertEqual(int(x.label.place_info()['x']),
                         x.scale.coords()[0])

        # Reset geometry
        x.master.wm_geometry("%dx%d" % (width, height))
        x.destroy()


class OptionMenuTest(unittest.TestCase):

    def setUp(self):
        support.root_deiconify()
        self.textvar = Tkinter.StringVar()

    def tearDown(self):
        del self.textvar
        support.root_withdraw()

    def test_widget_destroy(self):
        var = Tkinter.StringVar()
        optmenu = ttk.OptionMenu(None, var)
        name = var._name
        optmenu.update_idletasks()
        optmenu.destroy()
        self.assertEqual(optmenu.tk.globalgetvar(name), var.get())
        del var
        self.assertRaises(Tkinter.TclError, optmenu.tk.globalgetvar, name)

    def test_initialization(self):
        self.assertRaises(Tkinter.TclError,
                          ttk.OptionMenu, None, self.textvar, invalid='thing')

        optmenu = ttk.OptionMenu(None, self.textvar, 'b', 'a', 'b')
        self.assertEqual(optmenu._variable.get(), 'b')

        self.assertTrue(optmenu['menu'])
        self.assertTrue(optmenu['textvariable'])

        optmenu.destroy()

    def test_menu(self):
        items = ('a', 'b', 'c')
        default = 'a'
        optmenu = ttk.OptionMenu(None, self.textvar, default, *items)
        found_default = False
        for i in range(len(items)):
            value = optmenu['menu'].entrycget(i, 'value')
            self.assertEqual(value, items[i])
            if value == default:
                found_default = True
        self.assertTrue(found_default)
        optmenu.destroy()

        # default shouldn't be in menu if it is not part of values
        default = 'd'
        optmenu = ttk.OptionMenu(None, self.textvar, default, *items)
        curr = None
        i = 0
        while True:
            last, curr = curr, optmenu['menu'].entryconfigure(i, 'value')
            if last == curr:
                # no more menu entries
                break
            self.assertFalse(curr == default)
            i += 1
        self.assertEqual(i, len(items))

        # check that variable is updated correctly
        optmenu.pack()
        optmenu.wait_visibility()
        optmenu['menu'].invoke(0)
        self.assertEqual(optmenu._variable.get(), items[0])

        # changing to an invalid index shouldn't change the variable
        self.assertRaises(Tkinter.TclError, optmenu['menu'].invoke, -1)
        self.assertEqual(optmenu._variable.get(), items[0])

        optmenu.destroy()

        # specifying a callback
        success = []
        def cb_test(item):
            self.assertEqual(item, items[1])
            success.append(True)
        optmenu = ttk.OptionMenu(None, self.textvar, 'a', command=cb_test,
                                 *items)
        optmenu['menu'].invoke(1)
        if not success:
            self.fail("Menu callback not invoked")

        optmenu.destroy()


tests_gui = (LabeledScaleTest, OptionMenuTest)

if __name__ == "__main__":
    run_unittest(*tests_gui)
lgpl-3.0
-351,469,206,361,310,340
32.525547
77
0.59373
false
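The two widgets exercised above are easiest to understand interactively. A minimal sketch using the same Python 2 module names as the test file:

import Tkinter
import ttk

root = Tkinter.Tk()

# A scale whose label tracks the current value; compound='top' places
# the label above the scale, as test_initialization checks.
scale = ttk.LabeledScale(root, from_=0, to=10, compound='top')
scale.pack(expand=True, fill='both')

# An option menu with 'b' preselected; the variable follows the selection.
choice = Tkinter.StringVar()
menu = ttk.OptionMenu(root, choice, 'b', 'a', 'b', 'c')
menu.pack()

root.mainloop()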
Jeff20/sklearn_pycon2015
notebooks/fig_code/svm_gui.py
47
11549
""" ========== Libsvm GUI ========== A simple graphical frontend for Libsvm mainly intended for didactic purposes. You can create data points by point and click and visualize the decision region induced by different kernels and parameter settings. To create positive examples click the left mouse button; to create negative examples click the right button. If all examples are from the same class, it uses a one-class SVM. """ from __future__ import division, print_function print(__doc__) # Author: Peter Prettenhoer <[email protected]> # # License: BSD 3 clause import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg from matplotlib.figure import Figure from matplotlib.contour import ContourSet import Tkinter as Tk import sys import numpy as np from sklearn import svm from sklearn.datasets import dump_svmlight_file from sklearn.externals.six.moves import xrange y_min, y_max = -50, 50 x_min, x_max = -50, 50 class Model(object): """The Model which hold the data. It implements the observable in the observer pattern and notifies the registered observers on change event. """ def __init__(self): self.observers = [] self.surface = None self.data = [] self.cls = None self.surface_type = 0 def changed(self, event): """Notify the observers. """ for observer in self.observers: observer.update(event, self) def add_observer(self, observer): """Register an observer. """ self.observers.append(observer) def set_surface(self, surface): self.surface = surface def dump_svmlight_file(self, file): data = np.array(self.data) X = data[:, 0:2] y = data[:, 2] dump_svmlight_file(X, y, file) class Controller(object): def __init__(self, model): self.model = model self.kernel = Tk.IntVar() self.surface_type = Tk.IntVar() # Whether or not a model has been fitted self.fitted = False def fit(self): print("fit the model") train = np.array(self.model.data) X = train[:, 0:2] y = train[:, 2] C = float(self.complexity.get()) gamma = float(self.gamma.get()) coef0 = float(self.coef0.get()) degree = int(self.degree.get()) kernel_map = {0: "linear", 1: "rbf", 2: "poly"} if len(np.unique(y)) == 1: clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()], gamma=gamma, coef0=coef0, degree=degree) clf.fit(X) else: clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C, gamma=gamma, coef0=coef0, degree=degree) clf.fit(X, y) if hasattr(clf, 'score'): print("Accuracy:", clf.score(X, y) * 100) X1, X2, Z = self.decision_surface(clf) self.model.clf = clf self.model.set_surface((X1, X2, Z)) self.model.surface_type = self.surface_type.get() self.fitted = True self.model.changed("surface") def decision_surface(self, cls): delta = 1 x = np.arange(x_min, x_max + delta, delta) y = np.arange(y_min, y_max + delta, delta) X1, X2 = np.meshgrid(x, y) Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()]) Z = Z.reshape(X1.shape) return X1, X2, Z def clear_data(self): self.model.data = [] self.fitted = False self.model.changed("clear") def add_example(self, x, y, label): self.model.data.append((x, y, label)) self.model.changed("example_added") # update decision surface if already fitted. self.refit() def refit(self): """Refit the model if already fitted. """ if self.fitted: self.fit() class View(object): """Test docstring. 
""" def __init__(self, root, controller): f = Figure() ax = f.add_subplot(111) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlim((x_min, x_max)) ax.set_ylim((y_min, y_max)) canvas = FigureCanvasTkAgg(f, master=root) canvas.show() canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) canvas.mpl_connect('key_press_event', self.onkeypress) canvas.mpl_connect('key_release_event', self.onkeyrelease) canvas.mpl_connect('button_press_event', self.onclick) toolbar = NavigationToolbar2TkAgg(canvas, root) toolbar.update() self.shift_down = False self.controllbar = ControllBar(root, controller) self.f = f self.ax = ax self.canvas = canvas self.controller = controller self.contours = [] self.c_labels = None self.plot_kernels() def plot_kernels(self): self.ax.text(-50, -60, "Linear: $u^T v$") self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$") self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$") def onkeypress(self, event): if event.key == "shift": self.shift_down = True def onkeyrelease(self, event): if event.key == "shift": self.shift_down = False def onclick(self, event): if event.xdata and event.ydata: if self.shift_down or event.button == 3: self.controller.add_example(event.xdata, event.ydata, -1) elif event.button == 1: self.controller.add_example(event.xdata, event.ydata, 1) def update_example(self, model, idx): x, y, l = model.data[idx] if l == 1: color = 'w' elif l == -1: color = 'k' self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0) def update(self, event, model): if event == "examples_loaded": for i in xrange(len(model.data)): self.update_example(model, i) if event == "example_added": self.update_example(model, -1) if event == "clear": self.ax.clear() self.ax.set_xticks([]) self.ax.set_yticks([]) self.contours = [] self.c_labels = None self.plot_kernels() if event == "surface": self.remove_surface() self.plot_support_vectors(model.clf.support_vectors_) self.plot_decision_surface(model.surface, model.surface_type) self.canvas.draw() def remove_surface(self): """Remove old decision surface.""" if len(self.contours) > 0: for contour in self.contours: if isinstance(contour, ContourSet): for lineset in contour.collections: lineset.remove() else: contour.remove() self.contours = [] def plot_support_vectors(self, support_vectors): """Plot the support vectors by placing circles over the corresponding data points and adds the circle collection to the contours list.""" cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1], s=80, edgecolors="k", facecolors="none") self.contours.append(cs) def plot_decision_surface(self, surface, type): X1, X2, Z = surface if type == 0: levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' self.contours.append(self.ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)) elif type == 1: self.contours.append(self.ax.contourf(X1, X2, Z, 10, cmap=matplotlib.cm.bone, origin='lower', alpha=0.85)) self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k', linestyles=['solid'])) else: raise ValueError("surface type unknown") class ControllBar(object): def __init__(self, root, controller): fm = Tk.Frame(root) kernel_group = Tk.Frame(fm) Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel, value=0, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel, value=1, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(kernel_group, text="Poly", 
variable=controller.kernel, value=2, command=controller.refit).pack(anchor=Tk.W) kernel_group.pack(side=Tk.LEFT) valbox = Tk.Frame(fm) controller.complexity = Tk.StringVar() controller.complexity.set("1.0") c = Tk.Frame(valbox) Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(c, width=6, textvariable=controller.complexity).pack( side=Tk.LEFT) c.pack() controller.gamma = Tk.StringVar() controller.gamma.set("0.01") g = Tk.Frame(valbox) Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT) g.pack() controller.degree = Tk.StringVar() controller.degree.set("3") d = Tk.Frame(valbox) Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT) d.pack() controller.coef0 = Tk.StringVar() controller.coef0.set("0") r = Tk.Frame(valbox) Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT) r.pack() valbox.pack(side=Tk.LEFT) cmap_group = Tk.Frame(fm) Tk.Radiobutton(cmap_group, text="Hyperplanes", variable=controller.surface_type, value=0, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(cmap_group, text="Surface", variable=controller.surface_type, value=1, command=controller.refit).pack(anchor=Tk.W) cmap_group.pack(side=Tk.LEFT) train_button = Tk.Button(fm, text='Fit', width=5, command=controller.fit) train_button.pack() fm.pack(side=Tk.LEFT) Tk.Button(fm, text='Clear', width=5, command=controller.clear_data).pack(side=Tk.LEFT) def get_parser(): from optparse import OptionParser op = OptionParser() op.add_option("--output", action="store", type="str", dest="output", help="Path where to dump data.") return op def main(argv): op = get_parser() opts, args = op.parse_args(argv[1:]) root = Tk.Tk() model = Model() controller = Controller(model) root.wm_title("Scikit-learn Libsvm GUI") view = View(root, controller) model.add_observer(view) Tk.mainloop() if opts.output: model.dump_svmlight_file(opts.output) if __name__ == "__main__": main(sys.argv)
bsd-3-clause
4,971,655,977,755,502,000
32.868035
79
0.565157
false
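Running the example is a one-liner; left click adds a positive point, right click (or shift+click) a negative one, and the --output flag defined in get_parser() dumps the clicked points in svmlight format when the window closes:

    python svm_gui.py --output points.svmlight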
Dima73/pli-openmultibootmanager
src/OMBManagerAbout.py
1
2459
#############################################################################
#
# Copyright (C) 2014 Impex-Sat Gmbh & Co.KG
# Written by Sandro Cavazzoni <[email protected]>
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#############################################################################

from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.Label import Label
from OMBManagerCommon import OMB_DATA_DIR, OMB_UPLOAD_DIR, OMB_TMP_DIR, OMB_MANAGER_VERION
from OMBManagerInstall import BOX_NAME
from OMBManagerLocale import _

from enigma import getDesktop

try:
    screenWidth = getDesktop(0).size().width()
except:
    screenWidth = 720


class OMBManagerAbout(Screen):
    if screenWidth >= 1920:
        skin = """
            <screen position="center,center" size="1000,400">
                <widget name="about" position="20,20" size="940,340" font="Regular;33" zPosition="1" foregroundColor="yellow" />
            </screen>"""
    else:
        skin = """
            <screen position="center,center" size="560,300">
                <widget name="about" position="10,10" size="540,240" font="Regular;22" zPosition="1" foregroundColor="yellow" />
            </screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)

        self.setTitle(_('openMultiboot About'))

        about = "openMultiboot Manager " + OMB_MANAGER_VERION + "\n"
        about += BOX_NAME + "\n"
        about += "(c) 2014 Impex-Sat Gmbh & Co.KG\n\n"
        about += "Written by Sandro Cavazzoni <[email protected]>"
        about += "\n"
        about += "Modded by Meo"
        about += "\n"
        about += "\nPatch for openPli Dimitrij <[email protected]>"

        self['about'] = Label(about)
        self["actions"] = ActionMap(["SetupActions"],
        {
            "cancel": self.keyCancel
        })

    def keyCancel(self):
        self.close()
gpl-2.0
-904,339,241,326,673,500
33.633803
116
0.669784
false
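For orientation, an enigma2 screen like this is normally opened from another screen through the session. A hedged sketch (it assumes self is a Screen with a live session and that OMBManagerAbout is importable; the surrounding plugin code is elided):

from OMBManagerAbout import OMBManagerAbout

def keyAbout(self):
    # session.open instantiates the screen and pushes it onto the stack
    self.session.open(OMBManagerAbout)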
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/logilab/common/table.py
93
31408
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:[email protected] # # This file is part of logilab-common. # # logilab-common is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free # Software Foundation, either version 2.1 of the License, or (at your option) any # later version. # # logilab-common is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License along # with logilab-common. If not, see <http://www.gnu.org/licenses/>. """Table management module.""" from __future__ import print_function __docformat__ = "restructuredtext en" from six.moves import range class Table(object): """Table defines a data table with column and row names. inv: len(self.data) <= len(self.row_names) forall(self.data, lambda x: len(x) <= len(self.col_names)) """ def __init__(self, default_value=0, col_names=None, row_names=None): self.col_names = [] self.row_names = [] self.data = [] self.default_value = default_value if col_names: self.create_columns(col_names) if row_names: self.create_rows(row_names) def _next_row_name(self): return 'row%s' % (len(self.row_names)+1) def __iter__(self): return iter(self.data) def __eq__(self, other): if other is None: return False else: return list(self) == list(other) __hash__ = object.__hash__ def __ne__(self, other): return not self == other def __len__(self): return len(self.row_names) ## Rows / Columns creation ################################################# def create_rows(self, row_names): """Appends row_names to the list of existing rows """ self.row_names.extend(row_names) for row_name in row_names: self.data.append([self.default_value]*len(self.col_names)) def create_columns(self, col_names): """Appends col_names to the list of existing columns """ for col_name in col_names: self.create_column(col_name) def create_row(self, row_name=None): """Creates a rowname to the row_names list """ row_name = row_name or self._next_row_name() self.row_names.append(row_name) self.data.append([self.default_value]*len(self.col_names)) def create_column(self, col_name): """Creates a colname to the col_names list """ self.col_names.append(col_name) for row in self.data: row.append(self.default_value) ## Sort by column ########################################################## def sort_by_column_id(self, col_id, method = 'asc'): """Sorts the table (in-place) according to data stored in col_id """ try: col_index = self.col_names.index(col_id) self.sort_by_column_index(col_index, method) except ValueError: raise KeyError("Col (%s) not found in table" % (col_id)) def sort_by_column_index(self, col_index, method = 'asc'): """Sorts the table 'in-place' according to data stored in col_index method should be in ('asc', 'desc') """ sort_list = sorted([(row[col_index], row, row_name) for row, row_name in zip(self.data, self.row_names)]) # Sorting sort_list will sort according to col_index # If we want reverse sort, then reverse list if method.lower() == 'desc': sort_list.reverse() # Rebuild data / row names self.data = [] self.row_names = [] for val, row, row_name in sort_list: self.data.append(row) self.row_names.append(row_name) def groupby(self, colname, *others): """builds indexes of data 
:returns: nested dictionaries pointing to actual rows """ groups = {} colnames = (colname,) + others col_indexes = [self.col_names.index(col_id) for col_id in colnames] for row in self.data: ptr = groups for col_index in col_indexes[:-1]: ptr = ptr.setdefault(row[col_index], {}) ptr = ptr.setdefault(row[col_indexes[-1]], Table(default_value=self.default_value, col_names=self.col_names)) ptr.append_row(tuple(row)) return groups def select(self, colname, value): grouped = self.groupby(colname) try: return grouped[value] except KeyError: return [] def remove(self, colname, value): col_index = self.col_names.index(colname) for row in self.data[:]: if row[col_index] == value: self.data.remove(row) ## The 'setter' part ####################################################### def set_cell(self, row_index, col_index, data): """sets value of cell 'row_indew', 'col_index' to data """ self.data[row_index][col_index] = data def set_cell_by_ids(self, row_id, col_id, data): """sets value of cell mapped by row_id and col_id to data Raises a KeyError if row_id or col_id are not found in the table """ try: row_index = self.row_names.index(row_id) except ValueError: raise KeyError("Row (%s) not found in table" % (row_id)) else: try: col_index = self.col_names.index(col_id) self.data[row_index][col_index] = data except ValueError: raise KeyError("Column (%s) not found in table" % (col_id)) def set_row(self, row_index, row_data): """sets the 'row_index' row pre: type(row_data) == types.ListType len(row_data) == len(self.col_names) """ self.data[row_index] = row_data def set_row_by_id(self, row_id, row_data): """sets the 'row_id' column pre: type(row_data) == types.ListType len(row_data) == len(self.row_names) Raises a KeyError if row_id is not found """ try: row_index = self.row_names.index(row_id) self.set_row(row_index, row_data) except ValueError: raise KeyError('Row (%s) not found in table' % (row_id)) def append_row(self, row_data, row_name=None): """Appends a row to the table pre: type(row_data) == types.ListType len(row_data) == len(self.col_names) """ row_name = row_name or self._next_row_name() self.row_names.append(row_name) self.data.append(row_data) return len(self.data) - 1 def insert_row(self, index, row_data, row_name=None): """Appends row_data before 'index' in the table. To make 'insert' behave like 'list.insert', inserting in an out of range index will insert row_data to the end of the list pre: type(row_data) == types.ListType len(row_data) == len(self.col_names) """ row_name = row_name or self._next_row_name() self.row_names.insert(index, row_name) self.data.insert(index, row_data) def delete_row(self, index): """Deletes the 'index' row in the table, and returns it. Raises an IndexError if index is out of range """ self.row_names.pop(index) return self.data.pop(index) def delete_row_by_id(self, row_id): """Deletes the 'row_id' row in the table. Raises a KeyError if row_id was not found. 
""" try: row_index = self.row_names.index(row_id) self.delete_row(row_index) except ValueError: raise KeyError('Row (%s) not found in table' % (row_id)) def set_column(self, col_index, col_data): """sets the 'col_index' column pre: type(col_data) == types.ListType len(col_data) == len(self.row_names) """ for row_index, cell_data in enumerate(col_data): self.data[row_index][col_index] = cell_data def set_column_by_id(self, col_id, col_data): """sets the 'col_id' column pre: type(col_data) == types.ListType len(col_data) == len(self.col_names) Raises a KeyError if col_id is not found """ try: col_index = self.col_names.index(col_id) self.set_column(col_index, col_data) except ValueError: raise KeyError('Column (%s) not found in table' % (col_id)) def append_column(self, col_data, col_name): """Appends the 'col_index' column pre: type(col_data) == types.ListType len(col_data) == len(self.row_names) """ self.col_names.append(col_name) for row_index, cell_data in enumerate(col_data): self.data[row_index].append(cell_data) def insert_column(self, index, col_data, col_name): """Appends col_data before 'index' in the table. To make 'insert' behave like 'list.insert', inserting in an out of range index will insert col_data to the end of the list pre: type(col_data) == types.ListType len(col_data) == len(self.row_names) """ self.col_names.insert(index, col_name) for row_index, cell_data in enumerate(col_data): self.data[row_index].insert(index, cell_data) def delete_column(self, index): """Deletes the 'index' column in the table, and returns it. Raises an IndexError if index is out of range """ self.col_names.pop(index) return [row.pop(index) for row in self.data] def delete_column_by_id(self, col_id): """Deletes the 'col_id' col in the table. Raises a KeyError if col_id was not found. 
""" try: col_index = self.col_names.index(col_id) self.delete_column(col_index) except ValueError: raise KeyError('Column (%s) not found in table' % (col_id)) ## The 'getter' part ####################################################### def get_shape(self): """Returns a tuple which represents the table's shape """ return len(self.row_names), len(self.col_names) shape = property(get_shape) def __getitem__(self, indices): """provided for convenience""" rows, multirows = None, False cols, multicols = None, False if isinstance(indices, tuple): rows = indices[0] if len(indices) > 1: cols = indices[1] else: rows = indices # define row slice if isinstance(rows, str): try: rows = self.row_names.index(rows) except ValueError: raise KeyError("Row (%s) not found in table" % (rows)) if isinstance(rows, int): rows = slice(rows, rows+1) multirows = False else: rows = slice(None) multirows = True # define col slice if isinstance(cols, str): try: cols = self.col_names.index(cols) except ValueError: raise KeyError("Column (%s) not found in table" % (cols)) if isinstance(cols, int): cols = slice(cols, cols+1) multicols = False else: cols = slice(None) multicols = True # get sub-table tab = Table() tab.default_value = self.default_value tab.create_rows(self.row_names[rows]) tab.create_columns(self.col_names[cols]) for idx, row in enumerate(self.data[rows]): tab.set_row(idx, row[cols]) if multirows : if multicols: return tab else: return [item[0] for item in tab.data] else: if multicols: return tab.data[0] else: return tab.data[0][0] def get_cell_by_ids(self, row_id, col_id): """Returns the element at [row_id][col_id] """ try: row_index = self.row_names.index(row_id) except ValueError: raise KeyError("Row (%s) not found in table" % (row_id)) else: try: col_index = self.col_names.index(col_id) except ValueError: raise KeyError("Column (%s) not found in table" % (col_id)) return self.data[row_index][col_index] def get_row_by_id(self, row_id): """Returns the 'row_id' row """ try: row_index = self.row_names.index(row_id) except ValueError: raise KeyError("Row (%s) not found in table" % (row_id)) return self.data[row_index] def get_column_by_id(self, col_id, distinct=False): """Returns the 'col_id' col """ try: col_index = self.col_names.index(col_id) except ValueError: raise KeyError("Column (%s) not found in table" % (col_id)) return self.get_column(col_index, distinct) def get_columns(self): """Returns all the columns in the table """ return [self[:, index] for index in range(len(self.col_names))] def get_column(self, col_index, distinct=False): """get a column by index""" col = [row[col_index] for row in self.data] if distinct: col = list(set(col)) return col def apply_stylesheet(self, stylesheet): """Applies the stylesheet to this table """ for instruction in stylesheet.instructions: eval(instruction) def transpose(self): """Keeps the self object intact, and returns the transposed (rotated) table. """ transposed = Table() transposed.create_rows(self.col_names) transposed.create_columns(self.row_names) for col_index, column in enumerate(self.get_columns()): transposed.set_row(col_index, column) return transposed def pprint(self): """returns a string representing the table in a pretty printed 'text' format. 
""" # The maximum row name (to know the start_index of the first col) max_row_name = 0 for row_name in self.row_names: if len(row_name) > max_row_name: max_row_name = len(row_name) col_start = max_row_name + 5 lines = [] # Build the 'first' line <=> the col_names one # The first cell <=> an empty one col_names_line = [' '*col_start] for col_name in self.col_names: col_names_line.append(col_name + ' '*5) lines.append('|' + '|'.join(col_names_line) + '|') max_line_length = len(lines[0]) # Build the table for row_index, row in enumerate(self.data): line = [] # First, build the row_name's cell row_name = self.row_names[row_index] line.append(row_name + ' '*(col_start-len(row_name))) # Then, build all the table's cell for this line. for col_index, cell in enumerate(row): col_name_length = len(self.col_names[col_index]) + 5 data = str(cell) line.append(data + ' '*(col_name_length - len(data))) lines.append('|' + '|'.join(line) + '|') if len(lines[-1]) > max_line_length: max_line_length = len(lines[-1]) # Wrap the table with '-' to make a frame lines.insert(0, '-'*max_line_length) lines.append('-'*max_line_length) return '\n'.join(lines) def __repr__(self): return repr(self.data) def as_text(self): data = [] # We must convert cells into strings before joining them for row in self.data: data.append([str(cell) for cell in row]) lines = ['\t'.join(row) for row in data] return '\n'.join(lines) class TableStyle: """Defines a table's style """ def __init__(self, table): self._table = table self.size = dict([(col_name, '1*') for col_name in table.col_names]) # __row_column__ is a special key to define the first column which # actually has no name (<=> left most column <=> row names column) self.size['__row_column__'] = '1*' self.alignment = dict([(col_name, 'right') for col_name in table.col_names]) self.alignment['__row_column__'] = 'right' # We shouldn't have to create an entry for # the 1st col (the row_column one) self.units = dict([(col_name, '') for col_name in table.col_names]) self.units['__row_column__'] = '' # XXX FIXME : params order should be reversed for all set() methods def set_size(self, value, col_id): """sets the size of the specified col_id to value """ self.size[col_id] = value def set_size_by_index(self, value, col_index): """Allows to set the size according to the column index rather than using the column's id. BE CAREFUL : the '0' column is the '__row_column__' one ! """ if col_index == 0: col_id = '__row_column__' else: col_id = self._table.col_names[col_index-1] self.size[col_id] = value def set_alignment(self, value, col_id): """sets the alignment of the specified col_id to value """ self.alignment[col_id] = value def set_alignment_by_index(self, value, col_index): """Allows to set the alignment according to the column index rather than using the column's id. BE CAREFUL : the '0' column is the '__row_column__' one ! """ if col_index == 0: col_id = '__row_column__' else: col_id = self._table.col_names[col_index-1] self.alignment[col_id] = value def set_unit(self, value, col_id): """sets the unit of the specified col_id to value """ self.units[col_id] = value def set_unit_by_index(self, value, col_index): """Allows to set the unit according to the column index rather than using the column's id. BE CAREFUL : the '0' column is the '__row_column__' one ! 
(Note that in the 'unit' case, you shouldn't have to set a unit for the 1st column (the __row__column__ one)) """ if col_index == 0: col_id = '__row_column__' else: col_id = self._table.col_names[col_index-1] self.units[col_id] = value def get_size(self, col_id): """Returns the size of the specified col_id """ return self.size[col_id] def get_size_by_index(self, col_index): """Allows to get the size according to the column index rather than using the column's id. BE CAREFUL : the '0' column is the '__row_column__' one ! """ if col_index == 0: col_id = '__row_column__' else: col_id = self._table.col_names[col_index-1] return self.size[col_id] def get_alignment(self, col_id): """Returns the alignment of the specified col_id """ return self.alignment[col_id] def get_alignment_by_index(self, col_index): """Allors to get the alignment according to the column index rather than using the column's id. BE CAREFUL : the '0' column is the '__row_column__' one ! """ if col_index == 0: col_id = '__row_column__' else: col_id = self._table.col_names[col_index-1] return self.alignment[col_id] def get_unit(self, col_id): """Returns the unit of the specified col_id """ return self.units[col_id] def get_unit_by_index(self, col_index): """Allors to get the unit according to the column index rather than using the column's id. BE CAREFUL : the '0' column is the '__row_column__' one ! """ if col_index == 0: col_id = '__row_column__' else: col_id = self._table.col_names[col_index-1] return self.units[col_id] import re CELL_PROG = re.compile("([0-9]+)_([0-9]+)") class TableStyleSheet: """A simple Table stylesheet Rules are expressions where cells are defined by the row_index and col_index separated by an underscore ('_'). For example, suppose you want to say that the (2,5) cell must be the sum of its two preceding cells in the row, you would create the following rule : 2_5 = 2_3 + 2_4 You can also use all the math.* operations you want. For example: 2_5 = sqrt(2_3**2 + 2_4**2) """ def __init__(self, rules = None): rules = rules or [] self.rules = [] self.instructions = [] for rule in rules: self.add_rule(rule) def add_rule(self, rule): """Adds a rule to the stylesheet rules """ try: source_code = ['from math import *'] source_code.append(CELL_PROG.sub(r'self.data[\1][\2]', rule)) self.instructions.append(compile('\n'.join(source_code), 'table.py', 'exec')) self.rules.append(rule) except SyntaxError: print("Bad Stylesheet Rule : %s [skipped]" % rule) def add_rowsum_rule(self, dest_cell, row_index, start_col, end_col): """Creates and adds a rule to sum over the row at row_index from start_col to end_col. dest_cell is a tuple of two elements (x,y) of the destination cell No check is done for indexes ranges. pre: start_col >= 0 end_col > start_col """ cell_list = ['%d_%d'%(row_index, index) for index in range(start_col, end_col + 1)] rule = '%d_%d=' % dest_cell + '+'.join(cell_list) self.add_rule(rule) def add_rowavg_rule(self, dest_cell, row_index, start_col, end_col): """Creates and adds a rule to make the row average (from start_col to end_col) dest_cell is a tuple of two elements (x,y) of the destination cell No check is done for indexes ranges. 
pre: start_col >= 0 end_col > start_col """ cell_list = ['%d_%d'%(row_index, index) for index in range(start_col, end_col + 1)] num = (end_col - start_col + 1) rule = '%d_%d=' % dest_cell + '('+'+'.join(cell_list)+')/%f'%num self.add_rule(rule) def add_colsum_rule(self, dest_cell, col_index, start_row, end_row): """Creates and adds a rule to sum over the col at col_index from start_row to end_row. dest_cell is a tuple of two elements (x,y) of the destination cell No check is done for indexes ranges. pre: start_row >= 0 end_row > start_row """ cell_list = ['%d_%d'%(index, col_index) for index in range(start_row, end_row + 1)] rule = '%d_%d=' % dest_cell + '+'.join(cell_list) self.add_rule(rule) def add_colavg_rule(self, dest_cell, col_index, start_row, end_row): """Creates and adds a rule to make the col average (from start_row to end_row) dest_cell is a tuple of two elements (x,y) of the destination cell No check is done for indexes ranges. pre: start_row >= 0 end_row > start_row """ cell_list = ['%d_%d'%(index, col_index) for index in range(start_row, end_row + 1)] num = (end_row - start_row + 1) rule = '%d_%d=' % dest_cell + '('+'+'.join(cell_list)+')/%f'%num self.add_rule(rule) class TableCellRenderer: """Defines a simple text renderer """ def __init__(self, **properties): """keywords should be properties with an associated boolean as value. For example : renderer = TableCellRenderer(units = True, alignment = False) An unspecified property will have a 'False' value by default. Possible properties are : alignment, unit """ self.properties = properties def render_cell(self, cell_coord, table, table_style): """Renders the cell at 'cell_coord' in the table, using table_style """ row_index, col_index = cell_coord cell_value = table.data[row_index][col_index] final_content = self._make_cell_content(cell_value, table_style, col_index +1) return self._render_cell_content(final_content, table_style, col_index + 1) def render_row_cell(self, row_name, table, table_style): """Renders the cell for 'row_id' row """ cell_value = row_name return self._render_cell_content(cell_value, table_style, 0) def render_col_cell(self, col_name, table, table_style): """Renders the cell for 'col_id' row """ cell_value = col_name col_index = table.col_names.index(col_name) return self._render_cell_content(cell_value, table_style, col_index +1) def _render_cell_content(self, content, table_style, col_index): """Makes the appropriate rendering for this cell content. 
        Rendering properties will be searched using the
        table_style.get_xxx_by_index(col_index) methods.

        This method should be overridden in the derived renderer classes.
        """
        return content

    def _make_cell_content(self, cell_content, table_style, col_index):
        """Makes the cell content (adds decoration data, like units for
        example)
        """
        final_content = cell_content
        if 'skip_zero' in self.properties:
            replacement_char = self.properties['skip_zero']
        else:
            replacement_char = 0
        if replacement_char and final_content == 0:
            return replacement_char

        try:
            units_on = self.properties['units']
            if units_on:
                final_content = self._add_unit(
                    cell_content, table_style, col_index)
        except KeyError:
            pass

        return final_content

    def _add_unit(self, cell_content, table_style, col_index):
        """Adds unit to the cell_content if needed
        """
        unit = table_style.get_unit_by_index(col_index)
        return str(cell_content) + " " + unit


class DocbookRenderer(TableCellRenderer):
    """Defines how to render a cell for a docbook table
    """

    def define_col_header(self, col_index, table_style):
        """Computes the colspec element according to the style
        """
        size = table_style.get_size_by_index(col_index)
        return '<colspec colname="c%d" colwidth="%s"/>\n' % \
               (col_index, size)

    def _render_cell_content(self, cell_content, table_style, col_index):
        """Makes the appropriate rendering for this cell content.
        Rendering properties will be searched using the
        table_style.get_xxx_by_index(col_index) methods.
        """
        try:
            align_on = self.properties['alignment']
            alignment = table_style.get_alignment_by_index(col_index)
            if align_on:
                return "<entry align='%s'>%s</entry>\n" % \
                       (alignment, cell_content)
        except KeyError:
            # KeyError <=> default alignment
            pass
        # Fall back to a plain entry when alignment is absent or disabled
        # (the previous code returned None when the property was present
        # but set to False).
        return "<entry>%s</entry>\n" % cell_content


class TableWriter:
    """A class to write tables
    """

    def __init__(self, stream, table, style, **properties):
        self._stream = stream
        self.style = style or TableStyle(table)
        self._table = table
        self.properties = properties
        self.renderer = None

    def set_style(self, style):
        """sets the table's associated style
        """
        self.style = style

    def set_renderer(self, renderer):
        """sets the way to render cell
        """
        self.renderer = renderer

    def update_properties(self, **properties):
        """Updates writer's properties (for cell rendering)
        """
        self.properties.update(properties)

    def write_table(self, title = ""):
        """Writes the table
        """
        raise NotImplementedError("write_table must be implemented!")


class DocbookTableWriter(TableWriter):
    """Defines an implementation of TableWriter to write a table in Docbook
    """

    def _write_headers(self):
        """Writes col headers
        """
        # Define col_headers (colspec elements)
        for col_index in range(len(self._table.col_names)+1):
            self._stream.write(self.renderer.define_col_header(col_index,
                                                               self.style))

        self._stream.write("<thead>\n<row>\n")
        # XXX FIXME : write an empty entry <=> the first (__row_column__) column
        self._stream.write('<entry></entry>\n')
        for col_name in self._table.col_names:
            self._stream.write(self.renderer.render_col_cell(
                col_name, self._table,
                self.style))

        self._stream.write("</row>\n</thead>\n")

    def _write_body(self):
        """Writes the table body
        """
        self._stream.write('<tbody>\n')

        for row_index, row in enumerate(self._table.data):
            self._stream.write('<row>\n')
            row_name = self._table.row_names[row_index]
            # Write the first entry (row_name)
            self._stream.write(self.renderer.render_row_cell(row_name,
                                                             self._table,
                                                             self.style))

            for col_index, cell in enumerate(row):
                self._stream.write(self.renderer.render_cell(
                    (row_index, col_index),
                    self._table, self.style))
            self._stream.write('</row>\n')

        self._stream.write('</tbody>\n')

    def write_table(self, title = ""):
        """Writes the table
        """
        self._stream.write('<table>\n<title>%s</title>\n' % (title))
        self._stream.write(
            '<tgroup cols="%d" align="left" colsep="1" rowsep="1">\n'%
            (len(self._table.col_names)+1))
        self._write_headers()
        self._write_body()

        self._stream.write('</tgroup>\n</table>\n')
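
# A minimal sketch of the TableStyleSheet rule syntax documented above:
# cell references like 2_3 are rewritten by CELL_PROG into
# `self.data[2][3]` lookups and compiled for later execution against a
# table's data.  Only names defined in this module are used here.
if __name__ == '__main__':
    sheet = TableStyleSheet()
    sheet.add_rule('2_5 = 2_3 + 2_4')        # (2,5) <- sum of two row neighbours
    sheet.add_rowsum_rule((0, 3), 0, 0, 2)   # compiles to 0_3 = 0_0 + 0_1 + 0_2
    print(sheet.rules)
    # Wiring up the writers needs a Table instance (defined earlier in this
    # module); assuming it exposes the `data`, `col_names` and `row_names`
    # attributes used by the writers above, the hookup would look roughly
    # like:
    #   import sys
    #   writer = DocbookTableWriter(sys.stdout, table, None, units=True)
    #   writer.set_renderer(DocbookRenderer(alignment=True))
    #   writer.write_table(title="Example")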
agpl-3.0
-1,865,526,839,058,620,000
32.808396
81
0.547345
false
joshloyal/scikit-learn
examples/feature_selection/plot_f_test_vs_mi.py
75
1647
""" =========================================== Comparison of F-test and mutual information =========================================== This example illustrates the differences between univariate F-test statistics and mutual information. We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the target depends on them as follows: y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is the third features is completely irrelevant. The code below plots the dependency of y against individual x_i and normalized values of univariate F-tests statistics and mutual information. As F-test captures only linear dependency, it rates x_1 as the most discriminative feature. On the other hand, mutual information can capture any kind of dependency between variables and it rates x_2 as the most discriminative feature, which probably agrees better with our intuitive perception for this example. Both methods correctly marks x_3 as irrelevant. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.feature_selection import f_regression, mutual_info_regression np.random.seed(0) X = np.random.rand(1000, 3) y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000) f_test, _ = f_regression(X, y) f_test /= np.max(f_test) mi = mutual_info_regression(X, y) mi /= np.max(mi) plt.figure(figsize=(15, 5)) for i in range(3): plt.subplot(1, 3, i + 1) plt.scatter(X[:, i], y) plt.xlabel("$x_{}$".format(i + 1), fontsize=14) if i == 0: plt.ylabel("$y$", fontsize=14) plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]), fontsize=16) plt.show()
bsd-3-clause
-6,642,357,052,161,931,000
32.612245
97
0.660595
false
foo123/sikuli-framework
examples/calculator/baseline/os/mac/Calculator/Calculator,lcdDisplay-0.py
2
1952
""" Copyright (c) 2013, SMART Technologies ULC All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Copyright holder (SMART Technologies ULC) nor the names of its contributors (Joshua Henn) may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER (SMART Technologies ULC) "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from region.transform import Transform, RegionBelow, RegionMorph,\ RegionRight transforms = { Transform.CONTEXT_PREVIOUS: [], \ Transform.CONTEXT_CURRENT: [], \ Transform.CONTEXT_NEXT: [ RegionRight(), ], \ Transform.CONTEXT_MATCH: [], \ Transform.CONTEXT_FINAL: [], \ Transform.CONTEXT_ENTITY: [] }
bsd-3-clause
1,600,121,264,138,907,600
43.44186
72
0.753586
false
groundhogxc/XCSoar
python/test/test_xcsoar.py
37
2408
#!/usr/bin/env python

import xcsoar
import argparse
from pprint import pprint

# Parse command line parameters
parser = argparse.ArgumentParser(
    description='Please give me an IGC file name...')

parser.add_argument('file_name', type=str)

args = parser.parse_args()

print "Init xcsoar.Flight, don't store flight in memory"
flight = xcsoar.Flight(args.file_name, False)

times = flight.times()

for dtime in times:
  takeoff = dtime['takeoff']
  release = dtime['release']
  landing = dtime['landing']

  print "Takeoff: {}, location {}".format(takeoff['time'], takeoff['location'])
  print "Release: {}, location {}".format(release['time'], release['location'])
  print "Landing: {}, location {}".format(landing['time'], landing['location'])

  print "Flight path from takeoff to release:"
  fixes = flight.path(takeoff['time'], release['time'])
  for fix in fixes:
    print fix

del flight

print
print "Init xcsoar.Flight, store flight on init in memory"
flight = xcsoar.Flight(args.file_name, True)

times = flight.times()

flight_sequence = None

for dtime in times:
  takeoff = dtime['takeoff']
  release = dtime['release']
  landing = dtime['landing']

  print "Takeoff: {}, location {}".format(takeoff['time'], takeoff['location'])
  print "Release: {}, location {}".format(release['time'], release['location'])
  print "Landing: {}, location {}".format(landing['time'], landing['location'])

  print "Flight path from takeoff to release:"
  fixes = flight.path(takeoff['time'], release['time'])
  for fix in fixes:
    print fix

  flight.reduce(takeoff['time'], landing['time'], max_points=10)

  print "Flight path from takeoff to landing, reduced:"
  fixes = flight.path(takeoff['time'], landing['time'])
  for fix in fixes:
    print fix

  flight_sequence = fixes

  analysis = flight.analyse(takeoff=takeoff['time'],
                            scoring_start=release['time'],
                            scoring_end=landing['time'],
                            landing=landing['time'])
  pprint(analysis)

  fixes = flight.path(takeoff['time'], landing['time'])
  print xcsoar.encode([(row[2]['longitude'], row[2]['latitude']) for row in fixes], floor=10e5, method="double")

pprint(flight.encode())

del flight

print
print "Init xcsoar.Flight with a python sequence"
flight = xcsoar.Flight([fix[0:5] for fix in flight_sequence])

for fix in flight.path():
  print fix

del flight
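# Usage sketch: the script expects a single positional argument, the path to
# an IGC trace, as declared by the argparse parser above, e.g.
#   python test_xcsoar.py some_flight.igc
# (the file name here is a placeholder).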
gpl-2.0
8,727,714,687,335,501,000
25.755556
112
0.669435
false
CanalTP-Cityway/apii_sim_engine
source/apiisim/tests/mis_translator/test_mis_api_stub.py
1
4320
#!/usr/bin/python # -*- encoding: utf8 -*- import os import unittest import json from datetime import timedelta, datetime from apiisim import tests from apiisim.common import AlgorithmEnum, TransportModeEnum from apiisim.common.plan_trip import LocationStructure from apiisim.common.mis_plan_trip import ItineraryResponseType from apiisim.common.mis_plan_summed_up_trip import SummedUpItinerariesResponseType, LocationContextType from apiisim.mis_translator.mis_api.stub import base as stub TEST_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "") class TestStub(unittest.TestCase): def setUp(self): STOPS_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "stub_transilien_stops.json") STOPS_FIELD = "stop_areas" self.stub_transilien = stub._CrowFliesMisApi(STOPS_FILE, STOPS_FIELD, tests.DB_NAME) def tearDown(self): del self.stub_transilien def _stop_orly(self, access_time=0): loc = LocationContextType() loc.Position = LocationStructure(Latitude=48.743411, Longitude=2.402955) loc.AccessTime = timedelta(minutes=access_time) loc.PlaceTypeId = "stop_area:DUA:SA:4:57" return loc def _stop_choisy(self, access_time=0): loc = LocationContextType() loc.Position = LocationStructure(Latitude=48.765177, Longitude=2.410013) loc.AccessTime = timedelta(minutes=access_time) loc.PlaceTypeId = "stop_area:DUA:SA:8754528" return loc def _stop_thiais(self, access_time=0): loc = LocationContextType() loc.Position = LocationStructure(Latitude=48.76577965, Longitude=2.392136794) loc.AccessTime = timedelta(minutes=access_time) loc.PlaceTypeId = "stop_area:DUA:SA:4:126" return loc def _stop_morillons(self, access_time=0): loc = LocationContextType() loc.Position = LocationStructure(Latitude=48.731742, Longitude=2.432025) loc.AccessTime = timedelta(minutes=access_time) loc.PlaceTypeId = "stop_area:DUA:SA:4:141" return loc def test_get_stops(self): stops = self.stub_transilien.get_stops() self.assertEquals(len(stops), 10000) def test_get_earliest_location(self): # geographic order best_arrival, distance, duration = self.stub_transilien._get_earliest_location\ (self._stop_choisy(), [self._stop_morillons(), self._stop_orly(), self._stop_thiais()]) self.assertEqual(best_arrival.PlaceTypeId, 'stop_area:DUA:SA:4:126') #Thiais self.assertEqual(round(distance), 743.0) self.assertEqual(duration, timedelta()) best_arrival, distance, duration = self.stub_transilien._get_earliest_location\ (self._stop_choisy(), [self._stop_morillons(), self._stop_orly()]) self.assertEqual(best_arrival.PlaceTypeId, 'stop_area:DUA:SA:4:57') #Orly self.assertEqual(round(distance), 2476.0) self.assertEqual(duration, timedelta(minutes=2)) best_arrival, distance, duration = self.stub_transilien._get_earliest_location\ (self._stop_choisy(), [self._stop_morillons()]) self.assertEqual(best_arrival.PlaceTypeId, 'stop_area:DUA:SA:4:141') #Morillons self.assertEqual(round(distance), 4055.0) self.assertEqual(duration, timedelta(minutes=4)) # test with inactive access_time best_arrival, distance, duration = self.stub_transilien._get_earliest_location\ (self._stop_choisy(3), [self._stop_morillons(), self._stop_orly(), self._stop_thiais()]) self.assertEqual(best_arrival.PlaceTypeId, 'stop_area:DUA:SA:4:126') #Thiais self.assertEqual(round(distance), 743.0) self.assertEqual(duration, timedelta()) # test with active access_time on arrival best_arrival, distance, duration = self.stub_transilien._get_earliest_location\ (self._stop_choisy(3), [self._stop_morillons(), self._stop_orly(), self._stop_thiais(3)]) 
self.assertEqual(best_arrival.PlaceTypeId, 'stop_area:DUA:SA:4:57') #Orly self.assertEqual(round(distance), 2476.0) self.assertEqual(duration, timedelta(minutes=2)) if __name__ == '__main__': tests.drop_db() unittest.main()
agpl-3.0
-1,395,001,047,732,850,000
42.636364
108
0.668287
false
spring-week-topos/nova-week
nova/tests/api/openstack/compute/test_limits.py
12
35781
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests dealing with HTTP rate-limiting. """ import httplib import StringIO from xml.dom import minidom from lxml import etree import webob from nova.api.openstack.compute import limits from nova.api.openstack.compute import views from nova.api.openstack import xmlutil import nova.context from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes from nova.tests import matchers from nova import utils TEST_LIMITS = [ limits.Limit("GET", "/delayed", "^/delayed", 1, utils.TIME_UNITS['MINUTE']), limits.Limit("POST", "*", ".*", 7, utils.TIME_UNITS['MINUTE']), limits.Limit("POST", "/servers", "^/servers", 3, utils.TIME_UNITS['MINUTE']), limits.Limit("PUT", "*", "", 10, utils.TIME_UNITS['MINUTE']), limits.Limit("PUT", "/servers", "^/servers", 5, utils.TIME_UNITS['MINUTE']), ] NS = { 'atom': 'http://www.w3.org/2005/Atom', 'ns': 'http://docs.openstack.org/common/api/v1.0' } class BaseLimitTestSuite(test.NoDBTestCase): """Base test suite which provides relevant stubs and time abstraction.""" def setUp(self): super(BaseLimitTestSuite, self).setUp() self.time = 0.0 self.stubs.Set(limits.Limit, "_get_time", self._get_time) self.absolute_limits = {} def stub_get_project_quotas(context, project_id, usages=True): return dict((k, dict(limit=v)) for k, v in self.absolute_limits.items()) self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas", stub_get_project_quotas) def _get_time(self): """Return the "time" according to this test suite.""" return self.time class LimitsControllerTest(BaseLimitTestSuite): """Tests for `limits.LimitsController` class.""" def setUp(self): """Run before each test.""" super(LimitsControllerTest, self).setUp() self.controller = limits.create_resource() self.ctrler = limits.LimitsController() def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" request = webob.Request.blank("/") request.accept = accept_header request.environ["wsgiorg.routing_args"] = (None, { "action": "index", "controller": "", }) context = nova.context.RequestContext('testuser', 'testproject') request.environ["nova.context"] = context return request def _populate_limits(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), limits.Limit("GET", "changes-since*", "changes-since", 5, 60).display(), ] request.environ["nova.limits"] = _limits return request def test_empty_index_json(self): # Test getting empty limit details in JSON. request = self._get_index_request() response = request.get_response(self.controller) expected = { "limits": { "rate": [], "absolute": {}, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def test_index_json(self): # Test getting limit details in JSON. 
request = self._get_index_request() request = self._populate_limits(request) self.absolute_limits = { 'ram': 512, 'instances': 5, 'cores': 21, 'key_pairs': 10, 'floating_ips': 10, 'security_groups': 10, 'security_group_rules': 20, } response = request.get_response(self.controller) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, { "verb": "POST", "next-available": "1970-01-01T00:00:00Z", "unit": "HOUR", "value": 5, "remaining": 5, }, ], }, { "regex": "changes-since", "uri": "changes-since*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 5, "remaining": 5, }, ], }, ], "absolute": { "maxTotalRAMSize": 512, "maxTotalInstances": 5, "maxTotalCores": 21, "maxTotalKeypairs": 10, "maxTotalFloatingIps": 10, "maxSecurityGroups": 10, "maxSecurityGroupRules": 20, }, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def _populate_limits_diff_regex(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("GET", "*", "*.*", 10, 60).display(), ] request.environ["nova.limits"] = _limits return request def test_index_diff_regex(self): # Test getting limit details in JSON. request = self._get_index_request() request = self._populate_limits_diff_regex(request) response = request.get_response(self.controller) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, { "regex": "*.*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, ], "absolute": {}, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def _test_index_absolute_limits_json(self, expected): request = self._get_index_request() response = request.get_response(self.controller) body = jsonutils.loads(response.body) self.assertEqual(expected, body['limits']['absolute']) def test_index_ignores_extra_absolute_limits_json(self): self.absolute_limits = {'unknown_limit': 9001} self._test_index_absolute_limits_json({}) def test_index_absolute_ram_json(self): self.absolute_limits = {'ram': 1024} self._test_index_absolute_limits_json({'maxTotalRAMSize': 1024}) def test_index_absolute_cores_json(self): self.absolute_limits = {'cores': 17} self._test_index_absolute_limits_json({'maxTotalCores': 17}) def test_index_absolute_instances_json(self): self.absolute_limits = {'instances': 19} self._test_index_absolute_limits_json({'maxTotalInstances': 19}) def test_index_absolute_metadata_json(self): # NOTE: both server metadata and image metadata are overloaded # into metadata_items self.absolute_limits = {'metadata_items': 23} expected = { 'maxServerMeta': 23, 'maxImageMeta': 23, } self._test_index_absolute_limits_json(expected) def test_index_absolute_injected_files(self): self.absolute_limits = { 'injected_files': 17, 'injected_file_content_bytes': 86753, } expected = { 'maxPersonality': 17, 'maxPersonalitySize': 86753, } self._test_index_absolute_limits_json(expected) def test_index_absolute_security_groups(self): self.absolute_limits = { 'security_groups': 8, 'security_group_rules': 16, } expected = { 'maxSecurityGroups': 8, 'maxSecurityGroupRules': 16, } self._test_index_absolute_limits_json(expected) def test_limit_create(self): req = 
fakes.HTTPRequest.blank('/v2/fake/limits') self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.create, req, {}) def test_limit_delete(self): req = fakes.HTTPRequest.blank('/v2/fake/limits') self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.delete, req, 1) def test_limit_detail(self): req = fakes.HTTPRequest.blank('/v2/fake/limits') self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.detail, req) def test_limit_show(self): req = fakes.HTTPRequest.blank('/v2/fake/limits') self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.show, req, 1) def test_limit_update(self): req = fakes.HTTPRequest.blank('/v2/fake/limits') self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.update, req, 1, {}) class MockLimiter(limits.Limiter): pass class LimitMiddlewareTest(BaseLimitTestSuite): """Tests for the `limits.RateLimitingMiddleware` class.""" @webob.dec.wsgify def _empty_app(self, request): """Do-nothing WSGI app.""" pass def setUp(self): """Prepare middleware for use through fake WSGI app.""" super(LimitMiddlewareTest, self).setUp() _limits = '(GET, *, .*, 1, MINUTE)' self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, "%s.MockLimiter" % self.__class__.__module__) def test_limit_class(self): # Test that middleware selected correct limiter class. self.assertIsInstance(self.app._limiter, MockLimiter) def test_good_request(self): # Test successful GET request through middleware. request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) def test_limited_request_json(self): # Test a rate-limited (429) GET request through middleware. request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(response.status_int, 429) self.assertIn('Retry-After', response.headers) retry_after = int(response.headers['Retry-After']) self.assertAlmostEqual(retry_after, 60, 1) body = jsonutils.loads(response.body) expected = "Only 1 GET request(s) can be made to * every minute." value = body["overLimit"]["details"].strip() self.assertEqual(value, expected) self.assertIn("retryAfter", body["overLimit"]) retryAfter = body["overLimit"]["retryAfter"] self.assertEqual(retryAfter, "60") def test_limited_request_xml(self): # Test a rate-limited (429) response as XML. request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) request = webob.Request.blank("/") request.accept = "application/xml" response = request.get_response(self.app) self.assertEqual(response.status_int, 429) root = minidom.parseString(response.body).childNodes[0] expected = "Only 1 GET request(s) can be made to * every minute." self.assertIsNotNone(root.attributes.getNamedItem("retryAfter")) retryAfter = root.attributes.getNamedItem("retryAfter").value self.assertEqual(retryAfter, "60") details = root.getElementsByTagName("details") self.assertEqual(details.length, 1) value = details.item(0).firstChild.data.strip() self.assertEqual(value, expected) class LimitTest(BaseLimitTestSuite): """Tests for the `limits.Limit` class.""" def test_GET_no_delay(self): # Test a limit handles 1 GET per second. limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(0, limit.next_request) self.assertEqual(0, limit.last_request) def test_GET_delay(self): # Test two calls to 1 GET per second limit. 
        limit = limits.Limit("GET", "*", ".*", 1, 1)

        delay = limit("GET", "/anything")
        self.assertIsNone(delay)

        delay = limit("GET", "/anything")
        self.assertEqual(1, delay)
        self.assertEqual(1, limit.next_request)
        self.assertEqual(0, limit.last_request)

        self.time += 4

        delay = limit("GET", "/anything")
        self.assertIsNone(delay)
        self.assertEqual(4, limit.next_request)
        self.assertEqual(4, limit.last_request)


class ParseLimitsTest(BaseLimitTestSuite):
    """Tests for the default limits parser in the in-memory
    `limits.Limiter` class.
    """

    def test_invalid(self):
        # Test that parse_limits() handles invalid input correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          ';;;;;')

    def test_bad_rule(self):
        # Test that parse_limits() handles bad rules correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          'GET, *, .*, 20, minute')

    def test_missing_arg(self):
        # Test that parse_limits() handles missing args correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20)')

    def test_bad_value(self):
        # Test that parse_limits() handles bad values correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, foo, minute)')

    def test_bad_unit(self):
        # Test that parse_limits() handles bad units correctly.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20, lightyears)')

    def test_multiple_rules(self):
        # Test that parse_limits() handles multiple rules correctly.
        try:
            l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
                                            '(PUT, /foo*, /foo.*, 10, hour);'
                                            '(POST, /bar*, /bar.*, 5, second);'
                                            '(Say, /derp*, /derp.*, 1, day)')
        except ValueError as e:
            assert False, str(e)

        # Make sure the number of returned limits are correct
        self.assertEqual(len(l), 4)

        # Check all the verbs...
        expected = ['GET', 'PUT', 'POST', 'SAY']
        self.assertEqual([t.verb for t in l], expected)

        # ...the URIs...
        expected = ['*', '/foo*', '/bar*', '/derp*']
        self.assertEqual([t.uri for t in l], expected)

        # ...the regexes...
        expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
        self.assertEqual([t.regex for t in l], expected)

        # ...the values...
        expected = [20, 10, 5, 1]
        self.assertEqual([t.value for t in l], expected)

        # ...and the units...
        expected = [utils.TIME_UNITS['MINUTE'], utils.TIME_UNITS['HOUR'],
                    utils.TIME_UNITS['SECOND'], utils.TIME_UNITS['DAY']]
        self.assertEqual([t.unit for t in l], expected)


class LimiterTest(BaseLimitTestSuite):
    """Tests for the in-memory `limits.Limiter` class."""

    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        userlimits = {'limits.user3': '',
                      'limits.user0': '(get, *, .*, 4, minute);'
                                      '(put, *, .*, 2, minute)'}
        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)

    def _check(self, num, verb, url, username=None):
        """Check and yield results from checks."""
        for x in xrange(num):
            yield self.limiter.check_for_delay(verb, url, username)[0]

    def _check_sum(self, num, verb, url, username=None):
        """Check and sum results from checks."""
        results = self._check(num, verb, url, username)
        return sum(item for item in results if item)

    def test_no_delay_GET(self):
        """Simple test to ensure no delay on a single call for a limit verb we
        didn't set.
        """
        delay = self.limiter.check_for_delay("GET", "/anything")
        self.assertEqual(delay, (None, None))

    def test_no_delay_PUT(self):
        # Simple test to ensure no delay on a single call for a known limit.
        delay = self.limiter.check_for_delay("PUT", "/anything")
        self.assertEqual(delay, (None, None))

    def test_delay_PUT(self):
        """Ensure the 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_POST(self):
        """Ensure the 8th POST will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 7
        results = list(self._check(7, "POST", "/anything"))
        self.assertEqual(expected, results)

        expected = 60.0 / 7.0
        results = self._check_sum(1, "POST", "/anything")
        self.assertAlmostEqual(expected, results, 8)

    def test_delay_GET(self):
        # Ensure the 11th GET will result in NO delay.
        expected = [None] * 11
        results = list(self._check(11, "GET", "/anything"))
        self.assertEqual(expected, results)

        expected = [None] * 4 + [15.0]
        results = list(self._check(5, "GET", "/foo", "user0"))
        self.assertEqual(expected, results)

    def test_delay_PUT_servers(self):
        """Ensure PUT on /servers limits at 5 requests, and PUT elsewhere is
        still OK after 5 requests...but then after 11 total requests, PUT
        limiting kicks in.
        """
        # First 6 requests on PUT /servers
        expected = [None] * 5 + [12.0]
        results = list(self._check(6, "PUT", "/servers"))
        self.assertEqual(expected, results)

        # Next 5 requests on PUT /anything
        expected = [None] * 4 + [6.0]
        results = list(self._check(5, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_PUT_wait(self):
        """Ensure after hitting the limit and then waiting for
        the correct amount of time, the limit will be lifted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

        # Advance time
        self.time += 6.0

        expected = [None, 6.0]
        results = list(self._check(2, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_multiple_delays(self):
        # Ensure multiple requests still get a delay.
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything"))
        self.assertEqual(expected, results)

        self.time += 1.0

        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything"))
        self.assertEqual(expected, results)

        expected = [None] * 2 + [30.0] * 8
        results = list(self._check(10, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

    def test_user_limit(self):
        # Test user-specific limits.
        self.assertEqual(self.limiter.levels['user3'], [])
        self.assertEqual(len(self.limiter.levels['user0']), 2)

    def test_multiple_users(self):
        # Tests involving multiple users.
        # User0
        expected = [None] * 2 + [30.0] * 8
        results = list(self._check(10, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

        # User1
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)

        # User2
        expected = [None] * 10 + [6.0] * 5
        results = list(self._check(15, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)

        # User3
        expected = [None] * 20
        results = list(self._check(20, "PUT", "/anything", "user3"))
        self.assertEqual(expected, results)

        self.time += 1.0

        # User1 again
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)

        self.time += 1.0

        # User2 again
        expected = [4.0] * 5
        results = list(self._check(5, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)

        # User0 again
        expected = [28.0]
        results = list(self._check(1, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

        self.time += 28.0

        expected = [None, 30.0]
        results = list(self._check(2, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)


class WsgiLimiterTest(BaseLimitTestSuite):
    """Tests for `limits.WsgiLimiter` class."""

    def setUp(self):
        """Run before each test."""
        super(WsgiLimiterTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)

    def _request_data(self, verb, path):
        """Get data describing a limit request verb/path."""
        return jsonutils.dumps({"verb": verb, "path": path})

    def _request(self, verb, url, username=None):
        """Make sure that POSTing to the given url causes the given username
        to perform the given action.  Make the internal rate limiter return
        delay and make sure that the WSGI app returns the correct response.
        """
        if username:
            request = webob.Request.blank("/%s" % username)
        else:
            request = webob.Request.blank("/")

        request.method = "POST"
        request.body = self._request_data(verb, url)
        response = request.get_response(self.app)

        if "X-Wait-Seconds" in response.headers:
            self.assertEqual(response.status_int, 403)
            return response.headers["X-Wait-Seconds"]

        self.assertEqual(response.status_int, 204)

    def test_invalid_methods(self):
        # Only POSTs should work.
requests = [] for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: request = webob.Request.blank("/", method=method) response = request.get_response(self.app) self.assertEqual(response.status_int, 405) def test_good_url(self): delay = self._request("GET", "/something") self.assertIsNone(delay) def test_escaping(self): delay = self._request("GET", "/something/jump%20up") self.assertIsNone(delay) def test_response_to_delays(self): delay = self._request("GET", "/delayed") self.assertIsNone(delay) delay = self._request("GET", "/delayed") self.assertEqual(delay, '60.00') def test_response_to_delays_usernames(self): delay = self._request("GET", "/delayed", "user1") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user2") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user1") self.assertEqual(delay, '60.00') delay = self._request("GET", "/delayed", "user2") self.assertEqual(delay, '60.00') class FakeHttplibSocket(object): """Fake `httplib.HTTPResponse` replacement.""" def __init__(self, response_string): """Initialize new `FakeHttplibSocket`.""" self._buffer = StringIO.StringIO(response_string) def makefile(self, _mode, _other): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """Fake `httplib.HTTPConnection`.""" def __init__(self, app, host): """Initialize `FakeHttplibConnection`.""" self.app = app self.host = host def request(self, method, path, body="", headers=None): """Requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into an `httplib.HTTPResponse`. """ if not headers: headers = {} req = webob.Request.blank(path) req.method = method req.headers = headers req.host = self.host req.body = body resp = str(req.get_response(self.app)) resp = "HTTP/1.0 %s" % resp sock = FakeHttplibSocket(resp) self.http_response = httplib.HTTPResponse(sock) self.http_response.begin() def getresponse(self): """Return our generated response from the request.""" return self.http_response def wire_HTTPConnection_to_WSGI(host, app): """Monkeypatches HTTPConnection so that if you try to connect to host, you are instead routed straight to the given WSGI app. After calling this method, when any code calls httplib.HTTPConnection(host) the connection object will be a fake. Its requests will be sent directly to the given WSGI app rather than through a socket. Code connecting to hosts other than host will not be affected. This method may be called multiple times to map different hosts to different apps. This method returns the original HTTPConnection object, so that the caller can restore the default HTTPConnection interface (for all hosts). """ class HTTPConnectionDecorator(object): """Wraps the real HTTPConnection class so that when you instantiate the class you might instead get a fake instance. """ def __init__(self, wrapped): self.wrapped = wrapped def __call__(self, connection_host, *args, **kwargs): if connection_host == host: return FakeHttplibConnection(app, host) else: return self.wrapped(connection_host, *args, **kwargs) oldHTTPConnection = httplib.HTTPConnection httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection) return oldHTTPConnection class WsgiLimiterProxyTest(BaseLimitTestSuite): """Tests for the `limits.WsgiLimiterProxy` class.""" def setUp(self): """Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `httplib` library. 
""" super(WsgiLimiterProxyTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) self.oldHTTPConnection = ( wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") def test_200(self): # Successful request test. delay = self.proxy.check_for_delay("GET", "/anything") self.assertEqual(delay, (None, None)) def test_403(self): # Forbidden request test. delay = self.proxy.check_for_delay("GET", "/delayed") self.assertEqual(delay, (None, None)) delay, error = self.proxy.check_for_delay("GET", "/delayed") error = error.strip() expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be " "made to /delayed every minute.") self.assertEqual((delay, error), expected) def tearDown(self): # restore original HTTPConnection object httplib.HTTPConnection = self.oldHTTPConnection super(WsgiLimiterProxyTest, self).tearDown() class LimitsViewBuilderTest(test.NoDBTestCase): def setUp(self): super(LimitsViewBuilderTest, self).setUp() self.view_builder = views.limits.ViewBuilder() self.rate_limits = [{"URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226}, {"URI": "*/servers", "regex": "^/servers", "value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "resetTime": 1311272226}] self.absolute_limits = {"metadata_items": 1, "injected_files": 5, "injected_file_content_bytes": 5} def test_build_limits(self): expected_limits = {"limits": { "rate": [{ "uri": "*", "regex": ".*", "limit": [{"value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": "2011-07-21T18:17:06Z"}]}, {"uri": "*/servers", "regex": "^/servers", "limit": [{"value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": "2011-07-21T18:17:06Z"}]}], "absolute": {"maxServerMeta": 1, "maxImageMeta": 1, "maxPersonality": 5, "maxPersonalitySize": 5}}} output = self.view_builder.build(self.rate_limits, self.absolute_limits) self.assertThat(output, matchers.DictMatches(expected_limits)) def test_build_limits_empty_limits(self): expected_limits = {"limits": {"rate": [], "absolute": {}}} abs_limits = {} rate_limits = [] output = self.view_builder.build(rate_limits, abs_limits) self.assertThat(output, matchers.DictMatches(expected_limits)) class LimitsXMLSerializationTest(test.NoDBTestCase): def test_xml_declaration(self): serializer = limits.LimitsTemplate() fixture = {"limits": { "rate": [], "absolute": {}}} output = serializer.serialize(fixture) has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>") self.assertTrue(has_dec) def test_index(self): serializer = limits.LimitsTemplate() fixture = { "limits": { "rate": [{ "uri": "*", "regex": ".*", "limit": [{ "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": "2011-12-15T22:42:45Z"}]}, {"uri": "*/servers", "regex": "^/servers", "limit": [{ "value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": "2011-12-15T22:42:45Z"}]}], "absolute": {"maxServerMeta": 1, "maxImageMeta": 1, "maxPersonality": 5, "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture) root = etree.XML(output) xmlutil.validate_schema(root, 'limits') #verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 4) for limit in absolutes: name = limit.get('name') value = limit.get('value') self.assertEqual(value, str(fixture['limits']['absolute'][name])) #verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) 
self.assertEqual(len(rates), 2) for i, rate in enumerate(rates): for key in ['uri', 'regex']: self.assertEqual(rate.get(key), str(fixture['limits']['rate'][i][key])) rate_limits = rate.xpath('ns:limit', namespaces=NS) self.assertEqual(len(rate_limits), 1) for j, limit in enumerate(rate_limits): for key in ['verb', 'value', 'remaining', 'unit', 'next-available']: self.assertEqual(limit.get(key), str(fixture['limits']['rate'][i]['limit'][j][key])) def test_index_no_limits(self): serializer = limits.LimitsTemplate() fixture = {"limits": { "rate": [], "absolute": {}}} output = serializer.serialize(fixture) root = etree.XML(output) xmlutil.validate_schema(root, 'limits') #verify absolute limits absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 0) #verify rate limits rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(len(rates), 0)
apache-2.0
-5,357,864,286,038,564,000
35.436864
79
0.532433
false
jswope00/GAI
lms/envs/acceptance.py
2
6284
""" This config file extends the test environment configuration so that we can run the lettuce acceptance tests. """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=W0401, W0614 from .test import * from .sauce import * # You need to start the server in debug mode, # otherwise the browser will not render the pages correctly DEBUG = True SITE_NAME = 'localhost:{}'.format(LETTUCE_SERVER_PORT) # Output Django logs to a file import logging logging.basicConfig(filename=TEST_ROOT / "log" / "lms_acceptance.log", level=logging.ERROR) import os from random import choice import string def seed(): return os.getppid() # Suppress error message "Cannot determine primary key of logged in user" # from track.middleware that gets triggered when using an auto_auth workflow # This is an ERROR level warning so we need to set the threshold at CRITICAL logging.getLogger('track.middleware').setLevel(logging.CRITICAL) # Use the mongo store for acceptance tests DOC_STORE_CONFIG = { 'host': 'localhost', 'db': 'acceptance_xmodule', 'collection': 'acceptance_modulestore_%s' % seed(), } modulestore_options = { 'default_class': 'xmodule.hidden_module.HiddenDescriptor', 'fs_root': TEST_ROOT / "data", 'render_template': 'edxmako.shortcuts.render_to_string', } MODULESTORE = { 'default': { 'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore', 'OPTIONS': { 'mappings': {}, 'stores': { 'default': { 'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore', 'DOC_STORE_CONFIG': DOC_STORE_CONFIG, 'OPTIONS': modulestore_options } } } } } MODULESTORE['direct'] = MODULESTORE['default'] CONTENTSTORE = { 'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore', 'DOC_STORE_CONFIG': { 'host': 'localhost', 'db': 'acceptance_xcontent_%s' % seed(), } } # Set this up so that rake lms[acceptance] and running the # harvest command both use the same (test) database # which they can flush without messing up your dev db DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': TEST_ROOT / "db" / "test_edx.db", 'TEST_NAME': TEST_ROOT / "db" / "test_edx.db", } } TRACKING_BACKENDS.update({ 'mongo': { 'ENGINE': 'track.backends.mongodb.MongoBackend' } }) # Enable asset pipeline # Our fork of django-pipeline uses `PIPELINE` instead of `PIPELINE_ENABLED` # PipelineFinder is explained here: http://django-pipeline.readthedocs.org/en/1.1.24/storages.html PIPELINE = True STATICFILES_FINDERS += ('pipeline.finders.PipelineFinder', ) BULK_EMAIL_DEFAULT_FROM_EMAIL = "[email protected]" # Forums are disabled in test.py to speed up unit tests, but we do not have # per-test control for lettuce acceptance tests. # If you are writing an acceptance test that needs the discussion service enabled, # do not write it in lettuce, but instead write it using bok-choy. # DO NOT CHANGE THIS SETTING HERE. FEATURES['ENABLE_DISCUSSION_SERVICE'] = False # Use the auto_auth workflow for creating users and logging them in FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True # Enable fake payment processing page FEATURES['ENABLE_PAYMENT_FAKE'] = True # Enable email on the instructor dash FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Don't actually send any requests to Software Secure for student identity # verification. 
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True # Configure the payment processor to use the fake processing page # Since both the fake payment page and the shoppingcart app are using # the same settings, we can generate this randomly and guarantee # that they are using the same secret. RANDOM_SHARED_SECRET = ''.join( choice(string.letters + string.digits + string.punctuation) for x in range(250) ) CC_PROCESSOR['CyberSource']['SHARED_SECRET'] = RANDOM_SHARED_SECRET CC_PROCESSOR['CyberSource']['MERCHANT_ID'] = "edx" CC_PROCESSOR['CyberSource']['SERIAL_NUMBER'] = "0123456789012345678901" CC_PROCESSOR['CyberSource']['PURCHASE_ENDPOINT'] = "/shoppingcart/payment_fake" # HACK # Setting this flag to false causes imports to not load correctly in the lettuce python files # We do not yet understand why this occurs. Setting this to true is a stopgap measure USE_I18N = True FEATURES['ENABLE_FEEDBACK_SUBMISSION'] = True FEEDBACK_SUBMISSION_EMAIL = '[email protected]' # Include the lettuce app for acceptance testing, including the 'harvest' django-admin command INSTALLED_APPS += ('lettuce.django',) LETTUCE_APPS = ('courseware', 'instructor',) # Lettuce appears to have a bug that causes it to search # `instructor_task` when we specify the `instructor` app. # This causes some pretty cryptic errors as lettuce tries # to parse files in `instructor_task` as features. # As a quick workaround, explicitly exclude the `instructor_task` app. LETTUCE_AVOID_APPS = ('instructor_task',) LETTUCE_BROWSER = os.environ.get('LETTUCE_BROWSER', 'chrome') # Where to run: local, saucelabs, or grid LETTUCE_SELENIUM_CLIENT = os.environ.get('LETTUCE_SELENIUM_CLIENT', 'local') SELENIUM_GRID = { 'URL': 'http://127.0.0.1:4444/wd/hub', 'BROWSER': LETTUCE_BROWSER, } ##################################################################### # See if the developer has any local overrides. try: from .private import * # pylint: disable=F0401 except ImportError: pass # Because an override for where to run will affect which ports to use, # set these up after the local overrides. # Configure XQueue interface to use our stub XQueue server XQUEUE_INTERFACE = { "url": "http://127.0.0.1:{0:d}".format(XQUEUE_PORT), "django_auth": { "username": "lms", "password": "***REMOVED***" }, "basic_auth": ('anant', 'agarwal'), } # Point the URL used to test YouTube availability to our stub YouTube server YOUTUBE['API'] = 'youtube.com/iframe_api' YOUTUBE['TEST_URL'] = "127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT) YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
agpl-3.0
8,061,897,684,150,476,000
32.604278
98
0.695576
false
justbrowsing/justbrowsing-adeskbar
src/adesk/0bar.py
1
33894
# -*- coding: utf-8 -*- # python modules import os import sys # extra modules import gtk import cairo import gobject import traceback try: import keybinder KEYBINDER_PRESENT = True except: KEYBINDER_PRESENT = False # adeskbar modules import ui import barconf import config import desktop import core import draw #~ import rundialog #~ import terminal #~ from draw import * ID_CMD, ID_ICON, ID_NAME = 0, 1, 2 ## Only for debugging : False / True DEBUG = 0 ## Icon theme ICON_THEME = gtk.icon_theme_get_default() class BarManager(): """ class App - main bar config/function """ def __init__(self, cfg_file): self.cfg_file = cfg_file ## Init some var. self.plg_mgr = None self.tooltip = None self.bar_conf = None self.win = None self.init_flag = False self.bar_hidden = False self.mouse_over = False self.can_hide = True self.last_event_time = None if desktop.HAS_WNCK: self.wnck = desktop.Wnck(self) else: self.wnck = None ## Load user/default config self.load_config() self.create_menu() self.init_bar_callback() #~ self.rundialog = rundialog.RunDialog(self) #~ self.terminal = terminal.Terminal(self) ## global keybind #~ gobject.timeout_add(2000, self.set_keybind) def set_keybind(self): keystr = "<Super>r" ret = keybinder.bind(keystr, self.keybinder_callback, self.rundialog) print 'retour keybind :', ret #~ keystr = "<Super>space" #~ keybinder.bind(keystr, self.keybinder_callback, self.terminal) def keybinder_callback(self, user_data): user_data.toggle() def create_bar(self): """ create and configure gtk.Window (bar) """ core.logINFO('create_bar', 'bar') self.win = ui.Window() self.win.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DOCK) self.win.set_title("ADeskBar") self.win.set_name("ADeskBar") self.is_composited = self.win.is_composited() self.set_geometry() def set_geometry(self): if self.cfg['fixed_mode']: screen_width, screen_height = gtk.gdk.screen_width(), gtk.gdk.screen_height() padding = max(self.cfg['padding'], self.cfg['icon_size'] * self.cfg['zoom_factor'] - self.cfg['icon_size']) min_size = int(padding + self.cfg['padding'] + self.cfg['icon_size']) if self.cfg['position'] == "bottom" or self.cfg['position'] == "top": req_size = int(screen_width * self.cfg['fixed_size']/100.0) self.win.set_geometry_hints(None, min_width=req_size, min_height=min_size, max_width=req_size, max_height=min_size, base_width=-1, base_height=-1, width_inc=-1, height_inc=-1, min_aspect=-1.0, max_aspect=-1.0) else: req_size = int(screen_height * self.cfg['fixed_size']/100.0) self.win.set_geometry_hints(None, min_width=min_size, min_height=req_size, max_width=min_size, max_height=req_size, base_width=-1, base_height=-1, width_inc=-1, height_inc=-1, min_aspect=-1.0, max_aspect=-1.0) else: self.win.set_geometry_hints(None, min_width=-1, min_height=-1, max_width=-1, max_height=-1, base_width=-1, base_height=-1, width_inc=-1, height_inc=-1, min_aspect=-1.0, max_aspect=-1.0) def init_bar_callback(self): ## Window callback self.win.connect("button_press_event", self.bar_released) self.win.connect("leave-notify-event", self.bar_leave_notify) self.win.connect("enter-notify-event", self.bar_enter_notify) self.win.connect('expose-event', self.expose) self.win.connect('screen-changed', self.reposition) self.win.connect('size-allocate', self.win_size_allocate) self.win.connect("realize", self.update_strut) self.win.connect("composited-changed", self.composite_changed) def composite_changed(self, widget): self.is_composited = self.win.is_composited() self.update_all() def update_strut(self, widget): """ """ # window need to be realize 
before change strut if widget.window == None: return # reset struct widget.window.property_change("_NET_WM_STRUT", "CARDINAL", 32, gtk.gdk.PROP_MODE_REPLACE, [0,0,0,0]) # only set strut if "panel" mode if not (self.cfg['fixed_mode'] and self.cfg['reserve_space']): return x, y, w, h = widget.get_allocation() if self.cfg['position'] == "bottom" or self.cfg['position'] == "top": h = self.cfg['icon_size'] + 2*self.cfg['padding'] else: w = self.cfg['icon_size'] + 2*self.cfg['padding'] if self.cfg['auto_hide'] and self.bar_hidden: if self.cfg['position'] == "bottom" or self.cfg['position'] == "top": h = self.cfg['hidden_size'] else: w = self.cfg['hidden_size'] if self.cfg['position'] == "bottom": if not self.bar_hidden and not self.cfg['bar_style'] == 0: h += self.cfg['offset_pos'] widget.window.property_change("_NET_WM_STRUT", "CARDINAL", 32, gtk.gdk.PROP_MODE_REPLACE, [0,0,0,h]) elif self.cfg['position'] == "top": if not self.bar_hidden and not self.cfg['bar_style'] == 0: h += self.cfg['offset_pos'] widget.window.property_change("_NET_WM_STRUT", "CARDINAL", 32, gtk.gdk.PROP_MODE_REPLACE, [0,0,h,0]) elif self.cfg['position'] == "left": if not self.bar_hidden and not self.cfg['bar_style'] == 0: w += self.cfg['offset_pos'] widget.window.property_change("_NET_WM_STRUT", "CARDINAL", 32, gtk.gdk.PROP_MODE_REPLACE, [w,0,0,0]) elif self.cfg['position'] == "right": if not self.bar_hidden and not self.cfg['bar_style'] == 0: w += self.cfg['offset_pos'] widget.window.property_change("_NET_WM_STRUT", "CARDINAL", 32, gtk.gdk.PROP_MODE_REPLACE, [0,w,0,0]) def win_size_allocate(self, widget, allocation): self.init_bar_pos() self.bar_move() def restart(self, widget=None): self.win.hide() for index in self.plg_mgr.plugins: self.plg_mgr.plugins[index].destroy() self.win.destroy() self.load_config() def create_menu(self): ## Edit preferences self.popupMenu = gtk.Menu() menuPopup = gtk.ImageMenuItem(gtk.STOCK_PREFERENCES) menuPopup.connect("activate", self.edit_config) self.popupMenu.add(menuPopup) ## Quit (really ?) menuPopup = gtk.ImageMenuItem(gtk.STOCK_QUIT) menuPopup.connect("activate", self.doquit) self.popupMenu.add(menuPopup) self.popupMenu.show_all() def resize_and_seticon(self, data=None): # resize and update icon for all plugins for ind in self.plg_mgr.plugins: self.plg_mgr.plugins[ind].resize() self.plg_mgr.plugins[ind].set_icon() self.plg_mgr.plugins[ind].restart() self.update_all() ## call from timer .. return False def load_config(self): self.cfg, self.launcher, self.drawer = config.read(self.cfg_file) ## If intellihide and wnck loaded if self.cfg['auto_hide'] == 2 and not self.wnck: # no wnck module ? fallback to autohide core.logINFO('intellihide : no wnck module found .. 
fallback to autohide', 'bar') self.cfg['auto_hide'] = 1 self.wnck = None self.zoom_size = self.cfg['icon_size'] * self.cfg['zoom_factor'] * 1.0 # timer for leave_bar callback self.timer_auto_hide = None # timer for smooth_hidding self.timer_smooth_hide = None # use for animate hiding self.moving = False self.count = 0 self.countdown = 0 self.timer_anim = None # middle click - Toggle always visible self.always_visible = False # launcher ( for mouseover/click ) self.focus = None self.widget_pressed = False self.anim = 1 self.fade = True self.anim_cpt = 0 self.anim_flag = True # flag for plugin self.opened_popup = None self.lock_auto_hide = False ## convert color hex->rgb self.cfg['bg_color_rgb'] = core.hex2rgb(self.cfg['background_color']) self.cfg['border_color_rgb'] = core.hex2rgb(self.cfg['border_color']) self.cfg['bg_color_sub_rgb'] = core.hex2rgb(self.cfg['bg_color_sub']) self.cfg['border_color_sub_rgb'] = core.hex2rgb(self.cfg['border_color_sub']) self.cfg['bg_gradient_color_rgb'] = core.hex2rgb(self.cfg['background_gradient_color']) self.pixbuf_glow = gtk.gdk.pixbuf_new_from_file('images/pixmaps/button.png') self.pixbuf_pressed = gtk.gdk.pixbuf_new_from_file('images/pixmaps/launcher.png') ## Create main bar self.create_bar() self.set_below_or_above() ## tooltip if self.tooltip: self.tooltip.destroy() self.tooltip = None if self.cfg['tooltips']: self.tooltip = ui.TooltipWindow(self) # create a new plugin manager self.plg_mgr = PluginManager(self) # and start to pack plugin .. for ind in self.cfg['ind_launcher']: self.plg_mgr.append(ind, self.launcher[ind]) self.plg_mgr.run() # start bar callback self.init_bar_callback() ## FIXME! ## gtk.Window doesn't stick after reload config ?! self.win.realize() self.win.stick() self.reposition() self.win.show_all() # init all plugins self.plg_mgr.on_init() ## FIXME!! # sometimes reposition doesn't work :/ .. 
quick hack gobject.timeout_add(500, self.reposition) if DEBUG and not 1: for index in self.plg_mgr.index: print ('------------------------------------') for val in self.plg_mgr.plugins[index].settings: print ('%s = %s' % (val, self.plg_mgr.plugins[index].settings[val])) print ('widget :', self.plg_mgr.plugins[index]) print ('index :', self.plg_mgr.plugins[index].index) print ('------------------------------------\n') def set_below_or_above(self): if self.cfg['keep_below']: self.win.set_keep_below(True) self.win.set_keep_above(False) else: self.win.set_keep_above(True) self.win.set_keep_below(False) def reposition(self): if self.cfg['fixed_mode']: screen_width, screen_height = gtk.gdk.screen_width(), gtk.gdk.screen_height() if self.cfg['position'] == "bottom" or self.cfg['position'] == "top": req_size = int(screen_width * self.cfg['fixed_size']/100.0) self.win.resize(req_size, 1) else: req_size = int(screen_height * self.cfg['fixed_size']/100.0) self.win.resize(1, req_size) else: self.win.resize(1, 1) self.bar_move() self.toggle_hidden() # Intellihide if self.wnck: self.check_window_state() self.update() return False def expose(self, widget, event): if self.is_composited: cr = self.win.window.cairo_create() ## Full transparent window cr.set_source_rgba(0, 0, 0, 0) cr.set_operator(cairo.OPERATOR_SOURCE) cr.paint() else: self.opacity = 1 x, y, width, height = self.win.get_allocation() pixmap = gtk.gdk.Pixmap(None, width, height, 1) cr = pixmap.cairo_create() # Clear the bitmap to False cr.set_source_rgb(0, 0, 0) cr.set_operator(cairo.OPERATOR_DEST_OUT) cr.paint() ## Draw next over cr.set_operator(cairo.OPERATOR_OVER) rect = self.win.get_allocation() cr.set_source_rgb(1, 1, 1) if self.cfg['bar_style'] == 0: # Edgy draw.rounded_rect2(cr, rect, self.cfg['rounded_corner'], self.cfg['position'], fill=True) elif self.cfg['bar_style'] == 1: # Floaty draw.rounded_rect(cr, rect, self.cfg['rounded_corner'], self.cfg['position'], fill=True) else: draw.rounded_rect(cr, rect, 0, self.cfg['position'], fill=True) self.win.shape_combine_mask(pixmap, 0, 0) cr = self.win.window.cairo_create() if (self.bar_hidden and self.cfg['fade_hidden']) or not self.init_flag: return False ## Draw next over 'transparent window' cr.set_operator(cairo.OPERATOR_OVER) ## paint background cr.set_source_surface(self.bg_surface, 0, 0) cr.paint() if DEBUG: x, y, width, height = self.win.get_allocation() cr.set_source_rgb(1, 0.2, 0.2) cr.set_line_width(1) cr.rectangle(x, y, width, height) cr.stroke() x, y, width, height = self.plg_mgr.box.get_allocation() cr.set_source_rgb(0.2, 1, 0.2) cr.set_line_width(1) cr.rectangle(x, y, width, height) cr.stroke() return False def draw_bg(self): if self.cfg['bar_style'] == 3 and not self.is_composited: self.cfg['bar_style'] == 2 if not self.is_composited: self.opacity = 1 else: self.opacity = self.cfg['opacity']/100.0 cr = cairo.Context(self.bg_surface) cr.set_source_rgba(0, 0, 0, 0) cr.set_operator(cairo.OPERATOR_SOURCE) cr.paint() cr.set_operator(cairo.OPERATOR_OVER) cr.set_line_width(1) if self.bar_hidden or not self.is_composited: rect = self.win.get_allocation() else: rect = self.draw_x, self.draw_y, self.draw_width, self.draw_height cr.save() r, g, b = self.cfg['bg_color_rgb'] cr.set_source_rgba(r, g, b, self.opacity) if self.cfg['bar_style'] == 0: # Edgy draw.rounded_rect2(cr, rect, self.cfg['rounded_corner'], self.cfg['position'], fill=True) elif self.cfg['bar_style'] == 1: # Floaty draw.rounded_rect(cr, rect, self.cfg['rounded_corner'], self.cfg['position'], fill=True) elif 
self.cfg['bar_style'] == 2: # 3d draw.trapeze(cr, rect, self.cfg['rounded_corner'], self.cfg['position'], fill=True) if self.cfg['bg_gradient']: r1, g1, b2 = self.cfg['bg_gradient_color_rgb'] lg = draw.gradient_color2trans(r1, g1, b2, rect, self.opacity, self.cfg['position'], invert=False) cr.set_source(lg) if self.cfg['bar_style'] == 0: # Edgy draw.rounded_rect2(cr, rect, self.cfg['rounded_corner'], self.cfg['position'], fill=True) elif self.cfg['bar_style'] == 1: # Floaty draw.rounded_rect(cr, rect, self.cfg['rounded_corner'], self.cfg['position'], fill=True) elif self.cfg['bar_style'] == 2: # 3d draw.trapeze(cr, rect, self.cfg['rounded_corner'], self.cfg['position'], fill=True) if self.cfg['show_border']: r, g, b = self.cfg['border_color_rgb'] cr.set_source_rgba(r, g, b, self.opacity) rect = rect[0]+1, rect[1]+1, rect[2]-2, rect[3]-2 if self.cfg['bar_style'] == 0: # Edgy draw.rounded_rect2(cr, rect, self.cfg['rounded_corner'], self.cfg['position']) elif self.cfg['bar_style'] == 1: # Floaty draw.rounded_rect(cr, rect, self.cfg['rounded_corner'], self.cfg['position']) elif self.cfg['bar_style'] == 2: # 3d draw.trapeze(cr, rect, self.cfg['rounded_corner'], self.cfg['position']) def init_bar_pos(self): self.bar_width , self.bar_height = self.win.get_size() screen_width, screen_height = gtk.gdk.screen_width(), gtk.gdk.screen_height() if not self.is_composited: bar_size = 1 else: bar_size = self.cfg['bar_size']/100.0 if self.cfg['position'] == "bottom": if self.cfg['bar_style'] == 0: self.bar_pos_y = screen_height - self.bar_height + 1 else: self.bar_pos_y = screen_height - self.bar_height - self.cfg['offset_pos'] if self.cfg['align'] == "start": self.bar_pos_x = 0 + self.cfg['offset_align'] elif self.cfg['align'] == "center": self.bar_pos_x = ( screen_width - self.bar_width ) // 2 elif self.cfg['align'] == "end": self.bar_pos_x = screen_width - self.bar_width - self.cfg['offset_align'] self.bar_hide_y = screen_height - self.cfg['hidden_size'] self.bar_hide_x = self.bar_pos_x ## for expose self.draw_height = (2*self.cfg['padding']+self.cfg['icon_size'])*bar_size self.draw_width = self.bar_width self.draw_x = 0 self.draw_y = self.bar_height - (2*self.cfg['padding']+self.cfg['icon_size'])*bar_size elif self.cfg['position'] == "top": if self.cfg['bar_style'] == 0: self.bar_pos_y = -1 else: self.bar_pos_y = self.cfg['offset_pos'] if self.cfg['align'] == "start": self.bar_pos_x = self.cfg['offset_align'] elif self.cfg['align'] == "center": self.bar_pos_x = ( screen_width - self.bar_width ) // 2 elif self.cfg['align'] == "end": self.bar_pos_x = screen_width - self.bar_width - self.cfg['offset_align'] self.bar_hide_y = self.cfg['hidden_size'] - self.bar_height self.bar_hide_x = self.bar_pos_x ## for expose self.draw_height = (2*self.cfg['padding']+self.cfg['icon_size'])*bar_size self.draw_width = self.bar_width self.draw_x, self.draw_y = 0, 0 elif self.cfg['position'] == "left": if self.cfg['bar_style'] == 0: self.bar_pos_x = -1 else: self.bar_pos_x = self.cfg['offset_pos'] if self.cfg['align'] == "start": self.bar_pos_y = 0 + self.cfg['offset_align'] elif self.cfg['align'] == "center": self.bar_pos_y = (screen_height - self.bar_height) // 2 elif self.cfg['align'] == "end": self.bar_pos_y = screen_height - self.bar_height - self.cfg['offset_align'] self.bar_hide_y = self.bar_pos_y self.bar_hide_x = - self.bar_width + self.cfg['hidden_size'] ## for expose self.draw_height = self.bar_height self.draw_width = (2*self.cfg['padding']+self.cfg['icon_size'])*bar_size self.draw_x, self.draw_y = 0, 0 elif 
self.cfg['position'] == "right": if self.cfg['bar_style'] == 0: self.bar_pos_x = screen_width - self.bar_width +1 else: self.bar_pos_x = screen_width - self.bar_width - self.cfg['offset_pos'] if self.cfg['align'] == "start": self.bar_pos_y = 0 + self.cfg['offset_align'] elif self.cfg['align'] == "center": self.bar_pos_y = (screen_height - self.bar_height) // 2 elif self.cfg['align'] == "end": self.bar_pos_y = screen_height - self.bar_height - self.cfg['offset_align'] self.bar_hide_y = self.bar_pos_y self.bar_hide_x = screen_width - self.cfg['hidden_size'] ## for expose self.draw_height = self.bar_height self.draw_width = (2*self.cfg['padding']+self.cfg['icon_size'])*bar_size self.draw_x = self.bar_width - (2*self.cfg['padding']+self.cfg['icon_size'])*bar_size self.draw_y = 0 self.draw_width = int(self.draw_width) self.draw_height = int(self.draw_height) self.draw_x = int(self.draw_x) self.draw_y = int(self.draw_y) self.bg_surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, self.bar_width , self.bar_height) self.draw_bg() self.init_flag = True def launcher_leave_notify(self, plugin, event): plugin.focus = False self.anim_flag = True self.update() if self.cfg['tooltips']: self.tooltip.stop() def widget_enter_notify(self, plugin, event): plugin.focus = True ## tooltip if plugin.has_tooltip and self.cfg['tooltips']: self.tooltip.run(plugin) self.anim = 1 self.anim_cpt = 0 self.bar_enter_notify() self.update() return True def widget_press(self, widget, event): if event.button==1: widget.is_pressed = True self.update() if self.cfg['tooltips']: self.tooltip.stop() if event.button==2: return False def widget_released(self, widget, event): if event.button==1: widget.onClick(widget, event) widget.is_pressed = False self.update() if event.button==2: return False def update(self): self.win.queue_draw() return True def update_all(self): self.init_bar_pos() self.set_geometry() self.reposition() self.draw_bg() self.update() def check_window_state(self): if not self.init_flag: return if not (self.cfg['auto_hide'] == 2 and self.wnck): return if self.wnck.current_state and not self.bar_hidden and not self.always_visible: self.bar_hide() elif self.bar_hidden and not self.wnck.current_state: self.bar_hidden = False self.bar_move() def bar_move(self): if self.bar_hidden: self.win.move(self.bar_hide_x, self.bar_hide_y) else: self.win.move(self.bar_pos_x, self.bar_pos_y) self.update() self.update_strut(self.win) def toggle_hidden(self, widget=None , event=None): if self.bar_hidden: self.bar_hidden = False self.bar_move() elif self.cfg['auto_hide'] == 1: # autohide self.bar_hide() elif self.cfg['auto_hide'] == 2: # intellihide self.check_window_state() def bar_hide(self): if not self.can_hide: return if self.cfg['smooth_hide']: self.count = 14 ## 500 / 35 ms self.countdown = self.count self.moving = True self.timer_smooth_hide = gobject.timeout_add(35, self.on_timeout_hide) else: self.bar_hidden = True self.bar_move() def on_timeout_hide(self): self.countdown -= 1 N = self.count n = self.countdown x = self.bar_pos_x + ( (self.bar_hide_x - self.bar_pos_x) // N ) * (N-n) if self.cfg['position'] == "top": y = ( (self.bar_hide_y - self.bar_pos_y ) / N ) * (N-n) else: y = self.bar_pos_y + ( (self.bar_hide_y - self.bar_pos_y ) // N ) * (N-n) self.win.move(x, y) if self.countdown <= 0: self.bar_hidden = True self.bar_move() self.moving = False return False else: return True def bar_leave_notify(self, widget=None, event=None): if not self.timer_auto_hide == None: gobject.source_remove(self.timer_auto_hide) 
self.timer_auto_hide = None if (self.cfg['auto_hide'] == 1 or self.wnck) and self.can_hide and not self.always_visible: if self.cfg['timer'] == 0: ## minimum time because bar auto-hide it-self :( if self.cfg['offset_pos'] > 0: self.timer_auto_hide = gobject.timeout_add(500, self.on_timeout_notify) else: self.timer_auto_hide = gobject.timeout_add(100, self.on_timeout_notify) else: self.timer_auto_hide = gobject.timeout_add(self.cfg['timer']*1000, self.on_timeout_notify) self.focus = None self.mouse_over = False self.update() if self.cfg['tooltips']: self.tooltip.stop() return True def bar_enter_notify(self, widget=None, event=None): if self.cfg['auto_raise'] and self.bar_hidden: self.toggle_hidden() if not self.timer_auto_hide == None: gobject.source_remove(self.timer_auto_hide) self.timer_auto_hide = None if self.moving: self.moving = False self.bar_hidden = True gobject.source_remove(self.timer_smooth_hide) self.timer_smooth_hide = None self.toggle_hidden() self.mouse_over = True def bar_released(self, widget, event): ## FIXME! avoid double callback (I don't know why I receive twice) if self.last_event_time == event.time: return False self.last_event_time = event.time if event.button==3: # right click #~ if event.state == gtk.gdk.CONTROL_MASK | gtk.gdk.MOD2_MASK: self.popupMenu.popup(None, None, None, event.button, event.time) elif event.button==2: # middle click self.always_visible = not self.always_visible elif event.button==1 and self.bar_hidden: # left click self.toggle_hidden() def on_timeout_notify(self): ## autohide if self.cfg['auto_hide'] == 1 and not self.bar_hidden: self.toggle_hidden() ## intellihide elif self.wnck: self.check_window_state() if self.timer_auto_hide: gobject.source_remove(self.timer_auto_hide) self.timer_auto_hide = None return False def edit_config(self, widget): if not self.bar_conf: self.bar_conf = barconf.Conf(self) else: self.bar_conf.window.present() def doquit(self, widget=None, data=None): ## FIXME!! what to do now ? try to close adeskbar nicely .. self.win.hide() for ind in self.plg_mgr.plugins: self.plg_mgr.plugins[ind].stop() self.win.destroy() gtk.main_quit() def run(self): try: gtk.main() except KeyboardInterrupt: # ctrl-c ## FIXME!! what to do now ? try to close adeskbar nicely .. 
self.doquit() class PluginManager: """ Class AppManager - load/resize plugins for main bar """ def __init__( self, bar ): """ configure container for plugins """ self.bar = bar self.index = [] self.plugins = {} if bar.cfg['position'] == "top" or bar.cfg['position'] == "bottom": self.box = gtk.HBox(False, bar.cfg['icon_space']) else: self.box = gtk.VBox(False, bar.cfg['icon_space']) self.spacer_left_top = gtk.EventBox() self.spacer_left_bottom = gtk.EventBox() self.spacer_right = gtk.EventBox() if not DEBUG: self.spacer_left_top.set_visible_window(False) self.spacer_left_bottom.set_visible_window(False) self.spacer_right.set_visible_window(False) self.table = gtk.Table(3, 3, False) self.table.set_row_spacings(0) self.table.set_col_spacings(0) self.table.attach(self.spacer_left_top, 0, 1, 0, 1, xoptions=gtk.SHRINK, yoptions=gtk.SHRINK) self.table.attach(self.spacer_left_bottom, 0, 1, 2, 3, xoptions=gtk.SHRINK, yoptions=gtk.SHRINK) self.table.attach(self.spacer_right, 2, 3, 0, 1, xoptions=gtk.SHRINK, yoptions=gtk.SHRINK) if self.bar.cfg['fixed_mode']: self.table.attach(self.box, 1, 2, 1, 2, xoptions=gtk.EXPAND|gtk.FILL, yoptions=gtk.EXPAND|gtk.FILL) else: self.table.attach(self.box, 1, 2, 1, 2, xoptions=gtk.SHRINK, yoptions=gtk.SHRINK) bar.win.add(self.table) self.resize_spacer() self.table.show_all() self.box_alloc = self.box.get_allocation() def remove(self, index): self.index.remove(index) self.plugins[index].hide() self.plugins[index].destroy() self.plugins.pop(index) self.bar.reposition() def reorder(self, widget, position): self.box.reorder_child(widget, position) def on_init(self): for index in self.plugins: self.plugins[index].on_init() def run(self): #~ if not self.bar.cfg['fixed_mode']: self.box.connect('size-allocate', self.box_size_allocate) def box_size_allocate(self, widget, allocation): """ resize to minimum size and reposition """ if not self.box_alloc == allocation: self.box.set_size_request(-1, -1) self.bar.win.resize(1, 1) gobject.idle_add(self.bar.reposition) self.box_alloc = allocation def resize_spacer(self): """ configure main bar aspect from config """ cfg = self.bar.cfg padding = cfg['padding'] size, zoom_f, space = cfg['icon_size'], cfg['zoom_factor'], cfg['icon_space'] offset_top = max( padding, int(size * zoom_f - size) ) offset_side = 2*padding if cfg['position']=='bottom': self.spacer_left_top.set_size_request(offset_side, offset_top) self.spacer_left_bottom.set_size_request(offset_side, padding) self.spacer_right.set_size_request(offset_side, padding) elif cfg['position']=='top': self.spacer_left_top.set_size_request(offset_side, padding) self.spacer_left_bottom.set_size_request(offset_side, offset_top) self.spacer_right.set_size_request(offset_side, padding) elif cfg['position']=='left': self.spacer_left_top.set_size_request(padding, offset_side) self.spacer_left_bottom.set_size_request(padding, offset_side) self.spacer_right.set_size_request(offset_top, offset_side) elif cfg['position']=='right': self.spacer_left_top.set_size_request(offset_top, offset_side) self.spacer_left_bottom.set_size_request(offset_top, offset_side) self.spacer_right.set_size_request(padding, offset_side) def load_plugin(self, p, settings=None): """ load plugin as widget """ try: core.logINFO(("Loading plugin '%s' ..") % (p), 'bar') exec("import plugins.%s as plugin" % p) widget = plugin.Plugin(self.bar, settings) except Exception as e: core.logINFO(("EE : Unable to load plugin '%s': %s") % (p, e), 'bar') traceback.print_exc() return None return widget def append(self, index, settings): 
""" append plugin (widget) to main bar """ is_plugin = False is_separator = False if len(settings['cmd']) > 1 and settings['cmd'][0] == '@': is_plugin = True if settings['cmd'][1:] == 'separator': is_separator = True elif settings['cmd'][1:] == 'drawer': if index in self.bar.drawer: settings['launcher'] = self.bar.drawer[index] else: settings['launcher'] = {} widget = self.load_plugin(settings['cmd'][1:], settings) else: widget = self.load_plugin('launcher', settings) if widget: # load OK widget.tooltip = settings['name'] widget.index = index if widget.can_show_icon: widget.set_icon(settings['icon'], is_separator) widget.resize() widget.connect("button-release-event", self.bar.widget_released) widget.connect("button-press-event", self.bar.widget_press) widget.connect("enter-notify-event", self.bar.widget_enter_notify) widget.connect("leave-notify-event", self.bar.launcher_leave_notify) widget.show() if settings['cmd'][1:] == 'tasklist': self.box.pack_start(widget, True, True) else: self.box.pack_start(widget, False, False) self.index.append(index) self.plugins[index] = widget return widget else: return None def set_orientation(self): if self.bar.cfg['position'] == "top" or self.bar.cfg['position'] == "bottom": self.box.set_orientation(gtk.ORIENTATION_HORIZONTAL) else: self.box.set_orientation(gtk.ORIENTATION_VERTICAL) def set_panel_mode(self): self.table.remove(self.box) if self.bar.cfg['fixed_mode']: self.table.attach(self.box, 1, 2, 1, 2, xoptions=gtk.EXPAND|gtk.FILL, yoptions=gtk.EXPAND|gtk.FILL) else: self.table.attach(self.box, 1, 2, 1, 2, xoptions=gtk.SHRINK, yoptions=gtk.SHRINK)
gpl-3.0
-2,067,622,214,144,890,400
35.602592
225
0.549773
false
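The bar code in the record above leans on a `core.hex2rgb` helper that is imported from a module not included here. A minimal sketch of what such a helper presumably looks like (a hypothetical reconstruction, not the actual `core` module): it converts an '#RRGGBB' string into 0..1 floats suitable for cairo's set_source_rgba.

def hex2rgb(color):
    """Convert '#RRGGBB' (or 'RRGGBB') to an (r, g, b) tuple of floats in 0..1."""
    color = color.lstrip('#')
    r = int(color[0:2], 16) / 255.0
    g = int(color[2:4], 16) / 255.0
    b = int(color[4:6], 16) / 255.0
    return (r, g, b)

# e.g. hex2rgb('#ff8000') -> (1.0, ~0.502, 0.0)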
obulpathi/reversecoin
misc/tools/bitcoin_genesis.py
2
4836
#!/usr/bin/env python

import binascii

from bitcoin.core import COutPoint, CTxIn, CTxOut, CTransaction, CBlock

coinbase = "04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73"
scriptPubKeyHex = "4104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac"

# construct previous out point (coinbase inputs spend the null outpoint)
previousOut = COutPoint()
previousOut.hash = 0
previousOut.n = 4294967295

# construct txin; the coinbase bytes double as the input's scriptSig
txin = CTxIn()
txin.coinbase = binascii.unhexlify(coinbase)
txin.scriptSig = binascii.unhexlify(coinbase)
txin.prevout = previousOut

# construct txout
txout = CTxOut()
txout.nValue = 5000000000
txout.scriptPubKey = binascii.unhexlify(scriptPubKeyHex)

# create transaction
tx = CTransaction()
tx.vin.append(txin)
tx.vout.append(txout)
tx.calc_sha256()

print tx
print "Transaction: ", tx.is_valid()
print "hash: ", hex(tx.sha256)
print "Hash: ", "0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"

block = CBlock()
block.nVersion = 1
block.hashPrevBlock = 0
block.hashMerkleRoot = 0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
block.nTime = 1231006505
block.nBits = 486604799  # 0x1d00ffff
block.nNonce = 2083236893
block.vtx = [tx]
block.calc_sha256()

print "Calculated hash: ", hex(block.sha256)
print " >>>>>>>>>>>>>>: ", "0x000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
# print block.is_valid()

genesis = binascii.hexlify(block.serialize())

print "Version: ", genesis[:8]
print "Previous block: ", genesis[8:72]
print "Merkle root: ", genesis[72:136]
print "Match : ", "3BA3EDFD7A7B12B27AC72C3E67768F617FC81BC3888A51323A9FB8AA4B1E5E4A"
print "Time stamp: ", genesis[136:144]
print "Match : ", "29AB5F49"
print "nBits: ", genesis[144:152]
print "Match: ", "FFFF001D"
print "Nonce: ", genesis[152:160]
print "Match: ", "1DAC2B7C"
print "# transactions: ", genesis[160:162]
print "Match : ", "01"
print "Version: ", genesis[162:170]
print "Input: ", genesis[170:172]
print "previous out:", genesis[172:244]
print "Match :", "0000000000000000000000000000000000000000000000000000000000000000FFFFFFFF"
print "Script length: ", genesis[244:246]
print "scriptSig: ", genesis[246:400]
print "match : ", "04FFFF001D0104455468652054696D65732030332F4A616E2F32303039204368616E63656C6C6F72206F6E206272696E6B206F66207365636F6E64206261696C6F757420666F722062616E6B73"
print "sequence: ", genesis[400:408]
print "match : ", "ffffffff"
print "outputs: ", genesis[408:410]
print "nValue:", genesis[410:426]
print "match: ", "00F2052A01000000"
print "script length: ", genesis[426:428]
print "output script: ", genesis[428:562]
print "match: ", "4104678AFDB0FE5548271967F1A67130B7105CD6A828E03909A67962E0EA1F61DEB649F6BC3F4CEF38C4F35504E51EC112DE5C384DF7BA0B8D578A4C702B6BF11D5FAC"
print "lock time : ", genesis[562:570]

blkchain = open('genesis.dat', 'wb')
# network magic (f9beb4d9) followed by the little-endian block length (0x011d = 285)
magic = 'f9beb4d91d010000'
blkchain.write(binascii.unhexlify(magic + genesis))
blkchain.close()

"""
4D - script length
04FFFF001D0104455468652054696D65732030332F4A616E2F32303039204368616E63656C6C6F72206F6E206272696E6B206F66207365636F6E64206261696C6F757420666F722062616E6B73 - scriptsig
FFFFFFFF - sequence
01 - outputs
00F2052A01000000 - 50 BTC
43 - pk_script length
4104678AFDB0FE5548271967F1A67130B7105CD6A828E03909A67962E0EA1F61DEB649F6BC3F4CEF38C4F35504E51EC112DE5C384DF7BA0B8D578A4C702B6BF11D5FAC - pk_script
00000000 - lock time
"""

"""
{
  "hash":"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f",
  "ver":1,
  "prev_block":"0000000000000000000000000000000000000000000000000000000000000000",
  "mrkl_root":"4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b",
  "time":1231006505,
  "bits":486604799,
  "nonce":2083236893,
  "n_tx":1,
  "size":285,
  "tx":[
    {
      "hash":"4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b",
      "ver":1,
      "vin_sz":1,
      "vout_sz":1,
      "lock_time":0,
      "size":204,
      "in":[
        {
          "prev_out":{
            "hash":"0000000000000000000000000000000000000000000000000000000000000000",
            "n":4294967295
          },
          "coinbase":"04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73"
        }
      ],
      "out":[
        {
          "value":"50.00000000",
          "scriptPubKey":"04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f OP_CHECKSIG"
        }
      ]
    }
  ],
  "mrkl_tree":[
    "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"
  ]
}
"""
gpl-2.0
-7,545,839,946,766,482,000
33.791367
177
0.766956
false
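The script in the record above serializes the full genesis block, but the 80-byte header alone is enough to verify the proof-of-work hash. A self-contained sketch (standard library only, not part of the record's python-bitcoinlib code) that rebuilds the header from the same constants and double-SHA256s it:

import binascii
import hashlib
import struct

def dsha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

merkle_le = binascii.unhexlify(
    '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b')[::-1]
header = (struct.pack('<I', 1)          # nVersion
          + b'\x00' * 32                # hashPrevBlock (the genesis block has none)
          + merkle_le                   # hashMerkleRoot, little-endian
          + struct.pack('<III', 1231006505, 486604799, 2083236893))  # nTime, nBits, nNonce
assert len(header) == 80
print(binascii.hexlify(dsha256(header)[::-1]).decode())
# -> 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f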
electrolinux/weblate
weblate/trans/views/source.py
9
4827
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

from django.http import Http404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django.contrib import messages

from urllib import urlencode

from weblate.trans.views.helper import get_subproject
from weblate.trans.models import Translation, Source
from weblate.trans.forms import PriorityForm, CheckFlagsForm
from weblate.trans.permissions import can_edit_flags, can_edit_priority


def get_source(request, project, subproject):
    """
    Returns the first translation in a subproject
    (this assumes all translations share the same source strings).
    """
    obj = get_subproject(request, project, subproject)
    try:
        return obj, obj.translation_set.all()[0]
    except (Translation.DoesNotExist, IndexError):
        raise Http404('No translation exists in this component.')


def review_source(request, project, subproject):
    """
    Listing of source strings to review.
    """
    obj, source = get_source(request, project, subproject)

    # Grab search type and page number; coerce limit to int, since GET
    # parameters are strings and Paginator needs a numeric per_page.
    rqtype = request.GET.get('type', 'all')
    try:
        limit = int(request.GET.get('limit', 50))
    except ValueError:
        limit = 50
    page = request.GET.get('page', 1)
    checksum = request.GET.get('checksum', '')
    ignored = 'ignored' in request.GET
    expand = False
    query_string = {'type': rqtype}
    if ignored:
        query_string['ignored'] = 'true'

    # Filter units:
    if checksum:
        sources = source.unit_set.filter(checksum=checksum)
        expand = True
    else:
        sources = source.unit_set.filter_type(rqtype, source, ignored)

    paginator = Paginator(sources, limit)

    try:
        sources = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        sources = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        sources = paginator.page(paginator.num_pages)

    return render(
        request,
        'source-review.html',
        {
            'object': obj,
            'source': source,
            'page_obj': sources,
            'query_string': urlencode(query_string),
            'ignored': ignored,
            'expand': expand,
            'title': _('Review source strings in %s') % obj.__unicode__(),
        }
    )


def show_source(request, project, subproject):
    """
    Show source strings summary and checks.
    """
    obj, source = get_source(request, project, subproject)

    return render(
        request,
        'source.html',
        {
            'object': obj,
            'source': source,
            'title': _('Source strings in %s') % obj.__unicode__(),
        }
    )


@require_POST
@login_required
def edit_priority(request, pk):
    """
    Change source string priority.
    """
    source = get_object_or_404(Source, pk=pk)

    if not can_edit_priority(request.user, source.subproject.project):
        raise PermissionDenied()

    form = PriorityForm(request.POST)
    if form.is_valid():
        source.priority = form.cleaned_data['priority']
        source.save()
    else:
        messages.error(request, _('Failed to change priority!'))

    return redirect(request.POST.get('next', source.get_absolute_url()))


@require_POST
@login_required
def edit_check_flags(request, pk):
    """
    Change source string check flags.
    """
    source = get_object_or_404(Source, pk=pk)

    if not can_edit_flags(request.user, source.subproject.project):
        raise PermissionDenied()

    form = CheckFlagsForm(request.POST)
    if form.is_valid():
        source.check_flags = form.cleaned_data['flags']
        source.save()
    else:
        messages.error(request, _('Failed to change check flags!'))

    return redirect(request.POST.get('next', source.get_absolute_url()))
gpl-3.0
-6,867,921,458,512,830,000
30.529412
76
0.664801
false
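The PageNotAnInteger/EmptyPage fallback in review_source above is a stock Django pagination pattern; it could be factored into a small reusable helper. A refactoring sketch (not part of Weblate itself):

from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def clamp_page(object_list, number, per_page=50):
    """Return a valid Page for any raw ?page= value, clamping bad input."""
    paginator = Paginator(object_list, per_page)
    try:
        return paginator.page(number)
    except PageNotAnInteger:
        return paginator.page(1)                      # non-numeric -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)    # past the end -> last page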
savi-dev/nova
nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py
8
2540
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack LLC. # Copyright 2012 Michael Still and Canonical Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint from sqlalchemy import MetaData, String, Table from sqlalchemy import select, Column, ForeignKey, Integer from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine sgia = Table('security_group_instance_association', meta, autoload=True) instances = Table('instances', meta, autoload=True) uuid_column = Column('instance_uuid', String(36)) uuid_column.create(sgia) try: sgia.update().values( instance_uuid=select( [instances.c.uuid], instances.c.id == sgia.c.instance_id) ).execute() except Exception: uuid_column.drop() raise fkeys = list(sgia.c.instance_id.foreign_keys) if fkeys: try: fkey_name = fkeys[0].constraint.name ForeignKeyConstraint( columns=[sgia.c.instance_id], refcolumns=[instances.c.id], name=fkey_name).drop() except Exception: LOG.error(_("foreign key constraint couldn't be removed")) raise sgia.c.instance_id.drop() def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine sgia = Table('security_group_instance_association', meta, autoload=True) instances = Table('instances', meta, autoload=True) id_column = Column('instance_id', Integer, ForeignKey('instances.id')) id_column.create(sgia) try: sgia.update().values( instance_id=select( [instances.c.id], instances.c.uuid == sgia.c.instance_uuid) ).execute() except Exception: id_column.drop() raise sgia.c.instance_uuid.drop()
apache-2.0
2,779,751,424,503,015,400
30.75
78
0.648031
false
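sqlalchemy-migrate normally drives the upgrade()/downgrade() functions above itself (via `nova-manage db sync`); running the step by hand looks roughly like this sketch, where the connection URL is a placeholder and the database is assumed to already hold the pre-101 schema:

from sqlalchemy import create_engine

engine = create_engine('mysql://nova:nova@localhost/nova')  # placeholder URL
upgrade(engine)     # add instance_uuid, backfill from instances.uuid, drop old FK + instance_id
# downgrade(engine) # reverse: restore the integer instance_id column from instances.id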
INNUENDOCON/INNUca
src/SPAdes-3.10.1-Linux/share/spades/joblib2/format_stack.py
18
16314
""" Represent an exception with a lot of information. Provides 2 useful functions: format_exc: format an exception into a complete traceback, with full debugging instruction. format_outer_frames: format the current position in the stack call. Adapted from IPython's VerboseTB. """ # Authors: Gael Varoquaux < gael dot varoquaux at normalesup dot org > # Nathaniel Gray <[email protected]> # Fernando Perez <[email protected]> # Copyright: 2010, Gael Varoquaux # 2001-2004, Fernando Perez # 2001 Nathaniel Gray # License: BSD 3 clause import inspect import keyword import linecache import os import pydoc import sys import time import tokenize import traceback import types try: # Python 2 generate_tokens = tokenize.generate_tokens except AttributeError: # Python 3 generate_tokens = tokenize.tokenize PY3 = (sys.version[0] == '3') INDENT = ' ' * 8 ############################################################################### # some internal-use functions def safe_repr(value): """Hopefully pretty robust repr equivalent.""" # this is pretty horrible but should always return *something* try: return pydoc.text.repr(value) except KeyboardInterrupt: raise except: try: return repr(value) except KeyboardInterrupt: raise except: try: # all still in an except block so we catch # getattr raising name = getattr(value, '__name__', None) if name: # ick, recursion return safe_repr(name) klass = getattr(value, '__class__', None) if klass: return '%s instance' % safe_repr(klass) except KeyboardInterrupt: raise except: return 'UNRECOVERABLE REPR FAILURE' def eq_repr(value, repr=safe_repr): return '=%s' % repr(value) ############################################################################### def uniq_stable(elems): """uniq_stable(elems) -> list Return from an iterable, a list of all the unique elements in the input, but maintaining the order in which they first appear. A naive solution to this problem which just makes a dictionary with the elements as keys fails to respect the stability condition, since dictionaries are unsorted by nature. Note: All elements in the input must be hashable. """ unique = [] unique_set = set() for nn in elems: if nn not in unique_set: unique.append(nn) unique_set.add(nn) return unique ############################################################################### def fix_frame_records_filenames(records): """Try to fix the filenames in each record from inspect.getinnerframes(). Particularly, modules loaded from within zip files have useless filenames attached to their code object, and inspect.getinnerframes() just uses it. """ fixed_records = [] for frame, filename, line_no, func_name, lines, index in records: # Look inside the frame's globals dictionary for __file__, which should # be better. better_fn = frame.f_globals.get('__file__', None) if isinstance(better_fn, str): # Check the type just in case someone did something weird with # __file__. It might also be None if the error occurred during # import. 
filename = better_fn fixed_records.append((frame, filename, line_no, func_name, lines, index)) return fixed_records def _fixed_getframes(etb, context=1, tb_offset=0): LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5 records = fix_frame_records_filenames(inspect.getinnerframes(etb, context)) # If the error is at the console, don't build any context, since it would # otherwise produce 5 blank lines printed out (there is no file at the # console) rec_check = records[tb_offset:] try: rname = rec_check[0][1] if rname == '<ipython console>' or rname.endswith('<string>'): return rec_check except IndexError: pass aux = traceback.extract_tb(etb) assert len(records) == len(aux) for i, (file, lnum, _, _) in enumerate(aux): maybeStart = lnum - 1 - context // 2 start = max(maybeStart, 0) end = start + context lines = linecache.getlines(file)[start:end] # pad with empty lines if necessary if maybeStart < 0: lines = (['\n'] * -maybeStart) + lines if len(lines) < context: lines += ['\n'] * (context - len(lines)) buf = list(records[i]) buf[LNUM_POS] = lnum buf[INDEX_POS] = lnum - 1 - start buf[LINES_POS] = lines records[i] = tuple(buf) return records[tb_offset:] def _format_traceback_lines(lnum, index, lines, lvals=None): numbers_width = 7 res = [] i = lnum - index for line in lines: if i == lnum: # This is the line with the error pad = numbers_width - len(str(i)) if pad >= 3: marker = '-' * (pad - 3) + '-> ' elif pad == 2: marker = '> ' elif pad == 1: marker = '>' else: marker = '' num = marker + str(i) else: num = '%*s' % (numbers_width, i) line = '%s %s' % (num, line) res.append(line) if lvals and i == lnum: res.append(lvals + '\n') i = i + 1 return res def format_records(records): # , print_globals=False): # Loop over all records printing context and info frames = [] abspath = os.path.abspath for frame, file, lnum, func, lines, index in records: #print '*** record:',file,lnum,func,lines,index # dbg try: file = file and abspath(file) or '?' except OSError: # if file is '<console>' or something not in the filesystem, # the abspath call will throw an OSError. Just ignore it and # keep the original file string. pass link = file try: args, varargs, varkw, locals = inspect.getargvalues(frame) except: # This can happen due to a bug in python2.3. We should be # able to remove this try/except when 2.4 becomes a # requirement. Bug details at http://python.org/sf/1005466 print "\nJoblib's exception reporting continues...\n" if func == '?': call = '' else: # Decide whether to include variable details or not try: call = 'in %s%s' % (func, inspect.formatargvalues(args, varargs, varkw, locals, formatvalue=eq_repr)) except KeyError: # Very odd crash from inspect.formatargvalues(). The # scenario under which it appeared was a call to # view(array,scale) in NumTut.view.view(), where scale had # been defined as a scalar (it should be a tuple). Somehow # inspect messes up resolving the argument list of view() # and barfs out. At some point I should dig into this one # and file a bug report about it. print "\nJoblib's exception reporting continues...\n" call = 'in %s(***failed resolving arguments***)' % func # Initialize a list of names on the current line, which the # tokenizer below will populate. names = [] def tokeneater(token_type, token, start, end, line): """Stateful tokeneater which builds dotted names. The list of names it appends to (from the enclosing scope) can contain repeated composite names. This is unavoidable, since there is no way to disambguate partial dotted structures until the full list is known. 
The caller is responsible for pruning the final list of duplicates before using it.""" # build composite names if token == '.': try: names[-1] += '.' # store state so the next token is added for x.y.z names tokeneater.name_cont = True return except IndexError: pass if token_type == tokenize.NAME and token not in keyword.kwlist: if tokeneater.name_cont: # Dotted names names[-1] += token tokeneater.name_cont = False else: # Regular new names. We append everything, the caller # will be responsible for pruning the list later. It's # very tricky to try to prune as we go, b/c composite # names can fool us. The pruning at the end is easy # to do (or the caller can print a list with repeated # names if so desired. names.append(token) elif token_type == tokenize.NEWLINE: raise IndexError # we need to store a bit of state in the tokenizer to build # dotted names tokeneater.name_cont = False def linereader(file=file, lnum=[lnum], getline=linecache.getline): line = getline(file, lnum[0]) lnum[0] += 1 return line # Build the list of names on this line of code where the exception # occurred. try: # This builds the names list in-place by capturing it from the # enclosing scope. for token in generate_tokens(linereader): tokeneater(*token) except (IndexError, UnicodeDecodeError): # signals exit of tokenizer pass except tokenize.TokenError, msg: _m = ("An unexpected error occurred while tokenizing input\n" "The following traceback may be corrupted or invalid\n" "The error message is: %s\n" % msg) print(_m) # prune names list of duplicates, but keep the right order unique_names = uniq_stable(names) # Start loop over vars lvals = [] for name_full in unique_names: name_base = name_full.split('.', 1)[0] if name_base in frame.f_code.co_varnames: if name_base in locals.keys(): try: value = repr(eval(name_full, locals)) except: value = "undefined" else: value = "undefined" name = name_full lvals.append('%s = %s' % (name, value)) #elif print_globals: # if frame.f_globals.has_key(name_base): # try: # value = repr(eval(name_full,frame.f_globals)) # except: # value = "undefined" # else: # value = "undefined" # name = 'global %s' % name_full # lvals.append('%s = %s' % (name,value)) if lvals: lvals = '%s%s' % (INDENT, ('\n%s' % INDENT).join(lvals)) else: lvals = '' level = '%s\n%s %s\n' % (75 * '.', link, call) if index is None: frames.append(level) else: frames.append('%s%s' % (level, ''.join( _format_traceback_lines(lnum, index, lines, lvals)))) return frames ############################################################################### def format_exc(etype, evalue, etb, context=5, tb_offset=0): """ Return a nice text document describing the traceback. Parameters ----------- etype, evalue, etb: as returned by sys.exc_info context: number of lines of the source file to plot tb_offset: the number of stack frame not to use (0 = use all) """ # some locals try: etype = etype.__name__ except AttributeError: pass # Header with the exception type, python version, and date pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable date = time.ctime(time.time()) pid = 'PID: %i' % os.getpid() head = '%s%s%s\n%s%s%s' % (etype, ' ' * (75 - len(str(etype)) - len(date)), date, pid, ' ' * (75 - len(str(pid)) - len(pyver)), pyver) # Flush cache before calling inspect. This helps alleviate some of the # problems with python 2.3's inspect.py. 
linecache.checkcache() # Drop topmost frames if requested try: records = _fixed_getframes(etb, context, tb_offset) except: raise print '\nUnfortunately, your original traceback can not be ' + \ 'constructed.\n' return '' # Get (safely) a string form of the exception info try: etype_str, evalue_str = map(str, (etype, evalue)) except: # User exception is improperly defined. etype, evalue = str, sys.exc_info()[:2] etype_str, evalue_str = map(str, (etype, evalue)) # ... and format it exception = ['%s: %s' % (etype_str, evalue_str)] if (not PY3) and type(evalue) is types.InstanceType: try: names = [w for w in dir(evalue) if isinstance(w, basestring)] except: # Every now and then, an object with funny inernals blows up # when dir() is called on it. We do the best we can to report # the problem and continue exception.append( 'Exception reporting error (object with broken dir()):' ) etype_str, evalue_str = map(str, sys.exc_info()[:2]) exception.append('%s: %s' % (etype_str, evalue_str)) names = [] for name in names: value = safe_repr(getattr(evalue, name)) exception.append('\n%s%s = %s' % (INDENT, name, value)) frames = format_records(records) return '%s\n%s\n%s' % (head, '\n'.join(frames), ''.join(exception[0])) ############################################################################### def format_outer_frames(context=5, stack_start=None, stack_end=None, ignore_ipython=True): LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5 records = inspect.getouterframes(inspect.currentframe()) output = list() for i, (frame, filename, line_no, func_name, lines, index) \ in enumerate(records): # Look inside the frame's globals dictionary for __file__, which should # be better. better_fn = frame.f_globals.get('__file__', None) if isinstance(better_fn, str): # Check the type just in case someone did something weird with # __file__. It might also be None if the error occurred during # import. filename = better_fn if filename.endswith('.pyc'): filename = filename[:-4] + '.py' if ignore_ipython: # Hack to avoid printing the interals of IPython if (os.path.basename(filename) == 'iplib.py' and func_name in ('safe_execfile', 'runcode')): break maybeStart = line_no - 1 - context // 2 start = max(maybeStart, 0) end = start + context lines = linecache.getlines(filename)[start:end] # pad with empty lines if necessary if maybeStart < 0: lines = (['\n'] * -maybeStart) + lines if len(lines) < context: lines += ['\n'] * (context - len(lines)) buf = list(records[i]) buf[LNUM_POS] = line_no buf[INDEX_POS] = line_no - 1 - start buf[LINES_POS] = lines output.append(tuple(buf)) return '\n'.join(format_records(output[stack_end:stack_start:-1]))
gpl-3.0
-5,230,174,639,504,592,000
36.331808
79
0.533223
false
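A minimal usage sketch for the module above (written in Python 2 to match its print statements, and assuming format_exc is in scope): trigger an error, capture sys.exc_info(), and render the verbose report.

import sys

def faulty(x):
    y = x - 1
    return x / y            # ZeroDivisionError when x == 1

try:
    faulty(1)
except Exception:
    etype, evalue, etb = sys.exc_info()
    report = format_exc(etype, evalue, etb, context=5, tb_offset=0)
    print report            # header, per-frame source with locals, exception line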
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/nltk/featstruct.py
12
100854
# Natural Language Toolkit: Feature Structures # # Copyright (C) 2001-2012 NLTK Project # Author: Edward Loper <[email protected]>, # Rob Speer, # Steven Bird <[email protected]> # URL: <http://nltk.sourceforge.net> # For license information, see LICENSE.TXT """ Basic data classes for representing feature structures, and for performing basic operations on those feature structures. A feature structure is a mapping from feature identifiers to feature values, where each feature value is either a basic value (such as a string or an integer), or a nested feature structure. There are two types of feature structure, implemented by two subclasses of ``FeatStruct``: - feature dictionaries, implemented by ``FeatDict``, act like Python dictionaries. Feature identifiers may be strings or instances of the ``Feature`` class. - feature lists, implemented by ``FeatList``, act like Python lists. Feature identifiers are integers. Feature structures are typically used to represent partial information about objects. A feature identifier that is not mapped to a value stands for a feature whose value is unknown (*not* a feature without a value). Two feature structures that represent (potentially overlapping) information about the same object can be combined by unification. When two inconsistent feature structures are unified, the unification fails and returns None. Features can be specified using "feature paths", or tuples of feature identifiers that specify path through the nested feature structures to a value. Feature structures may contain reentrant feature values. A "reentrant feature value" is a single feature value that can be accessed via multiple feature paths. Unification preserves the reentrance relations imposed by both of the unified feature structures. In the feature structure resulting from unification, any modifications to a reentrant feature value will be visible using any of its feature paths. Feature structure variables are encoded using the ``nltk.sem.Variable`` class. The variables' values are tracked using a bindings dictionary, which maps variables to their values. When two feature structures are unified, a fresh bindings dictionary is created to track their values; and before unification completes, all bound variables are replaced by their values. Thus, the bindings dictionaries are usually strictly internal to the unification process. However, it is possible to track the bindings of variables if you choose to, by supplying your own initial bindings dictionary to the ``unify()`` function. When unbound variables are unified with one another, they become aliased. This is encoded by binding one variable to the other. Lightweight Feature Structures ============================== Many of the functions defined by ``nltk.featstruct`` can be applied directly to simple Python dictionaries and lists, rather than to full-fledged ``FeatDict`` and ``FeatList`` objects. In other words, Python ``dicts`` and ``lists`` can be used as "light-weight" feature structures. >>> from nltk.featstruct import unify >>> unify(dict(x=1, y=dict()), dict(a='a', y=dict(b='b'))) {'y': {'b': 'b'}, 'x': 1, 'a': 'a'} However, you should keep in mind the following caveats: - Python dictionaries & lists ignore reentrance when checking for equality between values. But two FeatStructs with different reentrances are considered nonequal, even if all their base values are equal. - FeatStructs can be easily frozen, allowing them to be used as keys in hash tables. Python dictionaries and lists can not. 
- FeatStructs display reentrance in their string representations; Python dictionaries and lists do not. - FeatStructs may *not* be mixed with Python dictionaries and lists (e.g., when performing unification). - FeatStructs provide a number of useful methods, such as ``walk()`` and ``cyclic()``, which are not available for Python dicts and lists. In general, if your feature structures will contain any reentrances, or if you plan to use them as dictionary keys, it is strongly recommended that you use full-fledged ``FeatStruct`` objects. """ import re, copy from nltk.sem.logic import (Variable, Expression, SubstituteBindingsI, LogicParser, ParseException) import nltk.internals ###################################################################### # Feature Structure ###################################################################### class FeatStruct(SubstituteBindingsI): """ A mapping from feature identifiers to feature values, where each feature value is either a basic value (such as a string or an integer), or a nested feature structure. There are two types of feature structure: - feature dictionaries, implemented by ``FeatDict``, act like Python dictionaries. Feature identifiers may be strings or instances of the ``Feature`` class. - feature lists, implemented by ``FeatList``, act like Python lists. Feature identifiers are integers. Feature structures may be indexed using either simple feature identifiers or 'feature paths.' A feature path is a sequence of feature identifiers that stand for a corresponding sequence of indexing operations. In particular, ``fstruct[(f1,f2,...,fn)]`` is equivalent to ``fstruct[f1][f2]...[fn]``. Feature structures may contain reentrant feature structures. A "reentrant feature structure" is a single feature structure object that can be accessed via multiple feature paths. Feature structures may also be cyclic. A feature structure is "cyclic" if there is any feature path from the feature structure to itself. Two feature structures are considered equal if they assign the same values to all features, and have the same reentrancies. By default, feature structures are mutable. They may be made immutable with the ``freeze()`` method. Once they have been frozen, they may be hashed, and thus used as dictionary keys. """ _frozen = False """:ivar: A flag indicating whether this feature structure is frozen or not. Once this flag is set, it should never be un-set; and no further modification should be made to this feature structue.""" ##//////////////////////////////////////////////////////////// #{ Constructor ##//////////////////////////////////////////////////////////// def __new__(cls, features=None, **morefeatures): """ Construct and return a new feature structure. If this constructor is called directly, then the returned feature structure will be an instance of either the ``FeatDict`` class or the ``FeatList`` class. :param features: The initial feature values for this feature structure: - FeatStruct(string) -> FeatStructParser().parse(string) - FeatStruct(mapping) -> FeatDict(mapping) - FeatStruct(sequence) -> FeatList(sequence) - FeatStruct() -> FeatDict() :param morefeatures: If ``features`` is a mapping or None, then ``morefeatures`` provides additional features for the ``FeatDict`` constructor. """ # If the FeatStruct constructor is called directly, then decide # whether to create a FeatDict or a FeatList, based on the # contents of the `features` argument. 
if cls is FeatStruct: if features is None: return FeatDict.__new__(FeatDict, **morefeatures) elif _is_mapping(features): return FeatDict.__new__(FeatDict, features, **morefeatures) elif morefeatures: raise TypeError('Keyword arguments may only be specified ' 'if features is None or is a mapping.') if isinstance(features, basestring): if FeatStructParser._START_FDICT_RE.match(features): return FeatDict.__new__(FeatDict, features, **morefeatures) else: return FeatList.__new__(FeatList, features, **morefeatures) elif _is_sequence(features): return FeatList.__new__(FeatList, features) else: raise TypeError('Expected string or mapping or sequence') # Otherwise, construct the object as normal. else: return super(FeatStruct, cls).__new__(cls, features, **morefeatures) ##//////////////////////////////////////////////////////////// #{ Uniform Accessor Methods ##//////////////////////////////////////////////////////////// # These helper functions allow the methods defined by FeatStruct # to treat all feature structures as mappings, even if they're # really lists. (Lists are treated as mappings from ints to vals) def _keys(self): """Return an iterable of the feature identifiers used by this FeatStruct.""" raise NotImplementedError() # Implemented by subclasses. def _values(self): """Return an iterable of the feature values directly defined by this FeatStruct.""" raise NotImplementedError() # Implemented by subclasses. def _items(self): """Return an iterable of (fid,fval) pairs, where fid is a feature identifier and fval is the corresponding feature value, for all features defined by this FeatStruct.""" raise NotImplementedError() # Implemented by subclasses. ##//////////////////////////////////////////////////////////// #{ Equality & Hashing ##//////////////////////////////////////////////////////////// def equal_values(self, other, check_reentrance=False): """ Return True if ``self`` and ``other`` assign the same value to to every feature. In particular, return true if ``self[p]==other[p]`` for every feature path *p* such that ``self[p]`` or ``other[p]`` is a base value (i.e., not a nested feature structure). :param check_reentrance: If True, then also return False if there is any difference between the reentrances of ``self`` and ``other``. :note: the ``==`` is equivalent to ``equal_values()`` with ``check_reentrance=True``. """ return self._equal(other, check_reentrance, set(), set(), set()) def __eq__(self, other): """ Return true if ``self`` and ``other`` are both feature structures, assign the same values to all features, and contain the same reentrances. I.e., return ``self.equal_values(other, check_reentrance=True)``. :see: ``equal_values()`` """ return self._equal(other, True, set(), set(), set()) def __ne__(self, other): """ Return true unless ``self`` and ``other`` are both feature structures, assign the same values to all features, and contain the same reentrances. I.e., return ``not self.equal_values(other, check_reentrance=True)``. """ return not self.__eq__(other) def __hash__(self): """ If this feature structure is frozen, return its hash value; otherwise, raise ``TypeError``. """ if not self._frozen: raise TypeError('FeatStructs must be frozen before they ' 'can be hashed.') try: return self.__hash except AttributeError: self.__hash = self._hash(set()) return self.__hash def _equal(self, other, check_reentrance, visited_self, visited_other, visited_pairs): """ Return True iff self and other have equal values. 
:param visited_self: A set containing the ids of all ``self`` feature structures we've already visited. :param visited_other: A set containing the ids of all ``other`` feature structures we've already visited. :param visited_pairs: A set containing ``(selfid, otherid)`` pairs for all pairs of feature structures we've already visited. """ # If we're the same object, then we're equal. if self is other: return True # If we have different classes, we're definitely not equal. if self.__class__ != other.__class__: return False # If we define different features, we're definitely not equal. # (Perform len test first because it's faster -- we should # do profiling to see if this actually helps) if len(self) != len(other): return False if set(self._keys()) != set(other._keys()): return False # If we're checking reentrance, then any time we revisit a # structure, make sure that it was paired with the same # feature structure that it is now. Note: if check_reentrance, # then visited_pairs will never contain two pairs whose first # values are equal, or two pairs whose second values are equal. if check_reentrance: if id(self) in visited_self or id(other) in visited_other: return (id(self), id(other)) in visited_pairs # If we're not checking reentrance, then we still need to deal # with cycles. If we encounter the same (self, other) pair a # second time, then we won't learn anything more by examining # their children a second time, so just return true. else: if (id(self), id(other)) in visited_pairs: return True # Keep track of which nodes we've visited. visited_self.add(id(self)) visited_other.add(id(other)) visited_pairs.add( (id(self), id(other)) ) # Now we have to check all values. If any of them don't match, # then return false. for (fname, self_fval) in self._items(): other_fval = other[fname] if isinstance(self_fval, FeatStruct): if not self_fval._equal(other_fval, check_reentrance, visited_self, visited_other, visited_pairs): return False else: if self_fval != other_fval: return False # Everything matched up; return true. return True def _hash(self, visited): """ Return a hash value for this feature structure. :require: ``self`` must be frozen. :param visited: A set containing the ids of all feature structures we've already visited while hashing. """ if id(self) in visited: return 1 visited.add(id(self)) hashval = 5831 for (fname, fval) in sorted(self._items()): hashval *= 37 hashval += hash(fname) hashval *= 37 if isinstance(fval, FeatStruct): hashval += fval._hash(visited) else: hashval += hash(fval) # Convert to a 32 bit int. hashval = int(hashval & 0x7fffffff) return hashval ##//////////////////////////////////////////////////////////// #{ Freezing ##//////////////////////////////////////////////////////////// #: Error message used by mutating methods when called on a frozen #: feature structure. _FROZEN_ERROR = "Frozen FeatStructs may not be modified." def freeze(self): """ Make this feature structure, and any feature structures it contains, immutable. Note: this method does not attempt to 'freeze' any feature value that is not a ``FeatStruct``; it is recommended that you use only immutable feature values. """ if self._frozen: return self._freeze(set()) def frozen(self): """ Return True if this feature structure is immutable. Feature structures can be made immutable with the ``freeze()`` method. Immutable feature structures may not be made mutable again, but new mutable copies can be produced with the ``copy()`` method. 
""" return self._frozen def _freeze(self, visited): """ Make this feature structure, and any feature structure it contains, immutable. :param visited: A set containing the ids of all feature structures we've already visited while freezing. """ if id(self) in visited: return visited.add(id(self)) self._frozen = True for (fname, fval) in sorted(self._items()): if isinstance(fval, FeatStruct): fval._freeze(visited) ##//////////////////////////////////////////////////////////// #{ Copying ##//////////////////////////////////////////////////////////// def copy(self, deep=True): """ Return a new copy of ``self``. The new copy will not be frozen. :param deep: If true, create a deep copy; if false, create a shallow copy. """ if deep: return copy.deepcopy(self) else: return self.__class__(self) # Subclasses should define __deepcopy__ to ensure that the new # copy will not be frozen. def __deepcopy__(self, memo): raise NotImplementedError() # Implemented by subclasses. ##//////////////////////////////////////////////////////////// #{ Structural Information ##//////////////////////////////////////////////////////////// def cyclic(self): """ Return True if this feature structure contains itself. """ return self._find_reentrances({})[id(self)] def walk(self): """ Return an iterator that generates this feature structure, and each feature structure it contains. Each feature structure will be generated exactly once. """ return self._walk(set()) def _walk(self, visited): """ Return an iterator that generates this feature structure, and each feature structure it contains. :param visited: A set containing the ids of all feature structures we've already visited while freezing. """ raise NotImplementedError() # Implemented by subclasses. def _walk(self, visited): if id(self) in visited: return visited.add(id(self)) yield self for fval in self._values(): if isinstance(fval, FeatStruct): for elt in fval._walk(visited): yield elt # Walk through the feature tree. The first time we see a feature # value, map it to False (not reentrant). If we see a feature # value more than once, then map it to True (reentrant). def _find_reentrances(self, reentrances): """ Return a dictionary that maps from the ``id`` of each feature structure contained in ``self`` (including ``self``) to a boolean value, indicating whether it is reentrant or not. """ if reentrances.has_key(id(self)): # We've seen it more than once. reentrances[id(self)] = True else: # This is the first time we've seen it. reentrances[id(self)] = False # Recurse to contained feature structures. for fval in self._values(): if isinstance(fval, FeatStruct): fval._find_reentrances(reentrances) return reentrances ##//////////////////////////////////////////////////////////// #{ Variables & Bindings ##//////////////////////////////////////////////////////////// def substitute_bindings(self, bindings): """:see: ``nltk.featstruct.substitute_bindings()``""" return substitute_bindings(self, bindings) def retract_bindings(self, bindings): """:see: ``nltk.featstruct.retract_bindings()``""" return retract_bindings(self, bindings) def variables(self): """:see: ``nltk.featstruct.find_variables()``""" return find_variables(self) def rename_variables(self, vars=None, used_vars=(), new_vars=None): """:see: ``nltk.featstruct.rename_variables()``""" return rename_variables(self, vars, used_vars, new_vars) def remove_variables(self): """ Return the feature structure that is obtained by deleting any feature whose value is a ``Variable``. 
:rtype: FeatStruct """ return remove_variables(self) ##//////////////////////////////////////////////////////////// #{ Unification ##//////////////////////////////////////////////////////////// def unify(self, other, bindings=None, trace=False, fail=None, rename_vars=True): return unify(self, other, bindings, trace, fail, rename_vars) def subsumes(self, other): """ Return True if ``self`` subsumes ``other``. I.e., return true If unifying ``self`` with ``other`` would result in a feature structure equal to ``other``. """ return subsumes(self, other) ##//////////////////////////////////////////////////////////// #{ String Representations ##//////////////////////////////////////////////////////////// def __repr__(self): """ Display a single-line representation of this feature structure, suitable for embedding in other representations. """ return self._repr(self._find_reentrances({}), {}) def _repr(self, reentrances, reentrance_ids): """ Return a string representation of this feature structure. :param reentrances: A dictionary that maps from the ``id`` of each feature value in self, indicating whether that value is reentrant or not. :param reentrance_ids: A dictionary mapping from each ``id`` of a feature value to a unique identifier. This is modified by ``repr``: the first time a reentrant feature value is displayed, an identifier is added to ``reentrance_ids`` for it. """ raise NotImplementedError() # Mutation: disable if frozen. _FROZEN_ERROR = "Frozen FeatStructs may not be modified." _FROZEN_NOTICE = "\n%sIf self is frozen, raise ValueError." def _check_frozen(method, indent=''): """ Given a method function, return a new method function that first checks if ``self._frozen`` is true; and if so, raises ``ValueError`` with an appropriate message. Otherwise, call the method and return its result. """ def wrapped(self, *args, **kwargs): if self._frozen: raise ValueError(_FROZEN_ERROR) else: return method(self, *args, **kwargs) wrapped.__name__ = method.__name__ wrapped.__doc__ = (method.__doc__ or '') + (_FROZEN_NOTICE % indent) return wrapped ###################################################################### # Feature Dictionary ###################################################################### class FeatDict(FeatStruct, dict): """ A feature structure that acts like a Python dictionary. I.e., a mapping from feature identifiers to feature values, where a feature identifier can be a string or a ``Feature``; and where a feature value can be either a basic value (such as a string or an integer), or a nested feature structure. A feature identifiers for a ``FeatDict`` is sometimes called a "feature name". Two feature dicts are considered equal if they assign the same values to all features, and have the same reentrances. :see: ``FeatStruct`` for information about feature paths, reentrance, cyclic feature structures, mutability, freezing, and hashing. """ def __init__(self, features=None, **morefeatures): """ Create a new feature dictionary, with the specified features. :param features: The initial value for this feature dictionary. If ``features`` is a ``FeatStruct``, then its features are copied (shallow copy). If ``features`` is a dict, then a feature is created for each item, mapping its key to its value. If ``features`` is a string, then it is parsed using ``FeatStructParser``. If ``features`` is a list of tuples ``(name, val)``, then a feature is created for each tuple. :param morefeatures: Additional features for the new feature dictionary. 
If a feature is listed under both ``features`` and ``morefeatures``, then the value from ``morefeatures`` will be used. """ if isinstance(features, basestring): FeatStructParser().parse(features, self) self.update(**morefeatures) else: # update() checks the types of features. self.update(features, **morefeatures) #//////////////////////////////////////////////////////////// #{ Dict methods #//////////////////////////////////////////////////////////// _INDEX_ERROR = "Expected feature name or path. Got %r." def __getitem__(self, name_or_path): """If the feature with the given name or path exists, return its value; otherwise, raise ``KeyError``.""" if isinstance(name_or_path, (basestring, Feature)): return dict.__getitem__(self, name_or_path) elif isinstance(name_or_path, tuple): try: val = self for fid in name_or_path: if not isinstance(val, FeatStruct): raise KeyError # path contains base value val = val[fid] return val except (KeyError, IndexError): raise KeyError(name_or_path) else: raise TypeError(self._INDEX_ERROR % name_or_path) def get(self, name_or_path, default=None): """If the feature with the given name or path exists, return its value; otherwise, return ``default``.""" try: return self[name_or_path] except KeyError: return default def __contains__(self, name_or_path): """Return true if a feature with the given name or path exists.""" try: self[name_or_path]; return True except KeyError: return False def has_key(self, name_or_path): """Return true if a feature with the given name or path exists.""" return name_or_path in self def __delitem__(self, name_or_path): """If the feature with the given name or path exists, delete its value; otherwise, raise ``KeyError``.""" if self._frozen: raise ValueError(_FROZEN_ERROR) if isinstance(name_or_path, (basestring, Feature)): return dict.__delitem__(self, name_or_path) elif isinstance(name_or_path, tuple): if len(name_or_path) == 0: raise ValueError("The path () can not be set") else: parent = self[name_or_path[:-1]] if not isinstance(parent, FeatStruct): raise KeyError(name_or_path) # path contains base value del parent[name_or_path[-1]] else: raise TypeError(self._INDEX_ERROR % name_or_path) def __setitem__(self, name_or_path, value): """Set the value for the feature with the given name or path to ``value``. 
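
        For example (an illustrative doctest; tuple paths address
        nested structures):

            >>> fs = FeatDict('[a=[b=1]]')
            >>> fs[('a', 'b')] = 2
            >>> fs
            [a=[b=2]]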
If ``name_or_path`` is an invalid path, raise ``KeyError``.""" if self._frozen: raise ValueError(_FROZEN_ERROR) if isinstance(name_or_path, (basestring, Feature)): return dict.__setitem__(self, name_or_path, value) elif isinstance(name_or_path, tuple): if len(name_or_path) == 0: raise ValueError("The path () can not be set") else: parent = self[name_or_path[:-1]] if not isinstance(parent, FeatStruct): raise KeyError(name_or_path) # path contains base value parent[name_or_path[-1]] = value else: raise TypeError(self._INDEX_ERROR % name_or_path) clear = _check_frozen(dict.clear) pop = _check_frozen(dict.pop) popitem = _check_frozen(dict.popitem) setdefault = _check_frozen(dict.setdefault) def update(self, features=None, **morefeatures): if self._frozen: raise ValueError(_FROZEN_ERROR) if features is None: items = () elif hasattr(features, 'has_key'): items = features.items() elif hasattr(features, '__iter__'): items = features else: raise ValueError('Expected mapping or list of tuples') for key, val in items: if not isinstance(key, (basestring, Feature)): raise TypeError('Feature names must be strings') self[key] = val for key, val in morefeatures.items(): if not isinstance(key, (basestring, Feature)): raise TypeError('Feature names must be strings') self[key] = val ##//////////////////////////////////////////////////////////// #{ Copying ##//////////////////////////////////////////////////////////// def __deepcopy__(self, memo): memo[id(self)] = selfcopy = self.__class__() for (key, val) in self._items(): selfcopy[copy.deepcopy(key,memo)] = copy.deepcopy(val,memo) return selfcopy ##//////////////////////////////////////////////////////////// #{ Uniform Accessor Methods ##//////////////////////////////////////////////////////////// def _keys(self): return self.keys() def _values(self): return self.values() def _items(self): return self.items() ##//////////////////////////////////////////////////////////// #{ String Representations ##//////////////////////////////////////////////////////////// def __str__(self): """ Display a multi-line representation of this feature dictionary as an FVM (feature value matrix). """ return '\n'.join(self._str(self._find_reentrances({}), {})) def _repr(self, reentrances, reentrance_ids): segments = [] prefix = '' suffix = '' # If this is the first time we've seen a reentrant structure, # then assign it a unique identifier. if reentrances[id(self)]: assert not reentrance_ids.has_key(id(self)) reentrance_ids[id(self)] = `len(reentrance_ids)+1` # sorting note: keys are unique strings, so we'll never fall # through to comparing values. for (fname, fval) in sorted(self.items()): display = getattr(fname, 'display', None) if reentrance_ids.has_key(id(fval)): segments.append('%s->(%s)' % (fname, reentrance_ids[id(fval)])) elif (display == 'prefix' and not prefix and isinstance(fval, (Variable, basestring))): prefix = '%s' % fval elif display == 'slash' and not suffix: if isinstance(fval, Variable): suffix = '/%s' % fval.name else: suffix = '/%r' % fval elif isinstance(fval, Variable): segments.append('%s=%s' % (fname, fval.name)) elif fval is True: segments.append('+%s' % fname) elif fval is False: segments.append('-%s' % fname) elif isinstance(fval, Expression): segments.append('%s=<%s>' % (fname, fval)) elif not isinstance(fval, FeatStruct): segments.append('%s=%r' % (fname, fval)) else: fval_repr = fval._repr(reentrances, reentrance_ids) segments.append('%s=%s' % (fname, fval_repr)) # If it's reentrant, then add on an identifier tag. 
if reentrances[id(self)]: prefix = '(%s)%s' % (reentrance_ids[id(self)], prefix) return '%s[%s]%s' % (prefix, ', '.join(segments), suffix) def _str(self, reentrances, reentrance_ids): """ :return: A list of lines composing a string representation of this feature dictionary. :param reentrances: A dictionary that maps from the ``id`` of each feature value in self, indicating whether that value is reentrant or not. :param reentrance_ids: A dictionary mapping from each ``id`` of a feature value to a unique identifier. This is modified by ``repr``: the first time a reentrant feature value is displayed, an identifier is added to ``reentrance_ids`` for it. """ # If this is the first time we've seen a reentrant structure, # then tack on an id string. if reentrances[id(self)]: assert not reentrance_ids.has_key(id(self)) reentrance_ids[id(self)] = `len(reentrance_ids)+1` # Special case: empty feature dict. if len(self) == 0: if reentrances[id(self)]: return ['(%s) []' % reentrance_ids[id(self)]] else: return ['[]'] # What's the longest feature name? Use this to align names. maxfnamelen = max(len(str(k)) for k in self.keys()) lines = [] # sorting note: keys are unique strings, so we'll never fall # through to comparing values. for (fname, fval) in sorted(self.items()): fname = str(fname).ljust(maxfnamelen) if isinstance(fval, Variable): lines.append('%s = %s' % (fname,fval.name)) elif isinstance(fval, Expression): lines.append('%s = <%s>' % (fname, fval)) elif isinstance(fval, FeatList): fval_repr = fval._repr(reentrances, reentrance_ids) lines.append('%s = %r' % (fname, fval_repr)) elif not isinstance(fval, FeatDict): # It's not a nested feature structure -- just print it. lines.append('%s = %r' % (fname, fval)) elif reentrance_ids.has_key(id(fval)): # It's a feature structure we've seen before -- print # the reentrance id. lines.append('%s -> (%s)' % (fname, reentrance_ids[id(fval)])) else: # It's a new feature structure. Separate it from # other values by a blank line. if lines and lines[-1] != '': lines.append('') # Recursively print the feature's value (fval). fval_lines = fval._str(reentrances, reentrance_ids) # Indent each line to make room for fname. fval_lines = [(' '*(maxfnamelen+3))+l for l in fval_lines] # Pick which line we'll display fname on, & splice it in. nameline = (len(fval_lines)-1)/2 fval_lines[nameline] = ( fname+' ='+fval_lines[nameline][maxfnamelen+2:]) # Add the feature structure to the output. lines += fval_lines # Separate FeatStructs by a blank line. lines.append('') # Get rid of any excess blank lines. if lines[-1] == '': lines.pop() # Add brackets around everything. maxlen = max(len(line) for line in lines) lines = ['[ %s%s ]' % (line, ' '*(maxlen-len(line))) for line in lines] # If it's reentrant, then add on an identifier tag. if reentrances[id(self)]: idstr = '(%s) ' % reentrance_ids[id(self)] lines = [(' '*len(idstr))+l for l in lines] idline = (len(lines)-1)/2 lines[idline] = idstr + lines[idline][len(idstr):] return lines ###################################################################### # Feature List ###################################################################### class FeatList(FeatStruct, list): """ A list of feature values, where each feature value is either a basic value (such as a string or an integer), or a nested feature structure. Feature lists may contain reentrant feature values. A "reentrant feature value" is a single feature value that can be accessed via multiple feature paths. Feature lists may also be cyclic. 
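
    For example (an illustrative doctest):

        >>> from nltk.featstruct import FeatList
        >>> fl = FeatList('[1, [cat=NP], ?x]')
        >>> fl
        [1, [cat='NP'], ?x]
        >>> fl[(1, 'cat')]
        'NP'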

    Two feature lists are considered equal if they assign the same
    values to all features, and have the same reentrances.

    :see: ``FeatStruct`` for information about feature paths, reentrance,
        cyclic feature structures, mutability, freezing, and hashing.
    """
    def __init__(self, features=()):
        """
        Create a new feature list, with the specified features.

        :param features: The initial list of features for this feature
            list.  If ``features`` is a string, then it is parsed using
            ``FeatStructParser``.  Otherwise, it should be a sequence
            of basic values and nested feature structures.
        """
        if isinstance(features, basestring):
            FeatStructParser().parse(features, self)
        else:
            list.__init__(self, features)

    #////////////////////////////////////////////////////////////
    #{ List methods
    #////////////////////////////////////////////////////////////
    _INDEX_ERROR = "Expected int or feature path.  Got %r."

    def __getitem__(self, name_or_path):
        if isinstance(name_or_path, (int, long)):
            return list.__getitem__(self, name_or_path)
        elif isinstance(name_or_path, tuple):
            try:
                val = self
                for fid in name_or_path:
                    if not isinstance(val, FeatStruct):
                        raise KeyError # path contains base value
                    val = val[fid]
                return val
            except (KeyError, IndexError):
                raise KeyError(name_or_path)
        else:
            raise TypeError(self._INDEX_ERROR % name_or_path)

    def __delitem__(self, name_or_path):
        """If the feature with the given name or path exists, delete
        its value; otherwise, raise ``KeyError``."""
        if self._frozen: raise ValueError(_FROZEN_ERROR)
        if isinstance(name_or_path, (int, long)):
            return list.__delitem__(self, name_or_path)
        elif isinstance(name_or_path, tuple):
            if len(name_or_path) == 0:
                raise ValueError("The path () can not be set")
            else:
                parent = self[name_or_path[:-1]]
                if not isinstance(parent, FeatStruct):
                    raise KeyError(name_or_path) # path contains base value
                del parent[name_or_path[-1]]
        else:
            raise TypeError(self._INDEX_ERROR % name_or_path)

    def __setitem__(self, name_or_path, value):
        """Set the value for the feature with the given name or path
        to ``value``.
If ``name_or_path`` is an invalid path, raise ``KeyError``.""" if self._frozen: raise ValueError(_FROZEN_ERROR) if isinstance(name_or_path, (int, long)): return list.__setitem__(self, name_or_path, value) elif isinstance(name_or_path, tuple): if len(name_or_path) == 0: raise ValueError("The path () can not be set") else: parent = self[name_or_path[:-1]] if not isinstance(parent, FeatStruct): raise KeyError(name_or_path) # path contains base value parent[name_or_path[-1]] = value else: raise TypeError(self._INDEX_ERROR % name_or_path) __delslice__ = _check_frozen(list.__delslice__, ' ') __setslice__ = _check_frozen(list.__setslice__, ' ') __iadd__ = _check_frozen(list.__iadd__) __imul__ = _check_frozen(list.__imul__) append = _check_frozen(list.append) extend = _check_frozen(list.extend) insert = _check_frozen(list.insert) pop = _check_frozen(list.pop) remove = _check_frozen(list.remove) reverse = _check_frozen(list.reverse) sort = _check_frozen(list.sort) ##//////////////////////////////////////////////////////////// #{ Copying ##//////////////////////////////////////////////////////////// def __deepcopy__(self, memo): memo[id(self)] = selfcopy = self.__class__() selfcopy.extend([copy.deepcopy(fval,memo) for fval in self]) return selfcopy ##//////////////////////////////////////////////////////////// #{ Uniform Accessor Methods ##//////////////////////////////////////////////////////////// def _keys(self): return range(len(self)) def _values(self): return self def _items(self): return enumerate(self) ##//////////////////////////////////////////////////////////// #{ String Representations ##//////////////////////////////////////////////////////////// # Special handling for: reentrances, variables, expressions. def _repr(self, reentrances, reentrance_ids): # If this is the first time we've seen a reentrant structure, # then assign it a unique identifier. if reentrances[id(self)]: assert not reentrance_ids.has_key(id(self)) reentrance_ids[id(self)] = `len(reentrance_ids)+1` prefix = '(%s)' % reentrance_ids[id(self)] else: prefix = '' segments = [] for fval in self: if id(fval) in reentrance_ids: segments.append('->(%s)' % reentrance_ids[id(fval)]) elif isinstance(fval, Variable): segments.append(fval.name) elif isinstance(fval, Expression): segments.append('%s' % fval) elif isinstance(fval, FeatStruct): segments.append(fval._repr(reentrances, reentrance_ids)) else: segments.append('%r' % fval) return '%s[%s]' % (prefix, ', '.join(segments)) ###################################################################### # Variables & Bindings ###################################################################### def substitute_bindings(fstruct, bindings, fs_class='default'): """ Return the feature structure that is obtained by replacing each variable bound by ``bindings`` with its binding. If a variable is aliased to a bound variable, then it will be replaced by that variable's value. If a variable is aliased to an unbound variable, then it will be replaced by that variable. :type bindings: dict(Variable -> any) :param bindings: A dictionary mapping from variables to values. 
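
    For example (an illustrative doctest):

        >>> from nltk.featstruct import FeatStruct
        >>> from nltk.sem.logic import Variable
        >>> fs = FeatStruct('[a=?x, b=[c=?x]]')
        >>> substitute_bindings(fs, {Variable('?x'): 'cat'})
        [a='cat', b=[c='cat']]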
""" if fs_class == 'default': fs_class = _default_fs_class(fstruct) fstruct = copy.deepcopy(fstruct) _substitute_bindings(fstruct, bindings, fs_class, set()) return fstruct def _substitute_bindings(fstruct, bindings, fs_class, visited): # Visit each node only once: if id(fstruct) in visited: return visited.add(id(fstruct)) if _is_mapping(fstruct): items = fstruct.items() elif _is_sequence(fstruct): items = enumerate(fstruct) else: raise ValueError('Expected mapping or sequence') for (fname, fval) in items: while (isinstance(fval, Variable) and fval in bindings): fval = fstruct[fname] = bindings[fval] if isinstance(fval, fs_class): _substitute_bindings(fval, bindings, fs_class, visited) elif isinstance(fval, SubstituteBindingsI): fstruct[fname] = fval.substitute_bindings(bindings) def retract_bindings(fstruct, bindings, fs_class='default'): """ Return the feature structure that is obtained by replacing each feature structure value that is bound by ``bindings`` with the variable that binds it. A feature structure value must be identical to a bound value (i.e., have equal id) to be replaced. ``bindings`` is modified to point to this new feature structure, rather than the original feature structure. Feature structure values in ``bindings`` may be modified if they are contained in ``fstruct``. """ if fs_class == 'default': fs_class = _default_fs_class(fstruct) (fstruct, new_bindings) = copy.deepcopy((fstruct, bindings)) bindings.update(new_bindings) inv_bindings = dict((id(val),var) for (var,val) in bindings.items()) _retract_bindings(fstruct, inv_bindings, fs_class, set()) return fstruct def _retract_bindings(fstruct, inv_bindings, fs_class, visited): # Visit each node only once: if id(fstruct) in visited: return visited.add(id(fstruct)) if _is_mapping(fstruct): items = fstruct.items() elif _is_sequence(fstruct): items = enumerate(fstruct) else: raise ValueError('Expected mapping or sequence') for (fname, fval) in items: if isinstance(fval, fs_class): if id(fval) in inv_bindings: fstruct[fname] = inv_bindings[id(fval)] _retract_bindings(fval, inv_bindings, fs_class, visited) def find_variables(fstruct, fs_class='default'): """ :return: The set of variables used by this feature structure. :rtype: set(Variable) """ if fs_class == 'default': fs_class = _default_fs_class(fstruct) return _variables(fstruct, set(), fs_class, set()) def _variables(fstruct, vars, fs_class, visited): # Visit each node only once: if id(fstruct) in visited: return visited.add(id(fstruct)) if _is_mapping(fstruct): items = fstruct.items() elif _is_sequence(fstruct): items = enumerate(fstruct) else: raise ValueError('Expected mapping or sequence') for (fname, fval) in items: if isinstance(fval, Variable): vars.add(fval) elif isinstance(fval, fs_class): _variables(fval, vars, fs_class, visited) elif isinstance(fval, SubstituteBindingsI): vars.update(fval.variables()) return vars def rename_variables(fstruct, vars=None, used_vars=(), new_vars=None, fs_class='default'): """ Return the feature structure that is obtained by replacing any of this feature structure's variables that are in ``vars`` with new variables. The names for these new variables will be names that are not used by any variable in ``vars``, or in ``used_vars``, or in this feature structure. :type vars: set :param vars: The set of variables that should be renamed. If not specified, ``find_variables(fstruct)`` is used; i.e., all variables will be given new names. 
:type used_vars: set :param used_vars: A set of variables whose names should not be used by the new variables. :type new_vars: dict(Variable -> Variable) :param new_vars: A dictionary that is used to hold the mapping from old variables to new variables. For each variable *v* in this feature structure: - If ``new_vars`` maps *v* to *v'*, then *v* will be replaced by *v'*. - If ``new_vars`` does not contain *v*, but ``vars`` does contain *v*, then a new entry will be added to ``new_vars``, mapping *v* to the new variable that is used to replace it. To consistently rename the variables in a set of feature structures, simply apply rename_variables to each one, using the same dictionary: >>> from nltk.featstruct import FeatStruct >>> fstruct1 = FeatStruct('[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]') >>> fstruct2 = FeatStruct('[subj=[agr=[number=?z,gender=?y]], obj=[agr=[number=?z,gender=?y]]]') >>> new_vars = {} # Maps old vars to alpha-renamed vars >>> fstruct1.rename_variables(new_vars=new_vars) [obj=[agr=[gender=?y2]], subj=[agr=[gender=?y2]]] >>> fstruct2.rename_variables(new_vars=new_vars) [obj=[agr=[gender=?y2, number=?z2]], subj=[agr=[gender=?y2, number=?z2]]] If new_vars is not specified, then an empty dictionary is used. """ if fs_class == 'default': fs_class = _default_fs_class(fstruct) # Default values: if new_vars is None: new_vars = {} if vars is None: vars = find_variables(fstruct, fs_class) else: vars = set(vars) # Add our own variables to used_vars. used_vars = find_variables(fstruct, fs_class).union(used_vars) # Copy ourselves, and rename variables in the copy. return _rename_variables(copy.deepcopy(fstruct), vars, used_vars, new_vars, fs_class, set()) def _rename_variables(fstruct, vars, used_vars, new_vars, fs_class, visited): if id(fstruct) in visited: return visited.add(id(fstruct)) if _is_mapping(fstruct): items = fstruct.items() elif _is_sequence(fstruct): items = enumerate(fstruct) else: raise ValueError('Expected mapping or sequence') for (fname, fval) in items: if isinstance(fval, Variable): # If it's in new_vars, then rebind it. if fval in new_vars: fstruct[fname] = new_vars[fval] # If it's in vars, pick a new name for it. elif fval in vars: new_vars[fval] = _rename_variable(fval, used_vars) fstruct[fname] = new_vars[fval] used_vars.add(new_vars[fval]) elif isinstance(fval, fs_class): _rename_variables(fval, vars, used_vars, new_vars, fs_class, visited) elif isinstance(fval, SubstituteBindingsI): # Pick new names for any variables in `vars` for var in fval.variables(): if var in vars and var not in new_vars: new_vars[var] = _rename_variable(var, used_vars) used_vars.add(new_vars[var]) # Replace all variables in `new_vars`. fstruct[fname] = fval.substitute_bindings(new_vars) return fstruct def _rename_variable(var, used_vars): name, n = re.sub('\d+$', '', var.name), 2 if not name: name = '?' while Variable('%s%s' % (name, n)) in used_vars: n += 1 return Variable('%s%s' % (name, n)) def remove_variables(fstruct, fs_class='default'): """ :rtype: FeatStruct :return: The feature structure that is obtained by deleting all features whose values are ``Variables``. 
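
    For example (an illustrative doctest):

        >>> from nltk.featstruct import FeatStruct
        >>> remove_variables(FeatStruct('[a=1, b=?x, c=[d=?y, e=2]]'))
        [a=1, c=[e=2]]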
""" if fs_class == 'default': fs_class = _default_fs_class(fstruct) return _remove_variables(copy.deepcopy(fstruct), fs_class, set()) def _remove_variables(fstruct, fs_class, visited): if id(fstruct) in visited: return visited.add(id(fstruct)) if _is_mapping(fstruct): items = fstruct.items() elif _is_sequence(fstruct): items = enumerate(fstruct) else: raise ValueError('Expected mapping or sequence') for (fname, fval) in items: if isinstance(fval, Variable): del fstruct[fname] elif isinstance(fval, fs_class): _remove_variables(fval, fs_class, visited) return fstruct ###################################################################### # Unification ###################################################################### class _UnificationFailure(object): def __repr__(self): return 'nltk.featstruct.UnificationFailure' UnificationFailure = _UnificationFailure() """A unique value used to indicate unification failure. It can be returned by ``Feature.unify_base_values()`` or by custom ``fail()`` functions to indicate that unificaiton should fail.""" # The basic unification algorithm: # 1. Make copies of self and other (preserving reentrance) # 2. Destructively unify self and other # 3. Apply forward pointers, to preserve reentrance. # 4. Replace bound variables with their values. def unify(fstruct1, fstruct2, bindings=None, trace=False, fail=None, rename_vars=True, fs_class='default'): """ Unify ``fstruct1`` with ``fstruct2``, and return the resulting feature structure. This unified feature structure is the minimal feature structure that contains all feature value assignments from both ``fstruct1`` and ``fstruct2``, and that preserves all reentrancies. If no such feature structure exists (because ``fstruct1`` and ``fstruct2`` specify incompatible values for some feature), then unification fails, and ``unify`` returns None. Bound variables are replaced by their values. Aliased variables are replaced by their representative variable (if unbound) or the value of their representative variable (if bound). I.e., if variable *v* is in ``bindings``, then *v* is replaced by ``bindings[v]``. This will be repeated until the variable is replaced by an unbound variable or a non-variable value. Unbound variables are bound when they are unified with values; and aliased when they are unified with variables. I.e., if variable *v* is not in ``bindings``, and is unified with a variable or value *x*, then ``bindings[v]`` is set to *x*. If ``bindings`` is unspecified, then all variables are assumed to be unbound. I.e., ``bindings`` defaults to an empty dict. >>> from nltk.featstruct import FeatStruct >>> FeatStruct('[a=?x]').unify(FeatStruct('[b=?x]')) [a=?x, b=?x2] :type bindings: dict(Variable -> any) :param bindings: A set of variable bindings to be used and updated during unification. :type trace: bool :param trace: If true, generate trace output. :type rename_vars: bool :param rename_vars: If True, then rename any variables in ``fstruct2`` that are also used in ``fstruct1``, in order to avoid collisions on variable names. """ # Decide which class(es) will be treated as feature structures, # for the purposes of unification. if fs_class == 'default': fs_class = _default_fs_class(fstruct1) if _default_fs_class(fstruct2) != fs_class: raise ValueError("Mixing FeatStruct objects with Python " "dicts and lists is not supported.") assert isinstance(fstruct1, fs_class) assert isinstance(fstruct2, fs_class) # If bindings are unspecified, use an empty set of bindings. 
user_bindings = (bindings is not None) if bindings is None: bindings = {} # Make copies of fstruct1 and fstruct2 (since the unification # algorithm is destructive). Do it all at once, to preserve # reentrance links between fstruct1 and fstruct2. Copy bindings # as well, in case there are any bound vars that contain parts # of fstruct1 or fstruct2. (fstruct1copy, fstruct2copy, bindings_copy) = ( copy.deepcopy((fstruct1, fstruct2, bindings))) # Copy the bindings back to the original bindings dict. bindings.update(bindings_copy) if rename_vars: vars1 = find_variables(fstruct1copy, fs_class) vars2 = find_variables(fstruct2copy, fs_class) _rename_variables(fstruct2copy, vars1, vars2, {}, fs_class, set()) # Do the actual unification. If it fails, return None. forward = {} if trace: _trace_unify_start((), fstruct1copy, fstruct2copy) try: result = _destructively_unify(fstruct1copy, fstruct2copy, bindings, forward, trace, fail, fs_class, ()) except _UnificationFailureError: return None # _destructively_unify might return UnificationFailure, e.g. if we # tried to unify a mapping with a sequence. if result is UnificationFailure: if fail is None: return None else: return fail(fstruct1copy, fstruct2copy, ()) # Replace any feature structure that has a forward pointer # with the target of its forward pointer. result = _apply_forwards(result, forward, fs_class, set()) if user_bindings: _apply_forwards_to_bindings(forward, bindings) # Replace bound vars with values. _resolve_aliases(bindings) _substitute_bindings(result, bindings, fs_class, set()) # Return the result. if trace: _trace_unify_succeed((), result) if trace: _trace_bindings((), bindings) return result class _UnificationFailureError(Exception): """An exception that is used by ``_destructively_unify`` to abort unification when a failure is encountered.""" def _destructively_unify(fstruct1, fstruct2, bindings, forward, trace, fail, fs_class, path): """ Attempt to unify ``fstruct1`` and ``fstruct2`` by modifying them in-place. If the unification succeeds, then ``fstruct1`` will contain the unified value, the value of ``fstruct2`` is undefined, and forward[id(fstruct2)] is set to fstruct1. If the unification fails, then a _UnificationFailureError is raised, and the values of ``fstruct1`` and ``fstruct2`` are undefined. :param bindings: A dictionary mapping variables to values. :param forward: A dictionary mapping feature structures ids to replacement structures. When two feature structures are merged, a mapping from one to the other will be added to the forward dictionary; and changes will be made only to the target of the forward dictionary. ``_destructively_unify`` will always 'follow' any links in the forward dictionary for fstruct1 and fstruct2 before actually unifying them. :param trace: If true, generate trace output :param path: The feature path that led us to this unification step. Used for trace output. """ # If fstruct1 is already identical to fstruct2, we're done. # Note: this, together with the forward pointers, ensures # that unification will terminate even for cyclic structures. if fstruct1 is fstruct2: if trace: _trace_unify_identity(path, fstruct1) return fstruct1 # Set fstruct2's forward pointer to point to fstruct1; this makes # fstruct1 the canonical copy for fstruct2. Note that we need to # do this before we recurse into any child structures, in case # they're cyclic. 
    forward[id(fstruct2)] = fstruct1

    # Unifying two mappings:
    if _is_mapping(fstruct1) and _is_mapping(fstruct2):
        for fname in fstruct1:
            if getattr(fname, 'default', None) is not None:
                fstruct2.setdefault(fname, fname.default)
        for fname in fstruct2:
            if getattr(fname, 'default', None) is not None:
                fstruct1.setdefault(fname, fname.default)

        # Unify any values that are defined in both fstruct1 and
        # fstruct2.  Copy any values that are defined in fstruct2 but
        # not in fstruct1 to fstruct1.  Note: sorting fstruct2's
        # features isn't actually necessary; but we do it to give
        # deterministic behavior, e.g. for tracing.
        for fname, fval2 in sorted(fstruct2.items()):
            if fname in fstruct1:
                fstruct1[fname] = _unify_feature_values(
                    fname, fstruct1[fname], fval2, bindings,
                    forward, trace, fail, fs_class, path+(fname,))
            else:
                fstruct1[fname] = fval2

        return fstruct1 # Contains the unified value.

    # Unifying two sequences:
    elif _is_sequence(fstruct1) and _is_sequence(fstruct2):
        # If the lengths don't match, fail.
        if len(fstruct1) != len(fstruct2):
            return UnificationFailure

        # Unify corresponding values in fstruct1 and fstruct2.
        for findex in range(len(fstruct1)):
            fstruct1[findex] = _unify_feature_values(
                findex, fstruct1[findex], fstruct2[findex], bindings,
                forward, trace, fail, fs_class, path+(findex,))

        return fstruct1 # Contains the unified value.

    # Unifying sequence & mapping: fail.  The failure function
    # doesn't get a chance to recover in this case.
    elif ((_is_sequence(fstruct1) or _is_mapping(fstruct1)) and
          (_is_sequence(fstruct2) or _is_mapping(fstruct2))):
        return UnificationFailure

    # Unifying anything else: not allowed!
    raise TypeError('Expected mappings or sequences')

def _unify_feature_values(fname, fval1, fval2, bindings, forward,
                          trace, fail, fs_class, fpath):
    """
    Attempt to unify ``fval1`` and ``fval2``, and return the
    resulting unified value.  The method of unification will depend on
    the types of ``fval1`` and ``fval2``:

      1. If they're both feature structures, then destructively
         unify them (see ``_destructively_unify()``).
      2. If they're both unbound variables, then alias one variable
         to the other (by setting bindings[v2]=v1).
      3. If one is an unbound variable, and the other is a value,
         then bind the unbound variable to the value.
      4. If one is a feature structure, and the other is a base value,
         then fail.
      5. If they're both base values, then unify them.  By default,
         this will succeed if they are equal, and fail otherwise.
    """
    if trace: _trace_unify_start(fpath, fval1, fval2)

    # Look up the "canonical" copy of fval1 and fval2
    while id(fval1) in forward: fval1 = forward[id(fval1)]
    while id(fval2) in forward: fval2 = forward[id(fval2)]

    # If fval1 or fval2 is a bound variable, then
    # replace it by the variable's bound value.  This
    # includes aliased variables, which are encoded as
    # variables bound to other variables.
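    # (For example, with bindings {?x: ?y, ?y: 'dog'}, the value ?x
    # resolves through the alias ?y to the base value 'dog'.)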
    fvar1 = fvar2 = None
    while isinstance(fval1, Variable) and fval1 in bindings:
        fvar1 = fval1
        fval1 = bindings[fval1]
    while isinstance(fval2, Variable) and fval2 in bindings:
        fvar2 = fval2
        fval2 = bindings[fval2]

    # Case 1: Two feature structures (recursive case)
    if isinstance(fval1, fs_class) and isinstance(fval2, fs_class):
        result = _destructively_unify(fval1, fval2, bindings, forward,
                                      trace, fail, fs_class, fpath)

    # Case 2: Two unbound variables (create alias)
    elif (isinstance(fval1, Variable) and
          isinstance(fval2, Variable)):
        if fval1 != fval2: bindings[fval2] = fval1
        result = fval1

    # Case 3: An unbound variable and a value (bind)
    elif isinstance(fval1, Variable):
        bindings[fval1] = fval2
        result = fval1
    elif isinstance(fval2, Variable):
        bindings[fval2] = fval1
        result = fval2

    # Case 4: A feature structure & a base value (fail)
    elif isinstance(fval1, fs_class) or isinstance(fval2, fs_class):
        result = UnificationFailure

    # Case 5: Two base values
    else:
        # Case 5a: Feature defines a custom unification method for base values
        if isinstance(fname, Feature):
            result = fname.unify_base_values(fval1, fval2, bindings)
        # Case 5b: Feature value defines custom unification method
        elif isinstance(fval1, CustomFeatureValue):
            result = fval1.unify(fval2)
            # Sanity check: unification should be symmetric
            if (isinstance(fval2, CustomFeatureValue) and
                result != fval2.unify(fval1)):
                raise AssertionError(
                    'CustomFeatureValue objects %r and %r disagree '
                    'about unification value: %r vs. %r' %
                    (fval1, fval2, result, fval2.unify(fval1)))
        elif isinstance(fval2, CustomFeatureValue):
            result = fval2.unify(fval1)
        # Case 5c: Simple values -- check if they're equal.
        else:
            if fval1 == fval2:
                result = fval1
            else:
                result = UnificationFailure

        # If either value was a bound variable, then update the
        # bindings.  (This is really only necessary if fname is a
        # Feature or if either value is a CustomFeatureValue.)
        if result is not UnificationFailure:
            if fvar1 is not None:
                bindings[fvar1] = result
                result = fvar1
            if fvar2 is not None and fvar2 != fvar1:
                bindings[fvar2] = result
                result = fvar2

    # If unification failed, call the failure function; it
    # might decide to continue anyway.
    if result is UnificationFailure:
        if fail is not None: result = fail(fval1, fval2, fpath)
        if trace: _trace_unify_fail(fpath[:-1], result)
        if result is UnificationFailure:
            raise _UnificationFailureError

    # Normalize the result.
    if isinstance(result, fs_class):
        result = _apply_forwards(result, forward, fs_class, set())

    if trace: _trace_unify_succeed(fpath, result)
    if trace and isinstance(result, fs_class):
        _trace_bindings(fpath, bindings)

    return result

def _apply_forwards_to_bindings(forward, bindings):
    """
    Replace any feature structure that has a forward pointer with
    the target of its forward pointer (to preserve reentrancy).
    """
    for (var, value) in bindings.items():
        while id(value) in forward:
            value = forward[id(value)]
        bindings[var] = value

def _apply_forwards(fstruct, forward, fs_class, visited):
    """
    Replace any feature structure that has a forward pointer with
    the target of its forward pointer (to preserve reentrancy).
    """
    # Follow our own forwards pointers (if any)
    while id(fstruct) in forward: fstruct = forward[id(fstruct)]

    # Visit each node only once:
    if id(fstruct) in visited: return
    visited.add(id(fstruct))

    if _is_mapping(fstruct): items = fstruct.items()
    elif _is_sequence(fstruct): items = enumerate(fstruct)
    else: raise ValueError('Expected mapping or sequence')
    for fname, fval in items:
        if isinstance(fval, fs_class):
            # Replace w/ forwarded value.
            while id(fval) in forward: fval = forward[id(fval)]
            fstruct[fname] = fval
            # Recurse to child.
            _apply_forwards(fval, forward, fs_class, visited)

    return fstruct

def _resolve_aliases(bindings):
    """
    Replace any bound aliased vars with their binding; and replace
    any unbound aliased vars with their representative var.
    """
    for (var, value) in bindings.items():
        while isinstance(value, Variable) and value in bindings:
            value = bindings[var] = bindings[value]

def _trace_unify_start(path, fval1, fval2):
    if path == ():
        print '\nUnification trace:'
    else:
        fullname = '.'.join(str(n) for n in path)
        print ' '+'| '*(len(path)-1)+'|'
        print ' '+'| '*(len(path)-1)+'| Unify feature: %s' % fullname
    print ' '+'| '*len(path)+' / '+_trace_valrepr(fval1)
    print ' '+'| '*len(path)+'|\\ '+_trace_valrepr(fval2)

def _trace_unify_identity(path, fval1):
    print ' '+'| '*len(path)+'|'
    print ' '+'| '*len(path)+'| (identical objects)'
    print ' '+'| '*len(path)+'|'
    print ' '+'| '*len(path)+'+-->'+`fval1`

def _trace_unify_fail(path, result):
    if result is UnificationFailure: resume = ''
    else: resume = ' (nonfatal)'
    print ' '+'| '*len(path)+'| |'
    print ' '+'X '*len(path)+'X X <-- FAIL'+resume

def _trace_unify_succeed(path, fval1):
    # Print the result.
    print ' '+'| '*len(path)+'|'
    print ' '+'| '*len(path)+'+-->'+`fval1`

def _trace_bindings(path, bindings):
    # Print the bindings (if any).
    if len(bindings) > 0:
        binditems = sorted(bindings.items(), key=lambda v:v[0].name)
        bindstr = '{%s}' % ', '.join(
            '%s: %s' % (var, _trace_valrepr(val))
            for (var, val) in binditems)
        print ' '+'| '*len(path)+'    Bindings: '+bindstr

def _trace_valrepr(val):
    if isinstance(val, Variable):
        return '%s' % val
    else:
        return '%r' % val

def subsumes(fstruct1, fstruct2):
    """
    Return True if ``fstruct1`` subsumes ``fstruct2``.  I.e., return
    true if unifying ``fstruct1`` with ``fstruct2`` would result in a
    feature structure equal to ``fstruct2``.

    :rtype: bool
    """
    return fstruct2 == unify(fstruct1, fstruct2)

def conflicts(fstruct1, fstruct2, trace=0):
    """
    Return a list of the feature paths of all features which are
    assigned incompatible values by ``fstruct1`` and ``fstruct2``.

    :rtype: list(tuple)
    """
    conflict_list = []
    def add_conflict(fval1, fval2, path):
        conflict_list.append(path)
        return fval1
    unify(fstruct1, fstruct2, fail=add_conflict, trace=trace)
    return conflict_list


######################################################################
# Helper Functions
######################################################################

def _is_mapping(v):
    return hasattr(v, 'has_key') and hasattr(v, 'items')

def _is_sequence(v):
    return (hasattr(v, '__iter__') and hasattr(v, '__len__') and
            not isinstance(v, basestring))

def _default_fs_class(obj):
    if isinstance(obj, FeatStruct): return FeatStruct
    if isinstance(obj, (dict, list)):
        return (dict, list)
    else:
        raise ValueError('To unify objects of type %s, you must specify '
                         'fs_class explicitly.' % obj.__class__.__name__)

######################################################################
# FeatureValueSet & FeatureValueTuple
######################################################################

class SubstituteBindingsSequence(SubstituteBindingsI):
    """
    A mixin class for sequence classes that distributes variables() and
    substitute_bindings() over the object's elements.
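
    For example (an illustrative doctest):

        >>> from nltk.sem.logic import Variable
        >>> t = FeatureValueTuple([Variable('?x'), 'b'])
        >>> t.substitute_bindings({Variable('?x'): 'a'})
        (a, b)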
""" def variables(self): return ([elt for elt in self if isinstance(elt, Variable)] + sum([list(elt.variables()) for elt in self if isinstance(elt, SubstituteBindingsI)], [])) def substitute_bindings(self, bindings): return self.__class__([self.subst(v, bindings) for v in self]) def subst(self, v, bindings): if isinstance(v, SubstituteBindingsI): return v.substitute_bindings(bindings) else: return bindings.get(v, v) class FeatureValueTuple(SubstituteBindingsSequence, tuple): """ A base feature value that is a tuple of other base feature values. FeatureValueTuple implements ``SubstituteBindingsI``, so it any variable substitutions will be propagated to the elements contained by the set. A ``FeatureValueTuple`` is immutable. """ def __repr__(self): # [xx] really use %s here? if len(self) == 0: return '()' return '(%s)' % ', '.join('%s' % (b,) for b in self) class FeatureValueSet(SubstituteBindingsSequence, frozenset): """ A base feature value that is a set of other base feature values. FeatureValueSet implements ``SubstituteBindingsI``, so it any variable substitutions will be propagated to the elements contained by the set. A ``FeatureValueSet`` is immutable. """ def __repr__(self): # [xx] really use %s here? if len(self) == 0: return '{/}' # distinguish from dict. # n.b., we sort the string reprs of our elements, to ensure # that our own repr is deterministic. return '{%s}' % ', '.join(sorted('%s' % (b,) for b in self)) __str__ = __repr__ class FeatureValueUnion(SubstituteBindingsSequence, frozenset): """ A base feature value that represents the union of two or more ``FeatureValueSet`` or ``Variable``. """ def __new__(cls, values): # If values contains FeatureValueUnions, then collapse them. values = _flatten(values, FeatureValueUnion) # If the resulting list contains no variables, then # use a simple FeatureValueSet instead. if sum(isinstance(v, Variable) for v in values) == 0: values = _flatten(values, FeatureValueSet) return FeatureValueSet(values) # If we contain a single variable, return that variable. if len(values) == 1: return list(values)[0] # Otherwise, build the FeatureValueUnion. return frozenset.__new__(cls, values) def __repr__(self): # n.b., we sort the string reprs of our elements, to ensure # that our own repr is deterministic. also, note that len(self) # is guaranteed to be 2 or more. return '{%s}' % '+'.join(sorted('%s' % (b,) for b in self)) class FeatureValueConcat(SubstituteBindingsSequence, tuple): """ A base feature value that represents the concatenation of two or more ``FeatureValueTuple`` or ``Variable``. """ def __new__(cls, values): # If values contains FeatureValueConcats, then collapse them. values = _flatten(values, FeatureValueConcat) # If the resulting list contains no variables, then # use a simple FeatureValueTuple instead. if sum(isinstance(v, Variable) for v in values) == 0: values = _flatten(values, FeatureValueTuple) return FeatureValueTuple(values) # If we contain a single variable, return that variable. if len(values) == 1: return list(values)[0] # Otherwise, build the FeatureValueConcat. return tuple.__new__(cls, values) def __repr__(self): # n.b.: len(self) is guaranteed to be 2 or more. return '(%s)' % '+'.join('%s' % (b,) for b in self) def _flatten(lst, cls): """ Helper function -- return a copy of list, with all elements of type ``cls`` spliced in rather than appended in. 
""" result = [] for elt in lst: if isinstance(elt, cls): result.extend(elt) else: result.append(elt) return result ###################################################################### # Specialized Features ###################################################################### class Feature(object): """ A feature identifier that's specialized to put additional constraints, default values, etc. """ def __init__(self, name, default=None, display=None): assert display in (None, 'prefix', 'slash') self._name = name # [xx] rename to .identifier? self._default = default # [xx] not implemented yet. self._display = display if self._display == 'prefix': self._sortkey = (-1, self._name) elif self._display == 'slash': self._sortkey = (1, self._name) else: self._sortkey = (0, self._name) @property def name(self): """The name of this feature.""" return self._name @property def default(self): """Default value for this feature.""" return self._default @property def display(self): """Custom display location: can be prefix, or slash.""" return self._display def __repr__(self): return '*%s*' % self.name def __cmp__(self, other): if not isinstance(other, Feature): return -1 if self._name == other._name: return 0 return cmp(self._sortkey, other._sortkey) def __hash__(self): return hash(self._name) #//////////////////////////////////////////////////////////// # These can be overridden by subclasses: #//////////////////////////////////////////////////////////// def parse_value(self, s, position, reentrances, parser): return parser.parse_value(s, position, reentrances) def unify_base_values(self, fval1, fval2, bindings): """ If possible, return a single value.. If not, return the value ``UnificationFailure``. """ if fval1 == fval2: return fval1 else: return UnificationFailure class SlashFeature(Feature): def parse_value(self, s, position, reentrances, parser): return parser.partial_parse(s, position, reentrances) class RangeFeature(Feature): RANGE_RE = re.compile('(-?\d+):(-?\d+)') def parse_value(self, s, position, reentrances, parser): m = self.RANGE_RE.match(s, position) if not m: raise ValueError('range', position) return (int(m.group(1)), int(m.group(2))), m.end() def unify_base_values(self, fval1, fval2, bindings): if fval1 is None: return fval2 if fval2 is None: return fval1 rng = max(fval1[0], fval2[0]), min(fval1[1], fval2[1]) if rng[1] < rng[0]: return UnificationFailure return rng SLASH = SlashFeature('slash', default=False, display='slash') TYPE = Feature('type', display='prefix') ###################################################################### # Specialized Feature Values ###################################################################### class CustomFeatureValue(object): """ An abstract base class for base values that define a custom unification method. The custom unification method of ``CustomFeatureValue`` will be used during unification if: - The ``CustomFeatureValue`` is unified with another base value. - The ``CustomFeatureValue`` is not the value of a customized ``Feature`` (which defines its own unification method). If two ``CustomFeatureValue`` objects are unified with one another during feature structure unification, then the unified base values they return *must* be equal; otherwise, an ``AssertionError`` will be raised. Subclasses must define ``unify()`` and ``__cmp__()``. Subclasses may also wish to define ``__hash__()``. """ def unify(self, other): """ If this base value unifies with ``other``, then return the unified value. Otherwise, return ``UnificationFailure``. 
""" raise NotImplementedError('abstract base class') def __cmp__(self, other): raise NotImplementedError('abstract base class') def __hash__(self): raise TypeError('%s objects or unhashable' % self.__class__.__name__) ###################################################################### # Feature Structure Parser ###################################################################### class FeatStructParser(object): def __init__(self, features=(SLASH, TYPE), fdict_class=FeatStruct, flist_class=FeatList, logic_parser=None): self._features = dict((f.name,f) for f in features) self._fdict_class = fdict_class self._flist_class = flist_class self._prefix_feature = None self._slash_feature = None for feature in features: if feature.display == 'slash': if self._slash_feature: raise ValueError('Multiple features w/ display=slash') self._slash_feature = feature if feature.display == 'prefix': if self._prefix_feature: raise ValueError('Multiple features w/ display=prefix') self._prefix_feature = feature self._features_with_defaults = [feature for feature in features if feature.default is not None] if logic_parser is None: logic_parser = LogicParser() self._logic_parser = logic_parser def parse(self, s, fstruct=None): """ Convert a string representation of a feature structure (as displayed by repr) into a ``FeatStruct``. This parse imposes the following restrictions on the string representation: - Feature names cannot contain any of the following: whitespace, parentheses, quote marks, equals signs, dashes, commas, and square brackets. Feature names may not begin with plus signs or minus signs. - Only the following basic feature value are supported: strings, integers, variables, None, and unquoted alphanumeric strings. - For reentrant values, the first mention must specify a reentrance identifier and a value; and any subsequent mentions must use arrows (``'->'``) to reference the reentrance identifier. """ s = s.strip() value, position = self.partial_parse(s, 0, {}, fstruct) if position != len(s): self._error(s, 'end of string', position) return value _START_FSTRUCT_RE = re.compile(r'\s*(?:\((\d+)\)\s*)?(\??[\w-]+)?(\[)') _END_FSTRUCT_RE = re.compile(r'\s*]\s*') _SLASH_RE = re.compile(r'/') _FEATURE_NAME_RE = re.compile(r'\s*([+-]?)([^\s\(\)<>"\'\-=\[\],]+)\s*') _REENTRANCE_RE = re.compile(r'\s*->\s*') _TARGET_RE = re.compile(r'\s*\((\d+)\)\s*') _ASSIGN_RE = re.compile(r'\s*=\s*') _COMMA_RE = re.compile(r'\s*,\s*') _BARE_PREFIX_RE = re.compile(r'\s*(?:\((\d+)\)\s*)?(\??[\w-]+\s*)()') # This one is used to distinguish fdicts from flists: _START_FDICT_RE = re.compile(r'(%s)|(%s\s*(%s\s*(=|->)|[+-]%s|\]))' % ( _BARE_PREFIX_RE.pattern, _START_FSTRUCT_RE.pattern, _FEATURE_NAME_RE.pattern, _FEATURE_NAME_RE.pattern)) def partial_parse(self, s, position=0, reentrances=None, fstruct=None): """ Helper function that parses a feature structure. :param s: The string to parse. :param position: The position in the string to start parsing. :param reentrances: A dictionary from reentrance ids to values. Defaults to an empty dictionary. :return: A tuple (val, pos) of the feature structure created by parsing and the position where the parsed feature structure ends. 

        :rtype: tuple(FeatStruct, int)
        """
        if reentrances is None: reentrances = {}
        try:
            return self._partial_parse(s, position, reentrances, fstruct)
        except ValueError, e:
            if len(e.args) != 2: raise
            self._error(s, *e.args)

    def _partial_parse(self, s, position, reentrances, fstruct=None):
        # Create the new feature structure
        if fstruct is None:
            if self._START_FDICT_RE.match(s, position):
                fstruct = self._fdict_class()
            else:
                fstruct = self._flist_class()

        # Read up to the open bracket.
        match = self._START_FSTRUCT_RE.match(s, position)
        if not match:
            match = self._BARE_PREFIX_RE.match(s, position)
            if not match:
                raise ValueError('open bracket or identifier', position)
        position = match.end()

        # If there was an identifier, record it.
        if match.group(1):
            identifier = match.group(1)
            if identifier in reentrances:
                raise ValueError('new identifier', match.start(1))
            reentrances[identifier] = fstruct

        if isinstance(fstruct, FeatDict):
            fstruct.clear()
            return self._partial_parse_featdict(s, position, match,
                                                reentrances, fstruct)
        else:
            del fstruct[:]
            return self._partial_parse_featlist(s, position, match,
                                                reentrances, fstruct)

    def _partial_parse_featlist(self, s, position, match,
                                reentrances, fstruct):
        # Prefix features are not allowed:
        if match.group(2): raise ValueError('open bracket')
        # Bare prefixes are not allowed:
        if not match.group(3): raise ValueError('open bracket')

        # Build a list of the features defined by the structure.
        while position < len(s):
            # Check for the close bracket.
            match = self._END_FSTRUCT_RE.match(s, position)
            if match is not None:
                return fstruct, match.end()

            # Reentrances have the form "-> (target)"
            match = self._REENTRANCE_RE.match(s, position)
            if match:
                position = match.end()
                match = self._TARGET_RE.match(s, position)
                if not match: raise ValueError('identifier', position)
                target = match.group(1)
                if target not in reentrances:
                    raise ValueError('bound identifier', position)
                position = match.end()
                fstruct.append(reentrances[target])

            # Anything else is a value.
            else:
                value, position = (
                    self._parse_value(0, s, position, reentrances))
                fstruct.append(value)

            # If there's a close bracket, handle it at the top of the loop.
            if self._END_FSTRUCT_RE.match(s, position):
                continue

            # Otherwise, there should be a comma
            match = self._COMMA_RE.match(s, position)
            if match is None: raise ValueError('comma', position)
            position = match.end()

        # We never saw a close bracket.
        raise ValueError('close bracket', position)

    def _partial_parse_featdict(self, s, position, match,
                                reentrances, fstruct):
        # If there was a prefix feature, record it.
        if match.group(2):
            if self._prefix_feature is None:
                raise ValueError('open bracket or identifier', match.start(2))
            prefixval = match.group(2).strip()
            if prefixval.startswith('?'):
                prefixval = Variable(prefixval)
            fstruct[self._prefix_feature] = prefixval

        # If group 3 is empty, then we just have a bare prefix, so
        # we're done.
        if not match.group(3):
            return self._finalize(s, match.end(), reentrances, fstruct)

        # Build a list of the features defined by the structure.
        # Each feature has one of the following forms:
        #     name = value
        #     name -> (target)
        #     +name
        #     -name
        while position < len(s):
            # Use these variables to hold info about each feature:
            name = value = None

            # Check for the close bracket.
            match = self._END_FSTRUCT_RE.match(s, position)
            if match is not None:
                return self._finalize(s, match.end(), reentrances, fstruct)

            # Get the feature's name
            match = self._FEATURE_NAME_RE.match(s, position)
            if match is None: raise ValueError('feature name', position)
            name = match.group(2)
            position = match.end()

            # Check if it's a special feature.
            if name[0] == '*' and name[-1] == '*':
                name = self._features.get(name[1:-1])
                if name is None:
                    raise ValueError('known special feature', match.start(2))

            # Check if this feature has a value already.
            if name in fstruct:
                raise ValueError('new name', match.start(2))

            # Boolean value ("+name" or "-name")
            if match.group(1) == '+': value = True
            if match.group(1) == '-': value = False

            # Reentrance link ("-> (target)")
            if value is None:
                match = self._REENTRANCE_RE.match(s, position)
                if match is not None:
                    position = match.end()
                    match = self._TARGET_RE.match(s, position)
                    if not match: raise ValueError('identifier', position)
                    target = match.group(1)
                    if target not in reentrances:
                        raise ValueError('bound identifier', position)
                    position = match.end()
                    value = reentrances[target]

            # Assignment ("= value").
            if value is None:
                match = self._ASSIGN_RE.match(s, position)
                if match:
                    position = match.end()
                    value, position = (
                        self._parse_value(name, s, position, reentrances))
                # None of the above: error.
                else:
                    raise ValueError('equals sign', position)

            # Store the value.
            fstruct[name] = value

            # If there's a close bracket, handle it at the top of the loop.
            if self._END_FSTRUCT_RE.match(s, position):
                continue

            # Otherwise, there should be a comma
            match = self._COMMA_RE.match(s, position)
            if match is None: raise ValueError('comma', position)
            position = match.end()

        # We never saw a close bracket.
        raise ValueError('close bracket', position)

    def _finalize(self, s, pos, reentrances, fstruct):
        """
        Called when we see the close brace -- checks for a slash feature,
        and adds in default values.
        """
        # Add the slash feature (if any)
        match = self._SLASH_RE.match(s, pos)
        if match:
            name = self._slash_feature
            v, pos = self._parse_value(name, s, match.end(), reentrances)
            fstruct[name] = v
        ## Add any default features.  -- handle in unification instead?
        #for feature in self._features_with_defaults:
        #    fstruct.setdefault(feature, feature.default)
        # Return the value.
        return fstruct, pos

    def _parse_value(self, name, s, position, reentrances):
        if isinstance(name, Feature):
            return name.parse_value(s, position, reentrances, self)
        else:
            return self.parse_value(s, position, reentrances)

    def parse_value(self, s, position, reentrances):
        for (handler, regexp) in self.VALUE_HANDLERS:
            match = regexp.match(s, position)
            if match:
                handler_func = getattr(self, handler)
                return handler_func(s, position, reentrances, match)
        raise ValueError('value', position)

    def _error(self, s, expected, position):
        lines = s.split('\n')
        while position > len(lines[0]):
            position -= len(lines.pop(0))+1 # +1 for the newline.
        estr = ('Error parsing feature structure\n    ' +
                lines[0] + '\n    ' + ' '*position + '^ ' +
                'Expected %s' % expected)
        raise ValueError, estr

    #////////////////////////////////////////////////////////////
    #{ Value Parsers
    #////////////////////////////////////////////////////////////

    #: A table indicating how feature values should be parsed.  Each
    #: entry in the table is a pair (handler, regexp).  The first entry
    #: with a matching regexp will have its handler called.  Handlers
    #: should have the following signature::
    #:
    #:    def handler(s, position, reentrances, match): ...
    #:
    #: and should return a tuple (value, position), where position is
    #: the string position where the value ended.  (n.b.: order is
    #: important here!)
    VALUE_HANDLERS = [
        ('parse_fstruct_value', _START_FSTRUCT_RE),
        ('parse_var_value', re.compile(r'\?[a-zA-Z_][a-zA-Z0-9_]*')),
        ('parse_str_value', re.compile("[uU]?[rR]?(['\"])")),
        ('parse_int_value', re.compile(r'-?\d+')),
        ('parse_sym_value', re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')),
        ('parse_app_value', re.compile(r'<(app)\((\?[a-z][a-z]*)\s*,'
                                       r'\s*(\?[a-z][a-z]*)\)>')),
#        ('parse_logic_value', re.compile(r'<([^>]*)>')),
        # Lazily match any character after '<' until we hit a '>' not
        # preceded by '-'.
        ('parse_logic_value', re.compile(r'<(.*?)(?<!-)>')),
        ('parse_set_value', re.compile(r'{')),
        ('parse_tuple_value', re.compile(r'\(')),
        ]

    def parse_fstruct_value(self, s, position, reentrances, match):
        return self.partial_parse(s, position, reentrances)

    def parse_str_value(self, s, position, reentrances, match):
        return nltk.internals.parse_str(s, position)

    def parse_int_value(self, s, position, reentrances, match):
        return int(match.group()), match.end()

    # Note: the '?' is included in the variable name.
    def parse_var_value(self, s, position, reentrances, match):
        return Variable(match.group()), match.end()

    _SYM_CONSTS = {'None':None, 'True':True, 'False':False}
    def parse_sym_value(self, s, position, reentrances, match):
        val, end = match.group(), match.end()
        return self._SYM_CONSTS.get(val, val), end

    def parse_app_value(self, s, position, reentrances, match):
        """Mainly included for backwards compat."""
        return self._logic_parser.parse('%s(%s)' % match.group(2,3)), match.end()

    def parse_logic_value(self, s, position, reentrances, match):
        try:
            try:
                expr = self._logic_parser.parse(match.group(1))
            except ParseException:
                raise ValueError()
            return expr, match.end()
        except ValueError:
            raise ValueError('logic expression', match.start(1))

    def parse_tuple_value(self, s, position, reentrances, match):
        return self._parse_seq_value(s, position, reentrances, match, ')',
                                     FeatureValueTuple, FeatureValueConcat)

    def parse_set_value(self, s, position, reentrances, match):
        return self._parse_seq_value(s, position, reentrances, match, '}',
                                     FeatureValueSet, FeatureValueUnion)

    def _parse_seq_value(self, s, position, reentrances, match,
                         close_paren, seq_class, plus_class):
        """
        Helper function used by parse_tuple_value and parse_set_value.
        """
        cp = re.escape(close_paren)
        position = match.end()
        # Special syntax for empty tuples:
        m = re.compile(r'\s*/?\s*%s' % cp).match(s, position)
        if m: return seq_class(), m.end()
        # Read values:
        values = []
        seen_plus = False
        while True:
            # Close paren: return value.
            m = re.compile(r'\s*%s' % cp).match(s, position)
            if m:
                if seen_plus: return plus_class(values), m.end()
                else: return seq_class(values), m.end()

            # Read the next value.
            val, position = self.parse_value(s, position, reentrances)
            values.append(val)

            # Comma or plus, or looking at the close paren.
            m = re.compile(r'\s*(,|\+|(?=%s))\s*' % cp).match(s, position)
            if not m: raise ValueError("',' or '+' or '%s'" % cp, position)
            if m.group(1) == '+': seen_plus = True
            position = m.end()

######################################################################
#{ Demo
######################################################################

def display_unification(fs1, fs2, indent=' '):
    # Print the two input feature structures, side by side.
    fs1_lines = str(fs1).split('\n')
    fs2_lines = str(fs2).split('\n')
    if len(fs1_lines) > len(fs2_lines):
        blankline = '['+' '*(len(fs2_lines[0])-2)+']'
        # Pad the shorter structure so the two columns line up.
        fs2_lines += [blankline]*(len(fs1_lines)-len(fs2_lines))
    else:
        blankline = '['+' '*(len(fs1_lines[0])-2)+']'
        fs1_lines += [blankline]*(len(fs2_lines)-len(fs1_lines))
    for (fs1_line, fs2_line) in zip(fs1_lines, fs2_lines):
        print indent + fs1_line + '   ' + fs2_line
    print indent+'-'*len(fs1_lines[0])+'   '+'-'*len(fs2_lines[0])

    linelen = len(fs1_lines[0])*2+3
    print indent+'|               |'.center(linelen)
    print indent+'+-----UNIFY-----+'.center(linelen)
    print indent+'|'.center(linelen)
    print indent+'V'.center(linelen)

    bindings = {}

    result = fs1.unify(fs2, bindings)
    if result is None:
        print indent+'(FAILED)'.center(linelen)
    else:
        print '\n'.join(indent+l.center(linelen)
                        for l in str(result).split('\n'))
        if bindings and len(bindings.bound_variables()) > 0:
            print repr(bindings).center(linelen)
    return result

def interactive_demo(trace=False):
    import random, sys

    HELP = '''
    1-%d: Select the corresponding feature structure
    q: Quit
    t: Turn tracing on or off
    l: List all feature structures
    ?: Help
    '''

    print '''
    This demo will repeatedly present you with a list of feature
    structures, and ask you to choose two for unification.  Whenever a
    new feature structure is generated, it is added to the list of
    choices that you can pick from.  However, since this can be a
    large number of feature structures, the demo will only print out a
    random subset for you to choose between at a given time.  If you
    want to see the complete lists, type "l".  For a list of valid
    commands, type "?".
    '''
    print 'Press "Enter" to continue...'
    sys.stdin.readline()

    fstruct_strings = [
        '[agr=[number=sing, gender=masc]]',
        '[agr=[gender=masc, person=3]]',
        '[agr=[gender=fem, person=3]]',
        '[subj=[agr=(1)[]], agr->(1)]',
        '[obj=?x]', '[subj=?x]',
        '[/=None]', '[/=NP]',
        '[cat=NP]', '[cat=VP]', '[cat=PP]',
        '[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]',
        '[gender=masc, agr=?C]',
        '[gender=?S, agr=[gender=?S,person=3]]'
        ]

    all_fstructs = [(i, FeatStruct(fstruct_strings[i]))
                    for i in range(len(fstruct_strings))]

    def list_fstructs(fstructs):
        for i, fstruct in fstructs:
            print
            lines = str(fstruct).split('\n')
            print '%3d: %s' % (i+1, lines[0])
            for line in lines[1:]: print '     '+line
        print

    while 1:
        # Pick 5 feature structures at random from the master list.
        MAX_CHOICES = 5
        if len(all_fstructs) > MAX_CHOICES:
            fstructs = random.sample(all_fstructs, MAX_CHOICES)
            fstructs.sort()
        else:
            fstructs = all_fstructs

        print '_'*75

        print 'Choose two feature structures to unify:'
        list_fstructs(fstructs)

        selected = [None, None]
        for (nth, i) in (('First', 0), ('Second', 1)):
            while selected[i] is None:
                print ('%s feature structure (1-%d,q,t,l,?): '
                       % (nth, len(all_fstructs))),
                try:
                    input = sys.stdin.readline().strip()
                    if input in ('q', 'Q', 'x', 'X'): return
                    if input in ('t', 'T'):
                        trace = not trace
                        print '   Trace = %s' % trace
                        continue
                    if input in ('h', 'H', '?'):
                        print HELP % len(fstructs); continue
                    if input in ('l', 'L'):
                        list_fstructs(all_fstructs); continue
                    num = int(input)-1
                    selected[i] = all_fstructs[num][1]
                    print
                except (ValueError, IndexError):
                    print 'Bad feature structure number'
                    continue

        if trace:
            result = selected[0].unify(selected[1], trace=1)
        else:
            result = display_unification(selected[0], selected[1])
        if result is not None:
            for i, fstruct in all_fstructs:
                if `result` == `fstruct`: break
            else:
                all_fstructs.append((len(all_fstructs), result))

        print '\nType "Enter" to continue unifying; or "q" to quit.'
input = sys.stdin.readline().strip() if input in ('q', 'Q', 'x', 'X'): return def demo(trace=False): """ Just for testing """ #import random # parser breaks with values like '3rd' fstruct_strings = [ '[agr=[number=sing, gender=masc]]', '[agr=[gender=masc, person=3]]', '[agr=[gender=fem, person=3]]', '[subj=[agr=(1)[]], agr->(1)]', '[obj=?x]', '[subj=?x]', '[/=None]', '[/=NP]', '[cat=NP]', '[cat=VP]', '[cat=PP]', '[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]', '[gender=masc, agr=?C]', '[gender=?S, agr=[gender=?S,person=3]]' ] all_fstructs = [FeatStruct(fss) for fss in fstruct_strings] #MAX_CHOICES = 5 #if len(all_fstructs) > MAX_CHOICES: #fstructs = random.sample(all_fstructs, MAX_CHOICES) #fstructs.sort() #else: #fstructs = all_fstructs for fs1 in all_fstructs: for fs2 in all_fstructs: print "\n*******************\nfs1 is:\n%s\n\nfs2 is:\n%s\n\nresult is:\n%s" % (fs1, fs2, unify(fs1, fs2)) if __name__ == '__main__': demo() __all__ = ['FeatStruct', 'FeatDict', 'FeatList', 'unify', 'subsumes', 'conflicts', 'Feature', 'SlashFeature', 'RangeFeature', 'SLASH', 'TYPE', 'FeatStructParser']
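
######################################################################
#{ Example
######################################################################

# Illustrative sketch only (not part of the NLTK API): as noted above
# ``VALUE_HANDLERS``, handlers are looked up by name on the parser and
# tried in order, so a subclass can prepend its own entry to take
# precedence.  The hex-literal syntax handled below is hypothetical.
class _HexFeatStructParser(FeatStructParser):
    VALUE_HANDLERS = [
        ('parse_hex_value', re.compile(r'0[xX][0-9a-fA-F]+')),
        ] + FeatStructParser.VALUE_HANDLERS

    def parse_hex_value(self, s, position, reentrances, match):
        # Handlers return a (value, end-position) tuple.
        return int(match.group(), 16), match.end()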
agpl-3.0
-561,273,284,066,415,940
40.030919
117
0.57617
false
r3tard/BartusBot
lib/setuptools/command/egg_info.py
301
16852
"""setuptools.command.egg_info Create a distribution's .egg-info directory and contents""" from distutils.filelist import FileList as _FileList from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys try: from setuptools_svn import svn_utils except ImportError: pass from setuptools import Command from setuptools.command.sdist import sdist from setuptools.compat import basestring, PY3, StringIO from setuptools.command.sdist import walk_revctrl from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from pkg_resources import packaging class egg_info(Command): description = "create a distribution's .egg-info directory" user_options = [ ('egg-base=', 'e', "directory containing .egg-info directories" " (default: top of the source tree)"), ('tag-svn-revision', 'r', "Add subversion revision ID to version number"), ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"), ('tag-build=', 'b', "Specify explicit tag to add to version number"), ('no-svn-revision', 'R', "Don't add subversion revision ID [default]"), ('no-date', 'D', "Don't include date stamp [default]"), ] boolean_options = ['tag-date', 'tag-svn-revision'] negative_opt = {'no-svn-revision': 'tag-svn-revision', 'no-date': 'tag-date'} def initialize_options(self): self.egg_name = None self.egg_version = None self.egg_base = None self.egg_info = None self.tag_build = None self.tag_svn_revision = 0 self.tag_date = 0 self.broken_egg_info = False self.vtags = None def save_version_info(self, filename): from setuptools.command.setopt import edit_config values = dict( egg_info=dict( tag_svn_revision=0, tag_date=0, tag_build=self.tags(), ) ) edit_config(filename, values) def finalize_options(self): self.egg_name = safe_name(self.distribution.get_name()) self.vtags = self.tags() self.egg_version = self.tagged_version() parsed_version = parse_version(self.egg_version) try: is_version = isinstance(parsed_version, packaging.version.Version) spec = ( "%s==%s" if is_version else "%s===%s" ) list( parse_requirements(spec % (self.egg_name, self.egg_version)) ) except ValueError: raise distutils.errors.DistutilsOptionError( "Invalid distribution name or version syntax: %s-%s" % (self.egg_name, self.egg_version) ) if self.egg_base is None: dirs = self.distribution.package_dir self.egg_base = (dirs or {}).get('', os.curdir) self.ensure_dirname('egg_base') self.egg_info = to_filename(self.egg_name) + '.egg-info' if self.egg_base != os.curdir: self.egg_info = os.path.join(self.egg_base, self.egg_info) if '-' in self.egg_name: self.check_broken_egg_info() # Set package version for the benefit of dumber commands # (e.g. sdist, bdist_wininst, etc.) # self.distribution.metadata.version = self.egg_version # If we bootstrapped around the lack of a PKG-INFO, as might be the # case in a fresh checkout, make sure that any special tags get added # to the version info # pd = self.distribution._patched_dist if pd is not None and pd.key == self.egg_name.lower(): pd._version = self.egg_version pd._parsed_version = parse_version(self.egg_version) self.distribution._patched_dist = None def write_or_delete_file(self, what, filename, data, force=False): """Write `data` to `filename` or delete if empty If `data` is non-empty, this routine is the same as ``write_file()``. 
        If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``.  If `data` is ``None``, then this is a no-op
        unless `filename` exists, in which case a warning is issued about the
        orphaned file (if `force` is false), or deleted (if `force` is true).
        """
        if data:
            self.write_file(what, filename, data)
        elif os.path.exists(filename):
            if data is None and not force:
                log.warn(
                    "%s not set in setup(), but %s exists", what, filename
                )
                return
            else:
                self.delete_file(filename)

    def write_file(self, what, filename, data):
        """Write `data` to `filename` (if not a dry run) after announcing it

        `what` is used in a log message to identify what is being written
        to the file.
        """
        log.info("writing %s to %s", what, filename)
        if PY3:
            data = data.encode("utf-8")
        if not self.dry_run:
            f = open(filename, 'wb')
            f.write(data)
            f.close()

    def delete_file(self, filename):
        """Delete `filename` (if not a dry run) after announcing it"""
        log.info("deleting %s", filename)
        if not self.dry_run:
            os.unlink(filename)

    def tagged_version(self):
        version = self.distribution.get_version()
        # egg_info may be called more than once for a distribution,
        # in which case the version string already contains all tags.
        if self.vtags and version.endswith(self.vtags):
            return safe_version(version)
        return safe_version(version + self.vtags)

    def run(self):
        self.mkpath(self.egg_info)
        installer = self.distribution.fetch_build_egg
        for ep in iter_entry_points('egg_info.writers'):
            ep.require(installer=installer)
            writer = ep.resolve()
            writer(self, ep.name, os.path.join(self.egg_info, ep.name))

        # Get rid of native_libs.txt if it was put there by older bdist_egg
        nl = os.path.join(self.egg_info, "native_libs.txt")
        if os.path.exists(nl):
            self.delete_file(nl)

        self.find_sources()

    def tags(self):
        version = ''
        if self.tag_build:
            version += self.tag_build
        if self.tag_svn_revision:
            rev = self.get_svn_revision()
            if rev:  # is 0 if it's not an svn working copy
                version += '-r%s' % rev
        if self.tag_date:
            import time
            version += time.strftime("-%Y%m%d")
        return version

    @staticmethod
    def get_svn_revision():
        if 'svn_utils' not in globals():
            return "0"
        return str(svn_utils.SvnInfo.load(os.curdir).get_revision())

    def find_sources(self):
        """Generate SOURCES.txt manifest file"""
        manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
        mm = manifest_maker(self.distribution)
        mm.manifest = manifest_filename
        mm.run()
        self.filelist = mm.filelist

    def check_broken_egg_info(self):
        bei = self.egg_name + '.egg-info'
        if self.egg_base != os.curdir:
            bei = os.path.join(self.egg_base, bei)
        if os.path.exists(bei):
            log.warn(
                "-" * 78 + '\n'
                "Note: Your current .egg-info directory has a '-' in its name;"
                '\nthis will not work correctly with "setup.py develop".\n\n'
                'Please rename %s to %s to correct this problem.\n' + '-' * 78,
                bei, self.egg_info
            )
            self.broken_egg_info = self.egg_info
            self.egg_info = bei  # make it work for now

class FileList(_FileList):
    """File list that accepts only existing, platform-independent paths"""

    def append(self, item):
        if item.endswith('\r'):  # Fix older sdists built on Windows
            item = item[:-1]
        path = convert_path(item)

        if self._safe_path(path):
            self.files.append(path)

    def extend(self, paths):
        self.files.extend(filter(self._safe_path, paths))

    def _repair(self):
        """
        Replace self.files with only safe paths

        Because some owners of FileList manipulate the underlying
        ``files`` attribute directly, this method must be called to
        repair those paths.
""" self.files = list(filter(self._safe_path, self.files)) def _safe_path(self, path): enc_warn = "'%s' not %s encodable -- skipping" # To avoid accidental trans-codings errors, first to unicode u_path = unicode_utils.filesys_decode(path) if u_path is None: log.warn("'%s' in unexpected encoding -- skipping" % path) return False # Must ensure utf-8 encodability utf8_path = unicode_utils.try_encode(u_path, "utf-8") if utf8_path is None: log.warn(enc_warn, path, 'utf-8') return False try: # accept is either way checks out if os.path.exists(u_path) or os.path.exists(utf8_path): return True # this will catch any encode errors decoding u_path except UnicodeEncodeError: log.warn(enc_warn, path, sys.getfilesystemencoding()) class manifest_maker(sdist): template = "MANIFEST.in" def initialize_options(self): self.use_defaults = 1 self.prune = 1 self.manifest_only = 1 self.force_manifest = 1 def finalize_options(self): pass def run(self): self.filelist = FileList() if not os.path.exists(self.manifest): self.write_manifest() # it must exist so it'll get in the list self.filelist.findall() self.add_defaults() if os.path.exists(self.template): self.read_template() self.prune_file_list() self.filelist.sort() self.filelist.remove_duplicates() self.write_manifest() def _manifest_normalize(self, path): path = unicode_utils.filesys_decode(path) return path.replace(os.sep, '/') def write_manifest(self): """ Write the file list in 'self.filelist' to the manifest file named by 'self.manifest'. """ self.filelist._repair() # Now _repairs should encodability, but not unicode files = [self._manifest_normalize(f) for f in self.filelist.files] msg = "writing manifest file '%s'" % self.manifest self.execute(write_file, (self.manifest, files), msg) def warn(self, msg): # suppress missing-file warnings from sdist if not msg.startswith("standard file not found:"): sdist.warn(self, msg) def add_defaults(self): sdist.add_defaults(self) self.filelist.append(self.template) self.filelist.append(self.manifest) rcfiles = list(walk_revctrl()) if rcfiles: self.filelist.extend(rcfiles) elif os.path.exists(self.manifest): self.read_manifest() ei_cmd = self.get_finalized_command('egg_info') self._add_egg_info(cmd=ei_cmd) self.filelist.include_pattern("*", prefix=ei_cmd.egg_info) def _add_egg_info(self, cmd): """ Add paths for egg-info files for an external egg-base. The egg-info files are written to egg-base. If egg-base is outside the current working directory, this method searchs the egg-base directory for files to include in the manifest. Uses distutils.filelist.findall (which is really the version monkeypatched in by setuptools/__init__.py) to perform the search. Since findall records relative paths, prefix the returned paths with cmd.egg_base, so add_default's include_pattern call (which is looking for the absolute cmd.egg_info) will match them. 
""" if cmd.egg_base == os.curdir: # egg-info files were already added by something else return discovered = distutils.filelist.findall(cmd.egg_base) resolved = (os.path.join(cmd.egg_base, path) for path in discovered) self.filelist.allfiles.extend(resolved) def prune_file_list(self): build = self.get_finalized_command('build') base_dir = self.distribution.get_fullname() self.filelist.exclude_pattern(None, prefix=build.build_base) self.filelist.exclude_pattern(None, prefix=base_dir) sep = re.escape(os.sep) self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep, is_regex=1) def write_file(filename, contents): """Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. """ contents = "\n".join(contents) # assuming the contents has been vetted for utf-8 encoding contents = contents.encode("utf-8") with open(filename, "wb") as f: # always write POSIX-style manifest f.write(contents) def write_pkg_info(cmd, basename, filename): log.info("writing %s", filename) if not cmd.dry_run: metadata = cmd.distribution.metadata metadata.version, oldver = cmd.egg_version, metadata.version metadata.name, oldname = cmd.egg_name, metadata.name try: # write unescaped data to PKG-INFO, so older pkg_resources # can still parse it metadata.write_pkg_info(cmd.egg_info) finally: metadata.name, metadata.version = oldname, oldver safe = getattr(cmd.distribution, 'zip_safe', None) from setuptools.command import bdist_egg bdist_egg.write_safety_flag(cmd.egg_info, safe) def warn_depends_obsolete(cmd, basename, filename): if os.path.exists(filename): log.warn( "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" "Use the install_requires/extras_require setup() args instead." ) def _write_requirements(stream, reqs): lines = yield_lines(reqs or ()) append_cr = lambda line: line + '\n' lines = map(append_cr, lines) stream.writelines(lines) def write_requirements(cmd, basename, filename): dist = cmd.distribution data = StringIO() _write_requirements(data, dist.install_requires) extras_require = dist.extras_require or {} for extra in sorted(extras_require): data.write('\n[{extra}]\n'.format(**vars())) _write_requirements(data, extras_require[extra]) cmd.write_or_delete_file("requirements", filename, data.getvalue()) def write_setup_requirements(cmd, basename, filename): data = StringIO() _write_requirements(data, cmd.distribution.setup_requires) cmd.write_or_delete_file("setup-requirements", filename, data.getvalue()) def write_toplevel_names(cmd, basename, filename): pkgs = dict.fromkeys( [ k.split('.', 1)[0] for k in cmd.distribution.iter_distribution_names() ] ) cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n') def overwrite_arg(cmd, basename, filename): write_arg(cmd, basename, filename, True) def write_arg(cmd, basename, filename, force=False): argname = os.path.splitext(basename)[0] value = getattr(cmd.distribution, argname, None) if value is not None: value = '\n'.join(value) + '\n' cmd.write_or_delete_file(argname, filename, value, force) def write_entries(cmd, basename, filename): ep = cmd.distribution.entry_points if isinstance(ep, basestring) or ep is None: data = ep elif ep is not None: data = [] for section, contents in sorted(ep.items()): if not isinstance(contents, basestring): contents = EntryPoint.parse_group(section, contents) contents = '\n'.join(sorted(map(str, contents.values()))) data.append('[%s]\n%s\n\n' % (section, contents)) data = ''.join(data) cmd.write_or_delete_file('entry points', filename, 
                                 data, True)

def get_pkg_info_revision():
    # See if we can get a -r### off of PKG-INFO, in case this is an sdist of
    # a subversion revision
    #
    if os.path.exists('PKG-INFO'):
        # use a context manager so the file is closed even when we return
        # from inside the loop
        with open('PKG-INFO', 'rU') as f:
            for line in f:
                match = re.match(r"Version:.*-r(\d+)\s*$", line)
                if match:
                    return int(match.group(1))
    return 0
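
# Illustrative sketch only (hypothetical, not part of setuptools): a
# minimal ``egg_info.writers`` entry-point writer in the same shape as
# ``write_arg`` above.  Writers receive the egg_info command object, the
# entry-point name (the target file's basename) and the full filename.
def write_example(cmd, basename, filename):
    # ``example_attr`` is a made-up distribution attribute.
    value = getattr(cmd.distribution, 'example_attr', None)
    if value is not None:
        value = '\n'.join(value) + '\n'
    # write_or_delete_file() writes non-empty data, deletes the target
    # file when data is empty, and warns about an orphaned file when
    # data is None.
    cmd.write_or_delete_file('example-attr', filename, value)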
apache-2.0
-9,098,054,325,669,304,000
34.108333
79
0.599454
false
jjinux/party-playlist-picker
third-party/gdata/tlslite/VerifierDB.py
359
3104
"""Class for storing SRP password verifiers.""" from utils.cryptomath import * from utils.compat import * import mathtls from BaseDB import BaseDB class VerifierDB(BaseDB): """This class represent an in-memory or on-disk database of SRP password verifiers. A VerifierDB can be passed to a server handshake to authenticate a client based on one of the verifiers. This class is thread-safe. """ def __init__(self, filename=None): """Create a new VerifierDB instance. @type filename: str @param filename: Filename for an on-disk database, or None for an in-memory database. If the filename already exists, follow this with a call to open(). To create a new on-disk database, follow this with a call to create(). """ BaseDB.__init__(self, filename, "verifier") def _getItem(self, username, valueStr): (N, g, salt, verifier) = valueStr.split(" ") N = base64ToNumber(N) g = base64ToNumber(g) salt = base64ToString(salt) verifier = base64ToNumber(verifier) return (N, g, salt, verifier) def __setitem__(self, username, verifierEntry): """Add a verifier entry to the database. @type username: str @param username: The username to associate the verifier with. Must be less than 256 characters in length. Must not already be in the database. @type verifierEntry: tuple @param verifierEntry: The verifier entry to add. Use L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a verifier entry. """ BaseDB.__setitem__(self, username, verifierEntry) def _setItem(self, username, value): if len(username)>=256: raise ValueError("username too long") N, g, salt, verifier = value N = numberToBase64(N) g = numberToBase64(g) salt = stringToBase64(salt) verifier = numberToBase64(verifier) valueStr = " ".join( (N, g, salt, verifier) ) return valueStr def _checkItem(self, value, username, param): (N, g, salt, verifier) = value x = mathtls.makeX(salt, username, param) v = powMod(g, x, N) return (verifier == v) def makeVerifier(username, password, bits): """Create a verifier entry which can be stored in a VerifierDB. @type username: str @param username: The username for this verifier. Must be less than 256 characters in length. @type password: str @param password: The password for this verifier. @type bits: int @param bits: This values specifies which SRP group parameters to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144, 8192). Larger values are more secure but slower. 2048 is a good compromise between safety and speed. @rtype: tuple @return: A tuple which may be stored in a VerifierDB. """ return mathtls.makeVerifier(username, password, bits) makeVerifier = staticmethod(makeVerifier)
apache-2.0
1,424,599,307,344,914,700
33.5
71
0.632088
false
evgchz/scikit-learn
sklearn/ensemble/gradient_boosting.py
6
63474
"""Gradient Boosted Regression Trees This module contains methods for fitting gradient boosted regression trees for both classification and regression. The module structure is the following: - The ``BaseGradientBoosting`` base class implements a common ``fit`` method for all the estimators in the module. Regression and classification only differ in the concrete ``LossFunction`` used. - ``GradientBoostingClassifier`` implements gradient boosting for classification problems. - ``GradientBoostingRegressor`` implements gradient boosting for regression problems. """ # Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti, # Arnaud Joly # License: BSD 3 clause from __future__ import print_function from __future__ import division from abc import ABCMeta, abstractmethod from warnings import warn from time import time import numbers import numpy as np from scipy import stats from .base import BaseEnsemble from ..base import BaseEstimator from ..base import ClassifierMixin from ..base import RegressorMixin from ..utils import check_random_state, check_array, check_X_y, column_or_1d from ..utils.extmath import logsumexp from ..utils.stats import _weighted_percentile from ..externals import six from ..feature_selection.from_model import _LearntSelectorMixin from ..tree.tree import DecisionTreeRegressor from ..tree._tree import DTYPE, TREE_LEAF from ..tree._tree import PresortBestSplitter from ..tree._tree import FriedmanMSE from ._gradient_boosting import predict_stages from ._gradient_boosting import predict_stage from ._gradient_boosting import _random_sample_mask class QuantileEstimator(BaseEstimator): """An estimator predicting the alpha-quantile of the training targets.""" def __init__(self, alpha=0.9): if not 0 < alpha < 1.0: raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha) self.alpha = alpha def fit(self, X, y, sample_weight=None): if sample_weight is None: self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0) else: self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0) def predict(self, X): y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.quantile) return y class MeanEstimator(BaseEstimator): """An estimator predicting the mean of the training targets.""" def fit(self, X, y, sample_weight=None): if sample_weight is None: self.mean = np.mean(y) else: self.mean = np.average(y, weights=sample_weight) def predict(self, X): y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.mean) return y class LogOddsEstimator(BaseEstimator): """An estimator predicting the log odds ratio.""" scale = 1.0 def fit(self, X, y, sample_weight=None): # pre-cond: pos, neg are encoded as 1, 0 if sample_weight is None: pos = np.sum(y) neg = y.shape[0] - pos else: pos = np.sum(sample_weight * y) neg = np.sum(sample_weight * (1 - y)) if neg == 0 or pos == 0: raise ValueError('y contains non binary labels.') self.prior = self.scale * np.log(pos / neg) def predict(self, X): y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.prior) return y class ScaledLogOddsEstimator(LogOddsEstimator): """Log odds ratio scaled by 0.5 -- for exponential loss. """ scale = 0.5 class PriorProbabilityEstimator(BaseEstimator): """An estimator predicting the probability of each class in the training data. 
""" def fit(self, X, y, sample_weight=None): if sample_weight is None: sample_weight = np.ones_like(y, dtype=np.float) class_counts = np.bincount(y, weights=sample_weight) self.priors = class_counts / class_counts.sum() def predict(self, X): y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64) y[:] = self.priors return y class ZeroEstimator(BaseEstimator): """An estimator that simply predicts zero. """ def fit(self, X, y, sample_weight=None): if np.issubdtype(y.dtype, int): # classification self.n_classes = np.unique(y).shape[0] if self.n_classes == 2: self.n_classes = 1 else: # regression self.n_classes = 1 def predict(self, X): y = np.empty((X.shape[0], self.n_classes), dtype=np.float64) y.fill(0.0) return y class LossFunction(six.with_metaclass(ABCMeta, object)): """Abstract base class for various loss functions. Attributes ---------- K : int The number of regression trees to be induced; 1 for regression and binary classification; ``n_classes`` for multi-class classification. """ is_multi_class = False def __init__(self, n_classes): self.K = n_classes def init_estimator(self): """Default ``init`` estimator for loss function. """ raise NotImplementedError() @abstractmethod def __call__(self, y, pred, sample_weight=None): """Compute the loss of prediction ``pred`` and ``y``. """ @abstractmethod def negative_gradient(self, y, y_pred, **kargs): """Compute the negative gradient. Parameters --------- y : np.ndarray, shape=(n,) The target labels. y_pred : np.ndarray, shape=(n,): The predictions. """ def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Update the terminal regions (=leaves) of the given tree and updates the current predictions of the model. Traverses tree and invokes template method `_update_terminal_region`. Parameters ---------- tree : tree.Tree The tree object. X : np.ndarray, shape=(n, m) The data array. y : np.ndarray, shape=(n,) The target labels. residual : np.ndarray, shape=(n,) The residuals (usually the negative gradient). y_pred : np.ndarray, shape=(n,): The predictions. sample_weight np.ndarray, shape=(n,): The weight of each sample. """ # compute leaf for each sample in ``X``. terminal_regions = tree.apply(X) # mask all which are not in sample mask. masked_terminal_regions = terminal_regions.copy() masked_terminal_regions[~sample_mask] = -1 # update each leaf (= perform line search) for leaf in np.where(tree.children_left == TREE_LEAF)[0]: self._update_terminal_region(tree, masked_terminal_regions, leaf, X, y, residual, y_pred[:, k], sample_weight) # update predictions (both in-bag and out-of-bag) y_pred[:, k] += (learning_rate * tree.value[:, 0, 0].take(terminal_regions, axis=0)) @abstractmethod def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Template method for updating terminal regions (=leaves). """ class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)): """Base class for regression loss functions. """ def __init__(self, n_classes): if n_classes != 1: raise ValueError("``n_classes`` must be 1 for regression but " "was %r" % n_classes) super(RegressionLossFunction, self).__init__(n_classes) class LeastSquaresError(RegressionLossFunction): """Loss function for least squares (LS) estimation. Terminal regions need not to be updated for least squares. 
""" def init_estimator(self): return MeanEstimator() def __call__(self, y, pred, sample_weight=None): if sample_weight is None: return np.mean((y - pred.ravel()) ** 2.0) else: return (1.0 / sample_weight.sum()) * \ np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)) def negative_gradient(self, y, pred, **kargs): return y - pred.ravel() def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Least squares does not need to update terminal regions. But it has to update the predictions. """ # update predictions y_pred[:, k] += learning_rate * tree.predict(X).ravel() def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): pass class LeastAbsoluteError(RegressionLossFunction): """Loss function for least absolute deviation (LAD) regression. """ def init_estimator(self): return QuantileEstimator(alpha=0.5) def __call__(self, y, pred, sample_weight=None): if sample_weight is None: return np.abs(y - pred.ravel()).mean() else: return (1.0 / sample_weight.sum()) * \ np.sum(sample_weight * np.abs(y - pred.ravel())) def negative_gradient(self, y, pred, **kargs): """1.0 if y - pred > 0.0 else -1.0""" pred = pred.ravel() return 2.0 * (y - pred > 0.0) - 1.0 def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """LAD updates terminal regions to median estimates. """ terminal_region = np.where(terminal_regions == leaf)[0] sample_weight = sample_weight.take(terminal_region, axis=0) diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0) tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50) class HuberLossFunction(RegressionLossFunction): """Huber loss function for robust regression. M-Regression proposed in Friedman 2001. See --- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. 
""" def __init__(self, n_classes, alpha=0.9): super(HuberLossFunction, self).__init__(n_classes) self.alpha = alpha self.gamma = None def init_estimator(self): return QuantileEstimator(alpha=0.5) def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() diff = y - pred gamma = self.gamma if gamma is None: if sample_weight is None: gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100) else: gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100) gamma_mask = np.abs(diff) <= gamma if sample_weight is None: sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0) lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0)) loss = (sq_loss + lin_loss) / y.shape[0] else: sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0) lin_loss = np.sum(gamma * sample_weight[~gamma_mask] * (np.abs(diff[~gamma_mask]) - gamma / 2.0)) loss = (sq_loss + lin_loss) / sample_weight.sum() return loss def negative_gradient(self, y, pred, sample_weight=None, **kargs): pred = pred.ravel() diff = y - pred if sample_weight is None: gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100) else: gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100) gamma_mask = np.abs(diff) <= gamma residual = np.zeros((y.shape[0],), dtype=np.float64) residual[gamma_mask] = diff[gamma_mask] residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask]) self.gamma = gamma return residual def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] sample_weight = sample_weight.take(terminal_region, axis=0) gamma = self.gamma diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)) median = _weighted_percentile(diff, sample_weight, percentile=50) diff_minus_median = diff - median tree.value[leaf, 0] = median + np.mean( np.sign(diff_minus_median) * np.minimum(np.abs(diff_minus_median), gamma)) class QuantileLossFunction(RegressionLossFunction): """Loss function for quantile regression. Quantile regression allows to estimate the percentiles of the conditional distribution of the target. """ def __init__(self, n_classes, alpha=0.9): super(QuantileLossFunction, self).__init__(n_classes) assert 0 < alpha < 1.0 self.alpha = alpha self.percentile = alpha * 100.0 def init_estimator(self): return QuantileEstimator(self.alpha) def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() diff = y - pred alpha = self.alpha mask = y > pred if sample_weight is None: loss = (alpha * diff[mask].sum() + (1.0 - alpha) * diff[~mask].sum()) / y.shape[0] else: loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) + (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) / sample_weight.sum()) return loss def negative_gradient(self, y, pred, **kargs): alpha = self.alpha pred = pred.ravel() mask = y > pred return (alpha * mask) - ((1.0 - alpha) * ~mask) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)) sample_weight = sample_weight.take(terminal_region, axis=0) val = _weighted_percentile(diff, sample_weight, self.percentile) tree.value[leaf, 0] = val class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)): """Base class for classification loss functions. 
""" def _score_to_proba(self, score): """Template method to convert scores to probabilities. If the loss does not support probabilites raises AttributeError. """ raise TypeError('%s does not support predict_proba' % type(self).__name__) @abstractmethod def _score_to_decision(self, score): """Template method to convert scores to decisions. Returns int arrays. """ class BinomialDeviance(ClassificationLossFunction): """Binomial deviance loss function for binary classification. Binary classification is a special case; here, we only need to fit one tree instead of ``n_classes`` trees. """ def __init__(self, n_classes): if n_classes != 2: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) # we only need to fit one tree for binary clf. super(BinomialDeviance, self).__init__(1) def init_estimator(self): return LogOddsEstimator() def __call__(self, y, pred, sample_weight=None): """Compute the deviance (= 2 * negative log-likelihood). """ # logaddexp(0, v) == log(1.0 + exp(v)) pred = pred.ravel() if sample_weight is None: return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred)) else: return (-2.0 / sample_weight.sum() * np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred)))) def negative_gradient(self, y, pred, **kargs): """Compute the residual (= negative gradient). """ return y - 1.0 / (1.0 + np.exp(-pred.ravel())) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Make a single Newton-Raphson step. our node estimate is given by: sum(w * (y - prob)) / sum(w * prob * (1 - prob)) we take advantage that: y - prob = residual """ terminal_region = np.where(terminal_regions == leaf)[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum(sample_weight * residual) denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): proba = np.ones((score.shape[0], 2), dtype=np.float64) proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel())) proba[:, 0] -= proba[:, 1] return proba def _score_to_decision(self, score): proba = self._score_to_proba(score) return np.argmax(proba, axis=1) class MultinomialDeviance(ClassificationLossFunction): """Multinomial deviance loss function for multi-class classification. For multi-class classification we need to fit ``n_classes`` trees at each stage. """ is_multi_class = True def __init__(self, n_classes): if n_classes < 3: raise ValueError("{0:s} requires more than 2 classes.".format( self.__class__.__name__)) super(MultinomialDeviance, self).__init__(n_classes) def init_estimator(self): return PriorProbabilityEstimator() def __call__(self, y, pred, sample_weight=None): # create one-hot label encoding Y = np.zeros((y.shape[0], self.K), dtype=np.float64) for k in range(self.K): Y[:, k] = y == k if sample_weight is None: return np.sum(-1 * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)) else: return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)) def negative_gradient(self, y, pred, k=0, **kwargs): """Compute negative gradient for the ``k``-th class. """ return y - np.nan_to_num(np.exp(pred[:, k] - logsumexp(pred, axis=1))) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Make a single Newton-Raphson step. 
""" terminal_region = np.where(terminal_regions == leaf)[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum(sample_weight * residual) numerator *= (self.K - 1) / self.K denominator = np.sum(sample_weight * (y - residual) * (1.0 - y + residual)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): return np.nan_to_num( np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis]))) def _score_to_decision(self, score): proba = self._score_to_proba(score) return np.argmax(proba, axis=1) class ExponentialLoss(ClassificationLossFunction): """Exponential loss function for binary classification. Same loss as AdaBoost. See --- Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007 """ def __init__(self, n_classes): if n_classes != 2: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) # we only need to fit one tree for binary clf. super(ExponentialLoss, self).__init__(1) def init_estimator(self): return ScaledLogOddsEstimator() def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() if sample_weight is None: return np.mean(np.exp(-(2. * y - 1.) * pred)) else: return (1.0 / sample_weight.sum()) * \ np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)) def negative_gradient(self, y, pred, **kargs): y_ = -(2. * y - 1.) return y_ * np.exp(y_ * pred.ravel()) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] pred = pred.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) y_ = 2. * y - 1. numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred)) denominator = np.sum(sample_weight * np.exp(-y_ * pred)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): proba = np.ones((score.shape[0], 2), dtype=np.float64) proba[:, 1] = 1.0 / (1.0 + np.exp(-2.0 * score.ravel())) proba[:, 0] -= proba[:, 1] return proba def _score_to_decision(self, score): return (score.ravel() >= 0.0).astype(np.int) LOSS_FUNCTIONS = {'ls': LeastSquaresError, 'lad': LeastAbsoluteError, 'huber': HuberLossFunction, 'quantile': QuantileLossFunction, 'deviance': None, # for both, multinomial and binomial 'exponential': ExponentialLoss, } INIT_ESTIMATORS = {'zero': ZeroEstimator} class VerboseReporter(object): """Reports verbose output to stdout. If ``verbose==1`` output is printed once in a while (when iteration mod verbose_mod is zero).; if larger than 1 then output is printed for each update. """ def __init__(self, verbose): self.verbose = verbose def init(self, est, begin_at_stage=0): # header fields and line format str header_fields = ['Iter', 'Train Loss'] verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}'] # do oob? 
if est.subsample < 1: header_fields.append('OOB Improve') verbose_fmt.append('{oob_impr:>16.4f}') header_fields.append('Remaining Time') verbose_fmt.append('{remaining_time:>16s}') # print the header line print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields)) self.verbose_fmt = ' '.join(verbose_fmt) # plot verbose info each time i % verbose_mod == 0 self.verbose_mod = 1 self.start_time = time() self.begin_at_stage = begin_at_stage def update(self, j, est): """Update reporter with new iteration. """ do_oob = est.subsample < 1 # we need to take into account if we fit additional estimators. i = j - self.begin_at_stage # iteration relative to the start iter if (i + 1) % self.verbose_mod == 0: oob_impr = est.oob_improvement_[j] if do_oob else 0 remaining_time = ((est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)) if remaining_time > 60: remaining_time = '{0:.2f}m'.format(remaining_time / 60.0) else: remaining_time = '{0:.2f}s'.format(remaining_time) print(self.verbose_fmt.format(iter=j + 1, train_score=est.train_score_[j], oob_impr=oob_impr, remaining_time=remaining_time)) if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0): # adjust verbose frequency (powers of 10) self.verbose_mod *= 10 class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble, _LearntSelectorMixin)): """Abstract base class for Gradient Boosting. """ @abstractmethod def __init__(self, loss, learning_rate, n_estimators, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_depth, init, subsample, max_features, random_state, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False): self.n_estimators = n_estimators self.learning_rate = learning_rate self.loss = loss self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.subsample = subsample self.max_features = max_features self.max_depth = max_depth self.init = init self.random_state = random_state self.alpha = alpha self.verbose = verbose self.max_leaf_nodes = max_leaf_nodes self.warm_start = warm_start self.estimators_ = np.empty((0, 0), dtype=np.object) def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask, criterion, splitter, random_state): """Fit another stage of ``n_classes_`` trees to the boosting model. """ assert sample_mask.dtype == np.bool loss = self.loss_ original_y = y for k in range(loss.K): if loss.is_multi_class: y = np.array(original_y == k, dtype=np.float64) residual = loss.negative_gradient(y, y_pred, k=k, sample_weight=sample_weight) # induce regression tree on residuals tree = DecisionTreeRegressor( criterion=criterion, splitter=splitter, max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state) if self.subsample < 1.0: # no inplace multiplication! sample_weight = sample_weight * sample_mask.astype(np.float64) tree.fit(X, residual, sample_weight=sample_weight, check_input=False) # update tree leaves loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k) # add tree to ensemble self.estimators_[i, k] = tree return y_pred def _check_params(self): """Check validity of parameters and raise ValueError if not valid. 
""" if self.n_estimators <= 0: raise ValueError("n_estimators must be greater than 0 but " "was %r" % self.n_estimators) if self.learning_rate <= 0.0: raise ValueError("learning_rate must be greater than 0 but " "was %r" % self.learning_rate) if (self.loss not in self._SUPPORTED_LOSS or self.loss not in LOSS_FUNCTIONS): raise ValueError("Loss '{0:s}' not supported. ".format(self.loss)) if self.loss == 'deviance': loss_class = (MultinomialDeviance if len(self.classes_) > 2 else BinomialDeviance) else: loss_class = LOSS_FUNCTIONS[self.loss] if self.loss in ('huber', 'quantile'): self.loss_ = loss_class(self.n_classes_, self.alpha) else: self.loss_ = loss_class(self.n_classes_) if not (0.0 < self.subsample <= 1.0): raise ValueError("subsample must be in (0,1] but " "was %r" % self.subsample) if self.init is not None: if isinstance(self.init, six.string_types): if self.init not in INIT_ESTIMATORS: raise ValueError('init="%s" is not supported' % self.init) else: if (not hasattr(self.init, 'fit') or not hasattr(self.init, 'predict')): raise ValueError("init=%r must be valid BaseEstimator " "and support both fit and " "predict" % self.init) if not (0.0 < self.alpha < 1.0): raise ValueError("alpha must be in (0.0, 1.0) but " "was %r" % self.alpha) if isinstance(self.max_features, six.string_types): if self.max_features == "auto": # if is_classification if self.n_classes_ > 1: max_features = max(1, int(np.sqrt(self.n_features))) else: # is regression max_features = self.n_features elif self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features))) else: raise ValueError("Invalid value for max_features: %r. " "Allowed string values are 'auto', 'sqrt' " "or 'log2'." % self.max_features) elif self.max_features is None: max_features = self.n_features elif isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float max_features = int(self.max_features * self.n_features) self.max_features_ = max_features def _init_state(self): """Initialize model state and allocate model state data structures. """ if self.init is None: self.init_ = self.loss_.init_estimator() elif isinstance(self.init, six.string_types): self.init_ = INIT_ESTIMATORS[self.init]() else: self.init_ = self.init self.estimators_ = np.empty((self.n_estimators, self.loss_.K), dtype=np.object) self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64) # do oob? if self.subsample < 1.0: self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64) def _clear_state(self): """Clear the state of the gradient boosting model. """ if hasattr(self, 'estimators_'): self.estimators_ = np.empty((0, 0), dtype=np.object) if hasattr(self, 'train_score_'): del self.train_score_ if hasattr(self, 'oob_improvement_'): del self.oob_improvement_ if hasattr(self, 'init_'): del self.init_ def _resize_state(self): """Add additional ``n_estimators`` entries to all attributes. 
""" # self.n_estimators is the number of additional est to fit total_n_estimators = self.n_estimators if total_n_estimators < self.estimators_.shape[0]: raise ValueError('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_[0])) self.estimators_.resize((total_n_estimators, self.loss_.K)) self.train_score_.resize(total_n_estimators) if (self.subsample < 1 or hasattr(self, 'oob_improvement_')): # if do oob resize arrays or create new if not available if hasattr(self, 'oob_improvement_'): self.oob_improvement_.resize(total_n_estimators) else: self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64) def _is_initialized(self): return len(getattr(self, 'estimators_', [])) > 0 def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes ``0, 1, ..., n_classes_-1`` sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. 
""" # if not warmstart - clear the estimator state if not self.warm_start: self._clear_state() # Check input X, y = check_X_y(X, y, dtype=DTYPE) n_samples, n_features = X.shape if sample_weight is None: sample_weight = np.ones(n_samples, dtype=np.float32) else: sample_weight = column_or_1d(sample_weight, warn=True) if y.shape[0] != n_samples: raise ValueError('Shape mismatch of X and y: %d != %d' % (n_samples, y.shape[0])) if n_samples != sample_weight.shape[0]: raise ValueError('Shape mismatch of sample_weight: %d != %d' % (sample_weight.shape[0], n_samples)) self.n_features = n_features random_state = check_random_state(self.random_state) self._check_params() if not self._is_initialized(): # init state self._init_state() # fit initial model - FIXME make sample_weight optional self.init_.fit(X, y, sample_weight) # init predictions y_pred = self.init_.predict(X) begin_at_stage = 0 else: # add more estimators to fitted model # invariant: warm_start = True if self.n_estimators < self.estimators_.shape[0]: raise ValueError('n_estimators=%d must be larger or equal to ' 'estimators_.shape[0]=%d when ' 'warm_start==True' % (self.n_estimators, self.estimators_.shape[0])) begin_at_stage = self.estimators_.shape[0] y_pred = self._decision_function(X) self._resize_state() # fit the boosting stages n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state, begin_at_stage, monitor) # change shape of arrays after fit (early-stopping or additional ests) if n_stages != self.estimators_.shape[0]: self.estimators_ = self.estimators_[:n_stages] self.train_score_ = self.train_score_[:n_stages] if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = self.oob_improvement_[:n_stages] return self def _fit_stages(self, X, y, y_pred, sample_weight, random_state, begin_at_stage=0, monitor=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. 
""" n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = np.ones((n_samples, ), dtype=np.bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self.loss_ # init criterion and splitter criterion = FriedmanMSE(1) splitter = PresortBestSplitter(criterion, self.max_features_, self.min_samples_leaf, self.min_weight_fraction_leaf, random_state) if self.verbose: verbose_reporter = VerboseReporter(self.verbose) verbose_reporter.init(self, begin_at_stage) # perform boosting iterations i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): # subsampling if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) # OOB score before adding this stage old_oob_score = loss_(y[~sample_mask], y_pred[~sample_mask], sample_weight[~sample_mask]) # fit next stage of trees y_pred = self._fit_stage(i, X, y, y_pred, sample_weight, sample_mask, criterion, splitter, random_state) # track deviance (= loss) if do_oob: self.train_score_[i] = loss_(y[sample_mask], y_pred[sample_mask], sample_weight[sample_mask]) self.oob_improvement_[i] = (old_oob_score - loss_(y[~sample_mask], y_pred[~sample_mask], sample_weight[~sample_mask])) else: # no need to fancy index w/ no subsampling self.train_score_[i] = loss_(y, y_pred, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break return i + 1 def _make_estimator(self, append=True): # we don't need _make_estimator raise NotImplementedError() def _init_decision_function(self, X): """Check input and compute prediction of ``init``. """ if self.estimators_ is None or len(self.estimators_) == 0: raise ValueError("Estimator not fitted, call `fit` " "before making predictions`.") if X.shape[1] != self.n_features: raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format( self.n_features, X.shape[1])) score = self.init_.predict(X).astype(np.float64) return score def _decision_function(self, X): # for use in inner loop, not raveling the output in single-class case, # not doing input validation. score = self._init_decision_function(X) predict_stages(self.estimators_, X, self.learning_rate, score) return score def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- score : array, shape = [n_samples, n_classes] or [n_samples] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification produce an array of shape [n_samples]. """ X = check_array(X, dtype=DTYPE, order="C") score = self._decision_function(X) if score.shape[1] == 1: return score.ravel() return score def staged_decision_function(self, X): """Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. 
""" X = check_array(X, dtype=DTYPE, order="C") score = self._init_decision_function(X) for i in range(self.estimators_.shape[0]): predict_stage(self.estimators_, i, X, self.learning_rate, score) yield score @property def feature_importances_(self): """Return the feature importances (the higher, the more important the feature). Returns ------- feature_importances_ : array, shape = [n_features] """ if self.estimators_ is None or len(self.estimators_) == 0: raise ValueError("Estimator not fitted, " "call `fit` before `feature_importances_`.") total_sum = np.zeros((self.n_features, ), dtype=np.float64) for stage in self.estimators_: stage_sum = sum(tree.feature_importances_ for tree in stage) / len(stage) total_sum += stage_sum importances = total_sum / len(self.estimators_) return importances class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin): """Gradient Boosting for classification. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage ``n_classes_`` regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function. Binary classification is a special case where only a single regression tree is induced. Parameters ---------- loss : {'deviance', 'exponential'}, optional (default='deviance') loss function to be optimized. 'deviance' refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss 'exponential' gradient boosting recoveres the AdaBoost algorithm. learning_rate : float, optional (default=0.1) learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int (default=100) The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. max_depth : integer, optional (default=3) maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. Ignored if ``max_samples_leaf`` is not None. min_samples_split : integer, optional (default=2) The minimum number of samples required to split an internal node. min_samples_leaf : integer, optional (default=1) The minimum number of samples required to be at a leaf node. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the input samples required to be at a leaf node. subsample : float, optional (default=1.0) The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. 
Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. If not None then ``max_depth`` will be ignored. init : BaseEstimator, None, optional (default=None) An estimator object that is used to compute the initial predictions. ``init`` has to provide ``fit`` and ``predict``. If None it uses ``loss.init_estimator``. verbose : int, default: 0 Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. warm_start : bool, default: False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. Attributes ---------- feature_importances_ : array, shape = [n_features] The feature importances (the higher, the more important the feature). oob_improvement_ : array, shape = [n_estimators] The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. ``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. train_score_ : array, shape = [n_estimators] The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. `init` : BaseEstimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : list of DecisionTreeRegressor The collection of fitted sub-estimators. See also -------- sklearn.tree.DecisionTreeClassifier, RandomForestClassifier AdaBoostClassifier References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. """ _SUPPORTED_LOSS = ('deviance', 'exponential') def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False): super(GradientBoostingClassifier, self).__init__( loss, learning_rate, n_estimators, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_depth, init, subsample, max_features, random_state, verbose=verbose, max_leaf_nodes=max_leaf_nodes, warm_start=warm_start) def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes ``0, 1, ..., n_classes_-1``. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. 
            Splits that would create child nodes with net zero or negative
            weight are ignored while searching for a split in each node. In
            the case of classification, splits are also ignored if they would
            result in any single class carrying a negative weight in either
            child node.

        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator and the local variables of
            ``_fit_stages`` as keyword arguments ``callable(i, self,
            locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.

        Returns
        -------
        self : object
            Returns self.
        """
        y = column_or_1d(y, warn=True)
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        return super(GradientBoostingClassifier, self).fit(X, y, sample_weight,
                                                           monitor)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        score = self.decision_function(X)
        try:
            return self.loss_._score_to_proba(score)
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)

    def staged_predict_proba(self, X):
        """Predict class probabilities at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted value of the input samples.
        """
        try:
            for score in self.staged_decision_function(X):
                yield self.loss_._score_to_proba(score)
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)

    def predict(self, X):
        """Predict class for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        score = self.decision_function(X)
        decisions = self.loss_._score_to_decision(score)
        return self.classes_.take(decisions, axis=0)

    def staged_predict(self, X):
        """Predict classes at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for score in self.staged_decision_function(X):
            decisions = self.loss_._score_to_decision(score)
            yield self.classes_.take(decisions, axis=0)


class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
    """Gradient Boosting for regression.

    GB builds an additive model in a forward stage-wise fashion;
    it allows for the optimization of arbitrary differentiable loss functions.
    In each stage a regression tree is fit on the negative gradient of the
    given loss function.

    Parameters
    ----------
    loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
        loss function to be optimized. 'ls' refers to least squares
        regression. 'lad' (least absolute deviation) is a highly robust
        loss function solely based on order information of the input
        variables.
'huber' is a combination of the two. 'quantile' allows quantile regression (use `alpha` to specify the quantile). learning_rate : float, optional (default=0.1) learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int (default=100) The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. max_depth : integer, optional (default=3) maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. min_samples_split : integer, optional (default=2) The minimum number of samples required to split an internal node. min_samples_leaf : integer, optional (default=1) The minimum number of samples required to be at a leaf node. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the input samples required to be at a leaf node. subsample : float, optional (default=1.0) The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. alpha : float (default=0.9) The alpha-quantile of the huber loss function and the quantile loss function. Only if ``loss='huber'`` or ``loss='quantile'``. init : BaseEstimator, None, optional (default=None) An estimator object that is used to compute the initial predictions. ``init`` has to provide ``fit`` and ``predict``. If None it uses ``loss.init_estimator``. verbose : int, default: 0 Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. warm_start : bool, default: False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. Attributes ---------- feature_importances_ : array, shape = [n_features] The feature importances (the higher, the more important the feature). oob_improvement_ : array, shape = [n_estimators] The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. 
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.

    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.

    loss_ : LossFunction
        The concrete ``LossFunction`` object.

    `init` : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.

    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    See also
    --------
    DecisionTreeRegressor, RandomForestRegressor

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')

    def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0., max_depth=3, init=None,
                 random_state=None, max_features=None, alpha=0.9, verbose=0,
                 max_leaf_nodes=None, warm_start=False):

        super(GradientBoostingRegressor, self).__init__(
            loss, learning_rate, n_estimators, min_samples_split,
            min_samples_leaf, min_weight_fraction_leaf, max_depth, init,
            subsample, max_features, random_state, alpha, verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def fit(self, X, y, sample_weight=None, monitor=None):
        """Fit the gradient boosting model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values (integers in classification, real numbers in
            regression). For classification, labels must correspond to classes
            ``0, 1, ..., n_classes_-1``.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in
            any single class carrying a negative weight in either child node.

        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator and the local variables of
            ``_fit_stages`` as keyword arguments ``callable(i, self,
            locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.

        Returns
        -------
        self : object
            Returns self.
        """
        self.n_classes_ = 1
        return super(GradientBoostingRegressor, self).fit(X, y, sample_weight,
                                                          monitor)

    def predict(self, X):
        """Predict regression target for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        return self.decision_function(X).ravel()

    def staged_predict(self, X):
        """Predict regression target at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
Returns ------- y : array of shape = [n_samples] The predicted value of the input samples. """ for y in self.staged_decision_function(X): yield y.ravel()
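# --- Illustrative usage sketch (an addition, not part of the original module) ---
# A minimal sketch showing how the two estimators above expose per-stage
# diagnostics once fitted. The dataset shape and hyperparameter values are
# assumptions chosen purely for illustration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 4)
    y_demo = (X_demo[:, 0] + X_demo[:, 1] > 1.0).astype(np.int64)

    demo_clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                          max_depth=3, subsample=0.8,
                                          random_state=0)
    demo_clf.fit(X_demo, y_demo)
    # with subsample < 1.0, both in-bag scores and OOB improvements are tracked
    print("final in-bag deviance: %r" % demo_clf.train_score_[-1])
    print("first OOB improvements: %r" % (demo_clf.oob_improvement_[:3],))
    # staged_predict lets a caller monitor accuracy without refitting
    staged_acc = [np.mean(pred == y_demo)
                  for pred in demo_clf.staged_predict(X_demo)]
    print("staged accuracy at last stage: %r" % staged_acc[-1])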
bsd-3-clause
-1,396,673,558,145,351,000
37.988943
91
0.581829
false
lfz/Guided-Denoise
Attackset/Iter8_v3_resv2_inresv2_random/attack_iter.py
4
10757
"""Implementation of sample attack.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from scipy.misc import imread from scipy.misc import imsave from nets import inception_v3, inception_v4, inception_resnet_v2, resnet_v2 from functools import partial from multiprocessing import Pool import tensorflow as tf slim = tf.contrib.slim tf.flags.DEFINE_string( 'master', '', 'The address of the TensorFlow master to use.') tf.flags.DEFINE_string( 'checkpoint_path_inception_v3', '', 'Path to checkpoint for inception network.') tf.flags.DEFINE_string( 'checkpoint_path_adv_inception_v3', '', 'Path to checkpoint for inception network.') tf.flags.DEFINE_string( 'checkpoint_path_ens3_adv_inception_v3', '', 'Path to checkpoint for inception network.') tf.flags.DEFINE_string( 'checkpoint_path_ens4_adv_inception_v3', '', 'Path to checkpoint for inception network.') tf.flags.DEFINE_string( 'checkpoint_path_inception_v4', '', 'Path to checkpoint for inception network.') tf.flags.DEFINE_string( 'checkpoint_path_inception_resnet_v2', '', 'Path to checkpoint for inception network.') tf.flags.DEFINE_string( 'checkpoint_path_ens_adv_inception_resnet_v2', '', 'Path to checkpoint for inception network.') tf.flags.DEFINE_string( 'checkpoint_path_resnet', '', 'Path to checkpoint for inception network.') tf.flags.DEFINE_string( 'input_dir', '', 'Input directory with images.') tf.flags.DEFINE_string( 'output_dir', '', 'Output directory with images.') tf.flags.DEFINE_float( 'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.') tf.flags.DEFINE_integer( 'num_iter', 10, 'Number of iterations.') tf.flags.DEFINE_integer( 'image_width', 299, 'Width of each input images.') tf.flags.DEFINE_integer( 'image_height', 299, 'Height of each input images.') tf.flags.DEFINE_integer( 'batch_size', 10, 'How many images process at one time.') tf.flags.DEFINE_integer( 'use_existing', 0, 'whether reuse existing result') tf.flags.DEFINE_integer( 'random_eps', 0, 'whether use random pertubation') tf.flags.DEFINE_float( 'momentum', 1.0, 'Momentum.') tf.flags.DEFINE_string( 'gpu','0','') FLAGS = tf.flags.FLAGS def load_images(input_dir, batch_shape): """Read png images from input directory in batches. Args: input_dir: input directory batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3] Yields: filenames: list file names without path of each image Lenght of this list could be less than batch_size, in this case only first few images of the result are elements of the minibatch. images: array with all images from this batch """ images = np.zeros(batch_shape) filenames = [] idx = 0 batch_size = batch_shape[0] for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')): with tf.gfile.Open(filepath) as f: image = imread(f, mode='RGB').astype(np.float) / 255.0 # Images for inception classifier are normalized to be in [-1, 1] interval. 
images[idx, :, :, :] = image * 2.0 - 1.0 filenames.append(os.path.basename(filepath)) idx += 1 if idx == batch_size: yield filenames, images filenames = [] images = np.zeros(batch_shape) idx = 0 if idx > 0: yield filenames, images def save_images(arg): image,filename,output_dir = arg imsave(os.path.join(output_dir, filename), (image + 1.0) * 0.5, format='png') def graph(x, y, i, x_max, x_min, grad, eps_inside): num_iter = FLAGS.num_iter alpha = eps_inside / num_iter momentum = FLAGS.momentum num_classes = 1001 with slim.arg_scope(inception_v3.inception_v3_arg_scope()): logits_v3, end_points_v3 = inception_v3.inception_v3( x, num_classes=num_classes, is_training=False) with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()): logits_res_v2, end_points_res_v2 = inception_resnet_v2.inception_resnet_v2( x, num_classes=num_classes, is_training=False) with slim.arg_scope(resnet_v2.resnet_arg_scope()): logits_resnet, end_points_resnet = resnet_v2.resnet_v2_50( x, num_classes=num_classes, is_training=False) pred = tf.argmax(end_points_v3['Predictions'] + end_points_res_v2['Predictions'] + end_points_resnet['predictions'], 1) first_round = tf.cast(tf.equal(i, 0), tf.int64) y = first_round * pred + (1 - first_round) * y one_hot = tf.one_hot(y, num_classes) logits = (logits_v3 + logits_res_v2 + logits_resnet) / 3 auxlogits = (end_points_v3['AuxLogits'] + end_points_res_v2['AuxLogits'] ) / 2 cross_entropy = tf.losses.softmax_cross_entropy(one_hot, logits, label_smoothing=0.0, weights=1.0) cross_entropy += tf.losses.softmax_cross_entropy(one_hot, auxlogits, label_smoothing=0.0, weights=0.4) noise = tf.gradients(cross_entropy, x)[0] x = x + alpha * tf.sign(noise) x = tf.clip_by_value(x, x_min, x_max) i = tf.add(i, 1) return x, y, i, x_max, x_min, noise, eps_inside def stop(x, y, i, x_max, x_min, grad, eps_inside): num_iter = FLAGS.num_iter return tf.less(i, num_iter) def main(_): # Images for inception classifier are normalized to be in [-1, 1] interval, # eps is a difference between pixels so it should be in [0, 2] interval. # Renormalizing epsilon from [0, 255] to [0, 2]. 
print(FLAGS.output_dir) #eps = 2.0 * FLAGS.max_epsilon / 255.0 gpus = np.array(FLAGS.gpu.split(',')).astype('int') n_gpus = len(gpus) bs_single = FLAGS.batch_size FLAGS.batch_size *= n_gpus batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3] batch_shape_single = [bs_single, FLAGS.image_height, FLAGS.image_width, 3] tf.logging.set_verbosity(tf.logging.INFO) pool = Pool() with tf.Graph().as_default(), tf.device('/cpu:0'): flists = set([f for f in os.listdir(FLAGS.input_dir) if 'png' in f]) if FLAGS.use_existing == 1: flists_existing = set([f for f in os.listdir(FLAGS.output_dir) if 'png' in f ]) newfiles = list(flists.difference(flists_existing)) newfiles = [os.path.join(FLAGS.input_dir,f) for f in newfiles] else: newfiles = [os.path.join(FLAGS.input_dir,f) for f in flists] print('creating %s new files'%(len(newfiles))) if len(newfiles) == 0: return filename_queue = tf.train.string_input_producer(newfiles, shuffle = False, num_epochs = FLAGS.batch_size) image_reader = tf.WholeFileReader() filename, image_file = image_reader.read(filename_queue) image = tf.image.decode_png(image_file) image.set_shape((299, 299, 3)) eps = tf.placeholder(dtype='float32', shape = [FLAGS.batch_size, None, None, None]) # Generate batch num_preprocess_threads = 20 min_queue_examples = 256 images,filenames = tf.train.batch( [image,filename], batch_size=FLAGS.batch_size, num_threads=num_preprocess_threads, capacity= 3 * FLAGS.batch_size, allow_smaller_final_batch = False) images = tf.cast(images,tf.float32)/255.0*2.-1. images_splits = tf.split(axis=0, num_or_size_splits=n_gpus, value=images) eps_splits = tf.split(axis=0, num_or_size_splits=n_gpus, value=eps) # Prepare graph #x_input = tf.placeholder(tf.float32, shape=batch_shape) x_advlist = [] for i_gpu in range(n_gpus): start = i_gpu*bs_single print('gpu'+str(i_gpu)) with tf.device('/gpu:'+str(i_gpu)): with tf.variable_scope(tf.get_variable_scope(), reuse=True if i_gpu > 0 else None): # with tf.name_scope('%s_%d' % ('tower', i_gpu)): x_in_single = images_splits[i_gpu] eps_single = eps_splits[i_gpu] x_max = tf.clip_by_value(x_in_single + eps_single, -1.0, 1.0) x_min = tf.clip_by_value(x_in_single - eps_single, -1.0, 1.0) bs_this = x_in_single.shape[0] y = tf.constant(np.zeros([bs_single]), tf.int64) i = tf.constant(0) grad = tf.zeros_like(x_in_single) x_adv, _, _, _, _, _, _ = tf.while_loop(stop, graph, [x_in_single, y, i, x_max, x_min, grad, eps_single]) x_advlist.append(x_adv) x_adv = tf.concat(x_advlist,0) # Run computation s1 = tf.train.Saver(slim.get_model_variables(scope='InceptionV3')) s6 = tf.train.Saver(slim.get_model_variables(scope='InceptionResnetV2')) s8 = tf.train.Saver(slim.get_model_variables(scope='resnet_v2')) init = (tf.global_variables_initializer(), tf.local_variables_initializer()) with tf.Session() as sess: sess.run(init) s1.restore(sess, FLAGS.checkpoint_path_inception_v3) s6.restore(sess, FLAGS.checkpoint_path_inception_resnet_v2) s8.restore(sess, FLAGS.checkpoint_path_resnet) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) n_iter = -(-(len(newfiles))//FLAGS.batch_size) stack_img = [] stack_names = [] for i in range(n_iter): if FLAGS.random_eps: eps_value = np.random.randint(1,FLAGS.max_epsilon, [FLAGS.batch_size,1,1,1]) else: eps_value = np.ones([FLAGS.batch_size,1,1,1]) * FLAGS.max_epsilon eps_value = eps_value.astype('float32') *2 /255 names,adv_images,orig_images = sess.run([filenames,x_adv,images], feed_dict={eps:eps_value}) names = [os.path.basename(name) for name in names] 
stack_img.append(adv_images) stack_names.append(names) # save_images2(adv_images, names, FLAGS.output_dir, pool) # save_images(adv_images, names, FLAGS.output_dir) if ((i+1)%100 ==0) or i == n_iter-1: print("%d / %d"%(i+1,n_iter)) stack_img = np.concatenate(stack_img) stack_names = np.concatenate(stack_names) #partial_save = partial(save_one,images=stack_img,filenames=stack_names,output_dir=FLAGS.output_dir) paras = ((im,name,FLAGS.output_dir) for (im,name) in zip(stack_img,stack_names)) pool.map_async(save_images,paras) stack_img = [] stack_names = [] # save_images(adv_images, filenames, FLAGS.output_dir) # Finish off the filename queue coordinator. coord.request_stop() coord.join(threads) pool.close() pool.join() if __name__ == '__main__': tf.app.run()
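# --- Illustrative reference sketch (an addition, not part of the original file) ---
# The core update that `graph` performs above, restated in NumPy: an iterative
# sign-gradient step, clipped to an epsilon ball around the input. `loss_grad`
# is a hypothetical callable standing in for the gradient of the ensemble's
# cross-entropy loss; it is an assumption for illustration only.
def iterative_sign_attack_sketch(x, loss_grad, eps, num_iter):
  alpha = eps / num_iter
  x_min = np.clip(x - eps, -1.0, 1.0)
  x_max = np.clip(x + eps, -1.0, 1.0)
  x_adv = x.copy()
  for _ in range(num_iter):
    # step along the sign of the gradient, then project back into the ball
    x_adv = np.clip(x_adv + alpha * np.sign(loss_grad(x_adv)), x_min, x_max)
  return x_adv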
apache-2.0
3,506,776,663,877,861,400
37.417857
121
0.629915
false
nanophotonics/nplab
nplab/utils/array_with_attrs.py
1
3536
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 08:08:14 2015

@author: rwb27
"""

import numpy as np


class AttributeDict(dict):
    """This class extends a dictionary to have a "create" method for
    compatibility with h5py attrs objects."""
    def create(self, name, data):
        self[name] = data

    def modify(self, name, data):
        self[name] = data

    def copy_arrays(self):
        """Replace any numpy.ndarray in the dict with a copy, to break any
        unintentional links."""
        for k in list(self.keys()):
            if isinstance(self[k], np.ndarray):
                self[k] = np.copy(self[k])


def ensure_attribute_dict(obj, copy=False):
    """Given a mapping that may or may not be an AttributeDict, return an
    AttributeDict object that either is, or copies the data of, the input."""
    if isinstance(obj, AttributeDict) and not copy:
        return obj
    else:
        out = AttributeDict(obj)
        if copy:
            out.copy_arrays()
        return out


def ensure_attrs(obj):
    """Return an ArrayWithAttrs version of an array-like object, may be the
    original object if it already has attrs."""
    if hasattr(obj, 'attrs'):
        return obj  # if it has attrs, do nothing
    else:
        return ArrayWithAttrs(obj)  # otherwise, wrap it


class ArrayWithAttrs(np.ndarray):
    """A numpy ndarray, with an AttributeDict accessible as array.attrs.

    This class is intended as a temporary version of an h5py dataset to
    allow the easy passing of metadata/attributes around nplab functions.
    It owes a lot to the ``InfoArray`` example in `numpy` documentation on
    subclassing `numpy.ndarray`.
    """

    def __new__(cls, input_array, attrs={}):
        """Make a new ndarray, based on an existing one, with an attrs dict.

        This function adds an attributes dictionary to a numpy array, to
        make it work like an h5py dataset.  It doesn't copy data if it can
        be avoided."""
        # the input array should be a numpy array, then we cast it to this type
        obj = np.asarray(input_array).view(cls)
        # next, add the dict
        # ensure_attribute_dict always returns an AttributeDict
        obj.attrs = ensure_attribute_dict(attrs)
        # return the new object
        return obj

    def __array_finalize__(self, obj):
        # this is called by numpy when the object is created (__new__ may or
        # may not get called)
        if obj is None:
            return  # if obj is None, __new__ was called - do nothing
        # if we didn't create the object with __new__, we must add the attrs
        # dictionary.  We copy this from the source object if possible (while
        # ensuring it's the right type) or create a new, empty one if not.
        # NB we don't use ensure_attribute_dict because we want to make sure
        # the dict object is *copied* not merely referenced.
        self.attrs = ensure_attribute_dict(getattr(obj, 'attrs', {}),
                                           copy=True)


def attribute_bundler(attrs):
    """Return a function that bundles the supplied attributes with an array."""
    def bundle_attrs(array):
        return ArrayWithAttrs(array, attrs=attrs)
    return bundle_attrs


class DummyHDF5Group(dict):
    def __init__(self, dictionary, attrs={}, name="DummyHDF5Group"):
        super(DummyHDF5Group, self).__init__()
        self.attrs = attrs
        for key in dictionary:
            self[key] = dictionary[key]
        self.name = name
        self.basename = name

    file = None
    parent = None
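# --- Illustrative usage sketch (an addition, not part of the original module) ---
# Demonstrates that attrs survive wrapping and are *copied* when numpy creates
# derived arrays, so metadata edits on a view do not leak back to the parent.
# The attribute names and values are assumptions for illustration only.
if __name__ == '__main__':
    a = ArrayWithAttrs(np.arange(6).reshape(2, 3), attrs={'units': 'counts'})
    b = a[0]  # __array_finalize__ gives the slice its own copy of attrs
    b.attrs['units'] = 'volts'
    assert a.attrs['units'] == 'counts'

    bundle = attribute_bundler({'exposure': 0.1})
    c = bundle(np.zeros(4))
    assert c.attrs['exposure'] == 0.1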
gpl-3.0
-164,220,774,799,381,980
36.231579
98
0.630656
false
ProfessionalIT/maxigenios-website
sdk/google_appengine/lib/django-1.4/django/db/backends/postgresql_psycopg2/base.py
81
9415
""" PostgreSQL database backend for Django. Requires psycopg 2: http://initd.org/projects/psycopg2 """ import sys from django.db import utils from django.db.backends import * from django.db.backends.signals import connection_created from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations from django.db.backends.postgresql_psycopg2.client import DatabaseClient from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation from django.db.backends.postgresql_psycopg2.version import get_version from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection from django.utils.log import getLogger from django.utils.safestring import SafeUnicode, SafeString from django.utils.timezone import utc try: import psycopg2 as Database import psycopg2.extensions except ImportError, e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e) DatabaseError = Database.DatabaseError IntegrityError = Database.IntegrityError psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString) psycopg2.extensions.register_adapter(SafeUnicode, psycopg2.extensions.QuotedString) logger = getLogger('django.db.backends') def utc_tzinfo_factory(offset): if offset != 0: raise AssertionError("database connection isn't set to UTC") return utc class CursorWrapper(object): """ A thin wrapper around psycopg2's normal cursor class so that we can catch particular exception instances and reraise them with the right types. """ def __init__(self, cursor): self.cursor = cursor def execute(self, query, args=None): try: return self.cursor.execute(query, args) except Database.IntegrityError, e: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] except Database.DatabaseError, e: raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2] def executemany(self, query, args): try: return self.cursor.executemany(query, args) except Database.IntegrityError, e: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] except Database.DatabaseError, e: raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2] def __getattr__(self, attr): if attr in self.__dict__: return self.__dict__[attr] else: return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor) class DatabaseFeatures(BaseDatabaseFeatures): needs_datetime_string_cast = False can_return_id_from_insert = True requires_rollback_on_dirty_transaction = True has_real_datatype = True can_defer_constraint_checks = True has_select_for_update = True has_select_for_update_nowait = True has_bulk_insert = True supports_tablespaces = True can_distinct_on_fields = True class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'postgresql' operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': 'LIKE %s', 'icontains': 'LIKE UPPER(%s)', 'regex': '~ %s', 'iregex': '~* %s', 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'LIKE %s', 'endswith': 'LIKE %s', 'istartswith': 'LIKE UPPER(%s)', 'iendswith': 'LIKE UPPER(%s)', } def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) self.features = DatabaseFeatures(self) autocommit = self.settings_dict["OPTIONS"].get('autocommit', False) self.features.uses_autocommit = autocommit if autocommit: level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT else: level = 
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
        self._set_isolation_level(level)

        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
        self._pg_version = None

    def check_constraints(self, table_names=None):
        """
        To check constraints, we set constraints to immediate. Then, when
        we're done, we must ensure they are returned to deferred.
        """
        self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
        self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')

    def close(self):
        self.validate_thread_sharing()
        if self.connection is None:
            return

        try:
            self.connection.close()
            self.connection = None
        except Database.Error:
            # In some cases (database restart, network connection lost etc...)
            # the connection to the database is lost without giving Django a
            # notification. If we don't set self.connection to None, the error
            # will occur at every request.
            self.connection = None
            logger.warning('psycopg2 error while closing the connection.',
                exc_info=sys.exc_info()
            )
            raise

    def _get_pg_version(self):
        if self._pg_version is None:
            self._pg_version = get_version(self.connection)
        return self._pg_version
    pg_version = property(_get_pg_version)

    def _cursor(self):
        settings_dict = self.settings_dict
        if self.connection is None:
            if settings_dict['NAME'] == '':
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured(
                    "You need to specify NAME in your Django settings file.")
            conn_params = {
                'database': settings_dict['NAME'],
            }
            conn_params.update(settings_dict['OPTIONS'])
            if 'autocommit' in conn_params:
                del conn_params['autocommit']
            if settings_dict['USER']:
                conn_params['user'] = settings_dict['USER']
            if settings_dict['PASSWORD']:
                conn_params['password'] = settings_dict['PASSWORD']
            if settings_dict['HOST']:
                conn_params['host'] = settings_dict['HOST']
            if settings_dict['PORT']:
                conn_params['port'] = settings_dict['PORT']
            self.connection = Database.connect(**conn_params)
            self.connection.set_client_encoding('UTF8')
            tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
            if tz:
                try:
                    get_parameter_status = self.connection.get_parameter_status
                except AttributeError:
                    # psycopg2 < 2.0.12 doesn't have get_parameter_status
                    conn_tz = None
                else:
                    conn_tz = get_parameter_status('TimeZone')

                if conn_tz != tz:
                    # Set the time zone in autocommit mode (see #17062)
                    self.connection.set_isolation_level(
                        psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
                    self.connection.cursor().execute(
                        self.ops.set_time_zone_sql(), [tz])
                self.connection.set_isolation_level(self.isolation_level)
            self._get_pg_version()
            connection_created.send(sender=self.__class__, connection=self)
        cursor = self.connection.cursor()
        cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
        return CursorWrapper(cursor)

    def _enter_transaction_management(self, managed):
        """
        Switch the isolation level when needing transaction support, so that
        the same transaction is visible across all the queries.
        """
        if self.features.uses_autocommit and managed and not self.isolation_level:
            self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)

    def _leave_transaction_management(self, managed):
        """
        If the normal operating mode is "autocommit", switch back to that when
        leaving transaction management.
""" if self.features.uses_autocommit and not managed and self.isolation_level: self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) def _set_isolation_level(self, level): """ Do all the related feature configurations for changing isolation levels. This doesn't touch the uses_autocommit feature, since that controls the movement *between* isolation levels. """ assert level in range(5) try: if self.connection is not None: self.connection.set_isolation_level(level) finally: self.isolation_level = level self.features.uses_savepoints = bool(level) def _commit(self): if self.connection is not None: try: return self.connection.commit() except Database.IntegrityError, e: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
mit
-233,190,633,907,065,180
38.725738
105
0.629421
false
snowdream1314/scrapy
tests/test_spider.py
53
13990
import gzip
import inspect
import warnings
from io import BytesIO

from testfixtures import LogCapture
from twisted.trial import unittest

from scrapy import signals
from scrapy.settings import Settings
from scrapy.http import Request, Response, TextResponse, XmlResponse, HtmlResponse
from scrapy.spiders.init import InitSpider
from scrapy.spiders import Spider, BaseSpider, CrawlSpider, Rule, XMLFeedSpider, \
    CSVFeedSpider, SitemapSpider
from scrapy.linkextractors import LinkExtractor
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.trackref import object_ref
from scrapy.utils.test import get_crawler
from tests import mock


class SpiderTest(unittest.TestCase):

    spider_class = Spider

    def setUp(self):
        warnings.simplefilter("always")

    def tearDown(self):
        warnings.resetwarnings()

    def test_base_spider(self):
        spider = self.spider_class("example.com")
        self.assertEqual(spider.name, 'example.com')
        self.assertEqual(spider.start_urls, [])

    def test_start_requests(self):
        spider = self.spider_class('example.com')
        start_requests = spider.start_requests()
        self.assertTrue(inspect.isgenerator(start_requests))
        self.assertEqual(list(start_requests), [])

    def test_spider_args(self):
        """Constructor arguments are assigned to spider attributes"""
        spider = self.spider_class('example.com', foo='bar')
        self.assertEqual(spider.foo, 'bar')

    def test_spider_without_name(self):
        """Constructing a spider without a name raises ValueError"""
        self.assertRaises(ValueError, self.spider_class)
        self.assertRaises(ValueError, self.spider_class, somearg='foo')

    def test_deprecated_set_crawler_method(self):
        spider = self.spider_class('example.com')
        crawler = get_crawler()
        with warnings.catch_warnings(record=True) as w:
            spider.set_crawler(crawler)
            self.assertIn("set_crawler", str(w[0].message))
            self.assertTrue(hasattr(spider, 'crawler'))
            self.assertIs(spider.crawler, crawler)
            self.assertTrue(hasattr(spider, 'settings'))
            self.assertIs(spider.settings, crawler.settings)

    def test_from_crawler_crawler_and_settings_population(self):
        crawler = get_crawler()
        spider = self.spider_class.from_crawler(crawler, 'example.com')
        self.assertTrue(hasattr(spider, 'crawler'))
        self.assertIs(spider.crawler, crawler)
        self.assertTrue(hasattr(spider, 'settings'))
        self.assertIs(spider.settings, crawler.settings)

    def test_from_crawler_init_call(self):
        with mock.patch.object(self.spider_class, '__init__',
                               return_value=None) as mock_init:
            self.spider_class.from_crawler(get_crawler(), 'example.com',
                                           foo='bar')
            mock_init.assert_called_once_with('example.com', foo='bar')

    def test_closed_signal_call(self):
        class TestSpider(self.spider_class):
            closed_called = False

            def closed(self, reason):
                self.closed_called = True

        crawler = get_crawler()
        spider = TestSpider.from_crawler(crawler, 'example.com')
        crawler.signals.send_catch_log(signal=signals.spider_opened,
                                       spider=spider)
        crawler.signals.send_catch_log(signal=signals.spider_closed,
                                       spider=spider, reason=None)
        self.assertTrue(spider.closed_called)

    def test_update_settings(self):
        spider_settings = {'TEST1': 'spider', 'TEST2': 'spider'}
        project_settings = {'TEST1': 'project', 'TEST3': 'project'}
        self.spider_class.custom_settings = spider_settings
        settings = Settings(project_settings, priority='project')

        self.spider_class.update_settings(settings)
        self.assertEqual(settings.get('TEST1'), 'spider')
        self.assertEqual(settings.get('TEST2'), 'spider')
        self.assertEqual(settings.get('TEST3'), 'project')

    def test_logger(self):
        spider = self.spider_class('example.com')
        with LogCapture() as l:
            spider.logger.info('test
log msg') l.check(('example.com', 'INFO', 'test log msg')) record = l.records[0] self.assertIn('spider', record.__dict__) self.assertIs(record.spider, spider) def test_log(self): spider = self.spider_class('example.com') with mock.patch('scrapy.spiders.Spider.logger') as mock_logger: spider.log('test log msg', 'INFO') mock_logger.log.assert_called_once_with('INFO', 'test log msg') class InitSpiderTest(SpiderTest): spider_class = InitSpider class XMLFeedSpiderTest(SpiderTest): spider_class = XMLFeedSpider def test_register_namespace(self): body = b"""<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns:x="http://www.google.com/schemas/sitemap/0.84" xmlns:y="http://www.example.com/schemas/extras/1.0"> <url><x:loc>http://www.example.com/Special-Offers.html</loc><y:updated>2009-08-16</updated><other value="bar" y:custom="fuu"/></url> <url><loc>http://www.example.com/</loc><y:updated>2009-08-16</updated><other value="foo"/></url> </urlset>""" response = XmlResponse(url='http://example.com/sitemap.xml', body=body) class _XMLSpider(self.spider_class): itertag = 'url' namespaces = ( ('a', 'http://www.google.com/schemas/sitemap/0.84'), ('b', 'http://www.example.com/schemas/extras/1.0'), ) def parse_node(self, response, selector): yield { 'loc': selector.xpath('a:loc/text()').extract(), 'updated': selector.xpath('b:updated/text()').extract(), 'other': selector.xpath('other/@value').extract(), 'custom': selector.xpath('other/@b:custom').extract(), } for iterator in ('iternodes', 'xml'): spider = _XMLSpider('example', iterator=iterator) output = list(spider.parse(response)) self.assertEqual(len(output), 2, iterator) self.assertEqual(output, [ {'loc': [u'http://www.example.com/Special-Offers.html'], 'updated': [u'2009-08-16'], 'custom': [u'fuu'], 'other': [u'bar']}, {'loc': [], 'updated': [u'2009-08-16'], 'other': [u'foo'], 'custom': []}, ], iterator) class CSVFeedSpiderTest(SpiderTest): spider_class = CSVFeedSpider class CrawlSpiderTest(SpiderTest): test_body = b"""<html><head><title>Page title<title> <body> <p><a href="item/12.html">Item 12</a></p> <div class='links'> <p><a href="/about.html">About us</a></p> </div> <div> <p><a href="/nofollow.html">This shouldn't be followed</a></p> </div> </body></html>""" spider_class = CrawlSpider def test_process_links(self): response = HtmlResponse("http://example.org/somepage/index.html", body=self.test_body) class _CrawlSpider(self.spider_class): name="test" allowed_domains=['example.org'] rules = ( Rule(LinkExtractor(), process_links="dummy_process_links"), ) def dummy_process_links(self, links): return links spider = _CrawlSpider() output = list(spider._requests_to_follow(response)) self.assertEqual(len(output), 3) self.assertTrue(all(map(lambda r: isinstance(r, Request), output))) self.assertEquals([r.url for r in output], ['http://example.org/somepage/item/12.html', 'http://example.org/about.html', 'http://example.org/nofollow.html']) def test_process_links_filter(self): response = HtmlResponse("http://example.org/somepage/index.html", body=self.test_body) class _CrawlSpider(self.spider_class): import re name="test" allowed_domains=['example.org'] rules = ( Rule(LinkExtractor(), process_links="filter_process_links"), ) _test_regex = re.compile('nofollow') def filter_process_links(self, links): return [link for link in links if not self._test_regex.search(link.url)] spider = _CrawlSpider() output = list(spider._requests_to_follow(response)) self.assertEqual(len(output), 2) self.assertTrue(all(map(lambda r: isinstance(r, Request), output))) 
self.assertEquals([r.url for r in output], ['http://example.org/somepage/item/12.html', 'http://example.org/about.html']) def test_process_links_generator(self): response = HtmlResponse("http://example.org/somepage/index.html", body=self.test_body) class _CrawlSpider(self.spider_class): name="test" allowed_domains=['example.org'] rules = ( Rule(LinkExtractor(), process_links="dummy_process_links"), ) def dummy_process_links(self, links): for link in links: yield link spider = _CrawlSpider() output = list(spider._requests_to_follow(response)) self.assertEqual(len(output), 3) self.assertTrue(all(map(lambda r: isinstance(r, Request), output))) self.assertEquals([r.url for r in output], ['http://example.org/somepage/item/12.html', 'http://example.org/about.html', 'http://example.org/nofollow.html']) def test_follow_links_attribute_population(self): crawler = get_crawler() spider = self.spider_class.from_crawler(crawler, 'example.com') self.assertTrue(hasattr(spider, '_follow_links')) self.assertTrue(spider._follow_links) settings_dict = {'CRAWLSPIDER_FOLLOW_LINKS': False} crawler = get_crawler(settings_dict=settings_dict) spider = self.spider_class.from_crawler(crawler, 'example.com') self.assertTrue(hasattr(spider, '_follow_links')) self.assertFalse(spider._follow_links) def test_follow_links_attribute_deprecated_population(self): spider = self.spider_class('example.com') self.assertFalse(hasattr(spider, '_follow_links')) spider.set_crawler(get_crawler()) self.assertTrue(hasattr(spider, '_follow_links')) self.assertTrue(spider._follow_links) spider = self.spider_class('example.com') settings_dict = {'CRAWLSPIDER_FOLLOW_LINKS': False} spider.set_crawler(get_crawler(settings_dict=settings_dict)) self.assertTrue(hasattr(spider, '_follow_links')) self.assertFalse(spider._follow_links) class SitemapSpiderTest(SpiderTest): spider_class = SitemapSpider BODY = b"SITEMAP" f = BytesIO() g = gzip.GzipFile(fileobj=f, mode='w+b') g.write(BODY) g.close() GZBODY = f.getvalue() def test_get_sitemap_body(self): spider = self.spider_class("example.com") r = XmlResponse(url="http://www.example.com/", body=self.BODY) self.assertEqual(spider._get_sitemap_body(r), self.BODY) r = HtmlResponse(url="http://www.example.com/", body=self.BODY) self.assertEqual(spider._get_sitemap_body(r), None) r = Response(url="http://www.example.com/favicon.ico", body=self.BODY) self.assertEqual(spider._get_sitemap_body(r), None) r = Response(url="http://www.example.com/sitemap", body=self.GZBODY, headers={"content-type": "application/gzip"}) self.assertEqual(spider._get_sitemap_body(r), self.BODY) r = TextResponse(url="http://www.example.com/sitemap.xml", body=self.BODY) self.assertEqual(spider._get_sitemap_body(r), self.BODY) r = Response(url="http://www.example.com/sitemap.xml.gz", body=self.GZBODY) self.assertEqual(spider._get_sitemap_body(r), self.BODY) class BaseSpiderDeprecationTest(unittest.TestCase): def test_basespider_is_deprecated(self): with warnings.catch_warnings(record=True) as w: class MySpider1(BaseSpider): pass self.assertEqual(len(w), 1) self.assertEqual(w[0].category, ScrapyDeprecationWarning) self.assertEqual(w[0].lineno, inspect.getsourcelines(MySpider1)[1]) def test_basespider_issubclass(self): class MySpider2(Spider): pass class MySpider2a(MySpider2): pass class Foo(object): pass class Foo2(object_ref): pass assert issubclass(MySpider2, BaseSpider) assert issubclass(MySpider2a, BaseSpider) assert not issubclass(Foo, BaseSpider) assert not issubclass(Foo2, BaseSpider) def test_basespider_isinstance(self): class 
MySpider3(Spider): name = 'myspider3' class MySpider3a(MySpider3): pass class Foo(object): pass class Foo2(object_ref): pass assert isinstance(MySpider3(), BaseSpider) assert isinstance(MySpider3a(), BaseSpider) assert not isinstance(Foo(), BaseSpider) assert not isinstance(Foo2(), BaseSpider) def test_crawl_spider(self): assert issubclass(CrawlSpider, Spider) assert issubclass(CrawlSpider, BaseSpider) assert isinstance(CrawlSpider(name='foo'), Spider) assert isinstance(CrawlSpider(name='foo'), BaseSpider) if __name__ == '__main__': unittest.main()
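# --- Illustrative usage sketch (an addition, not part of the original tests) ---
# A minimal spider of the shape the CrawlSpider tests above exercise; the
# domain, link pattern, and callback name are assumptions for illustration.
class ExampleCrawlSpiderSketch(CrawlSpider):
    name = 'example_sketch'
    allowed_domains = ['example.org']
    rules = (
        Rule(LinkExtractor(allow=r'/item/'), callback='parse_item'),
    )

    def parse_item(self, response):
        return {'url': response.url}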
bsd-3-clause
1,410,957,226,837,916,700
35.623037
140
0.605075
false
archf/ansible
lib/ansible/modules/network/f5/bigip_monitor_tcp_echo.py
16
16338
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2017 F5 Networks Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: bigip_monitor_tcp_echo short_description: Manages F5 BIG-IP LTM tcp monitors. description: Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API. version_added: "2.4" options: name: description: - Monitor name. required: True aliases: - monitor parent: description: - The parent template of this monitor template. Once this value has been set, it cannot be changed. By default, this value is the C(tcp) parent on the C(Common) partition. default: "/Common/tcp" ip: description: - IP address part of the IP/port definition. If this parameter is not provided when creating a new monitor, then the default value will be '*'. - If this value is an IP address, and the C(type) is C(tcp) (the default), then a C(port) number must be specified. interval: description: - The interval specifying how frequently the monitor instance of this template will run. If this parameter is not provided when creating a new monitor, then the default value will be 5. This value B(must) be less than the C(timeout) value. timeout: description: - The number of seconds in which the node or service must respond to the monitor request. If the target responds within the set time period, it is considered up. If the target does not respond within the set time period, it is considered down. You can change this number to any number you want, however, it should be 3 times the interval number of seconds plus 1 second. If this parameter is not provided when creating a new monitor, then the default value will be 16. time_until_up: description: - Specifies the amount of time in seconds after the first successful response before a node will be marked up. A value of 0 will cause a node to be marked up immediately after a valid response is received from the node. If this parameter is not provided when creating a new monitor, then the default value will be 0. notes: - Requires the f5-sdk Python package on the host. This is as easy as pip install f5-sdk. - Requires BIG-IP software version >= 12 requirements: - f5-sdk >= 2.2.3 extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = ''' - name: Create TCP Echo Monitor bigip_monitor_tcp_echo: state: "present" server: "lb.mydomain.com" user: "admin" ip: 10.10.10.10 password: "secret" name: "my_tcp_monitor" delegate_to: localhost - name: Remove TCP Echo Monitor bigip_monitor_tcp_echo: state: "absent" server: "lb.mydomain.com" user: "admin" password: "secret" name: "my_tcp_monitor" delegate_to: localhost ''' RETURN = ''' parent: description: New parent template of the monitor. returned: changed type: string sample: "tcp" ip: description: The new IP of IP/port definition. 
    returned: changed
    type: string
    sample: "10.12.13.14"
interval:
    description: The new interval in which to run the monitor check.
    returned: changed
    type: int
    sample: 2
timeout:
    description: The new timeout in which the remote system must respond to the monitor.
    returned: changed
    type: int
    sample: 10
time_until_up:
    description: The new time in which to mark a system as up after first successful response.
    returned: changed
    type: int
    sample: 2
'''

import os

try:
    import netaddr
    HAS_NETADDR = True
except ImportError:
    HAS_NETADDR = False

from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
from ansible.module_utils.f5_utils import iteritems
from ansible.module_utils.f5_utils import defaultdict

try:
    from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
    HAS_F5SDK = False


class Parameters(AnsibleF5Parameters):
    api_map = {
        'timeUntilUp': 'time_until_up',
        'defaultsFrom': 'parent'
    }

    api_attributes = [
        'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'destination'
    ]

    returnables = [
        'parent', 'ip', 'interval', 'timeout', 'time_until_up'
    ]

    updatables = [
        'ip', 'interval', 'timeout', 'time_until_up'
    ]

    def __init__(self, params=None):
        self._values = defaultdict(lambda: None)
        self._values['__warnings'] = []
        if params:
            self.update(params=params)

    def update(self, params=None):
        if params:
            for k, v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    map_key = self.api_map[k]
                else:
                    map_key = k

                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have
                        # an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v

    def to_return(self):
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
            return result
        except Exception:
            return result

    def api_params(self):
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result

    @property
    def interval(self):
        if self._values['interval'] is None:
            return None
        # reject values outside the supported 1..86400 range
        if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
            raise F5ModuleError(
                "Interval value must be between 1 and 86400"
            )
        return int(self._values['interval'])

    @property
    def timeout(self):
        if self._values['timeout'] is None:
            return None
        return int(self._values['timeout'])

    @property
    def ip(self):
        if self._values['ip'] is None:
            return None
        try:
            if self._values['ip'] in ['*', '0.0.0.0']:
                return '*'
            result = str(netaddr.IPAddress(self._values['ip']))
            return result
        except netaddr.core.AddrFormatError:
            raise F5ModuleError(
                "The provided 'ip' parameter is not an IP address."
            )

    @property
    def destination(self):
        return self.ip

    @destination.setter
    def destination(self, value):
        self._values['ip'] = value

    @property
    def time_until_up(self):
        if self._values['time_until_up'] is None:
            return None
        return int(self._values['time_until_up'])

    @property
    def parent(self):
        if self._values['parent'] is None:
            return None
        if self._values['parent'].startswith('/'):
            parent = os.path.basename(self._values['parent'])
            result = '/{0}/{1}'.format(self.partition, parent)
        else:
            result = '/{0}/{1}'.format(self.partition, self._values['parent'])
        return result

    @property
    def type(self):
        return 'tcp_echo'


class Difference(object):
    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            result = self.__default(param)
            return result

    @property
    def parent(self):
        # compare the desired parent against the one currently on the device
        if self.want.parent != self.have.parent:
            raise F5ModuleError(
                "The parent monitor cannot be changed"
            )

    @property
    def destination(self):
        if self.want.ip is None:
            return None
        if self.want.destination != self.have.destination:
            return self.want.destination

    @property
    def interval(self):
        if self.want.timeout is not None and self.want.interval is not None:
            if self.want.interval >= self.want.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        elif self.want.timeout is not None:
            if self.have.interval >= self.want.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        elif self.want.interval is not None:
            if self.want.interval >= self.have.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        if self.want.interval != self.have.interval:
            return self.want.interval

    def __default(self, param):
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1


class ModuleManager(object):
    def __init__(self, client):
        self.client = client
        self.have = None
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def _set_changed_options(self):
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(changed)

    def _update_changed_options(self):
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                changed[k] = change
        if changed:
            self.changes = Parameters(changed)
            return True
        return False

    def _announce_deprecations(self):
        warnings = []
        if self.want:
            warnings += self.want._values.get('__warnings', [])
        if self.have:
            warnings += self.have._values.get('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def exec_module(self):
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations()
        return result

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def create(self):
        self._set_changed_options()
        if self.want.timeout is None:
            self.want.update({'timeout': 16})
        if self.want.interval is None:
            self.want.update({'interval': 5})
        if self.want.time_until_up is None:
self.want.update({'time_until_up': 0}) if self.want.ip is None: self.want.update({'ip': '*'}) if self.client.check_mode: return True self.create_on_device() return True def should_update(self): result = self._update_changed_options() if result: return True return False def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.client.check_mode: return True self.update_on_device() return True def absent(self): if self.exists(): return self.remove() return False def remove(self): if self.client.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the monitor.") return True def read_current_from_device(self): resource = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load( name=self.want.name, partition=self.want.partition ) result = resource.attrs return Parameters(result) def exists(self): result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.exists( name=self.want.name, partition=self.want.partition ) return result def update_on_device(self): params = self.want.api_params() result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load( name=self.want.name, partition=self.want.partition ) result.modify(**params) def create_on_device(self): params = self.want.api_params() self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.create( name=self.want.name, partition=self.want.partition, **params ) def remove_from_device(self): result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load( name=self.want.name, partition=self.want.partition ) if result: result.delete() class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True self.argument_spec = dict( name=dict(required=True), parent=dict(), ip=dict(), interval=dict(type='int'), timeout=dict(type='int'), time_until_up=dict(type='int') ) self.f5_product_name = 'bigip' def main(): try: spec = ArgumentSpec() client = AnsibleF5Client( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, f5_product_name=spec.f5_product_name ) if not HAS_F5SDK: raise F5ModuleError("The python f5-sdk module is required") if not HAS_NETADDR: raise F5ModuleError("The python netaddr module is required") mm = ModuleManager(client) results = mm.exec_module() client.module.exit_json(**results) except F5ModuleError as e: client.module.fail_json(msg=str(e)) if __name__ == '__main__': main()
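# --- Illustrative sketch (an addition, not part of the original module) ---
# A quick demonstration of the range validation performed by
# Parameters.interval above; the out-of-range value is an assumption chosen
# for illustration.
def _interval_validation_sketch():
    p = Parameters(dict(interval=90000))
    try:
        p.interval
    except F5ModuleError as e:
        return str(e)  # "Interval value must be between 1 and 86400"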
gpl-3.0
-1,554,223,833,871,160,000
30.001898
94
0.584955
false
sebrandon1/nova
nova/db/sqlalchemy/migrate_repo/versions/229_add_extra_resources_in_compute_nodes.py
25
1358
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import Text def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # Add a new column extra_resources to save extra_resources info for # compute nodes compute_nodes = Table('compute_nodes', meta, autoload=True) shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True) extra_resources = Column('extra_resources', Text, nullable=True) shadow_extra_resources = Column('extra_resources', Text, nullable=True) compute_nodes.create_column(extra_resources) shadow_compute_nodes.create_column(shadow_extra_resources)
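# Hedged sketch, not part of the migration as stored here: Nova later dropped
# downgrade support, but with sqlalchemy-migrate the inverse operation would
# simply mirror upgrade() by dropping the column from both tables.
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)

    compute_nodes.drop_column('extra_resources')
    shadow_compute_nodes.drop_column('extra_resources')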
apache-2.0
-6,431,888,702,800,318,000
37.8
78
0.73785
false
danmergens/mi-instrument
mi/dataset/driver/moas/gl/dosta/driver_common.py
7
1132
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##

__author__ = "mworden"

from mi.core.log import get_logger

from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.parser.glider import GliderParser


class DostaAbcdjmGliderDriver:

    def __init__(self, source_file_path, particle_data_handler, parser_config):

        self._source_file_path = source_file_path
        self._particle_data_handler = particle_data_handler
        self._parser_config = parser_config

    def process(self):

        log = get_logger()

        with open(self._source_file_path, "rb") as file_handle:

            def exception_callback(exception):
                # Any parse exception marks the whole capture as failed.
                log.debug("Exception: %s", exception)
                self._particle_data_handler.setParticleDataCaptureFailure()

            parser = GliderParser(self._parser_config, file_handle, exception_callback)

            driver = DataSetDriver(parser, self._particle_data_handler)

            driver.processFileStream()

        return self._particle_data_handler
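# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the driver: how a caller might wire the
# driver up. ParticleDataHandler lives alongside DataSetDriver in
# mi.dataset.dataset_driver; the parser config keys, particle class name and
# file path below are illustrative assumptions.
#
#   from mi.dataset.dataset_driver import ParticleDataHandler
#
#   parser_config = {'particle_module': 'mi.dataset.parser.glider',
#                    'particle_class': 'DostaTelemeteredDataParticle'}
#
#   handler = DostaAbcdjmGliderDriver('/tmp/some_glider_recovered.mrg',
#                                     ParticleDataHandler(),
#                                     parser_config).process()
# ---------------------------------------------------------------------------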
bsd-2-clause
-4,193,161,188,427,083,000
26.609756
79
0.60689
false
lukeiwanski/tensorflow
tensorflow/python/ops/losses/losses.py
61
1102
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Loss operations for use in neural networks. Note: All the losses are added to the `GraphKeys.LOSSES` collection by default. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.python.ops.losses.losses_impl import * from tensorflow.python.ops.losses.util import * # pylint: enable=wildcard-import
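# ---------------------------------------------------------------------------
# Hedged example, not part of the TensorFlow source: using one of the ops
# wildcard-imported above and the GraphKeys.LOSSES collection they populate.
# Assumes the TF 1.x graph-mode API.
#
#   import tensorflow as tf
#
#   labels = tf.constant([[1.0], [0.0]])
#   predictions = tf.constant([[0.9], [0.2]])
#
#   loss = tf.losses.mean_squared_error(labels, predictions)
#   # mean_squared_error registered `loss` under tf.GraphKeys.LOSSES, so
#   # get_total_loss() can sum every registered loss into one tensor.
#   total = tf.losses.get_total_loss(add_regularization_losses=False)
# ---------------------------------------------------------------------------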
apache-2.0
-8,567,462,894,064,323,000
39.814815
80
0.712341
false
5GExchange/escape
mininet/examples/test/test_multipoll.py
2
1105
#!/usr/bin/env python

"""
Test for multipoll.py
"""

import unittest
import pexpect

class testMultiPoll( unittest.TestCase ):

    def testMultiPoll( self ):
        "Verify that we receive one ping per second per host"
        p = pexpect.spawn( 'python -m mininet.examples.multipoll' )
        opts = [ r"\*\*\* (h\d) :",
                 r"(h\d+): \d+ bytes from",
                 r"Monitoring output for (\d+) seconds",
                 pexpect.EOF ]
        pings = {}
        # default in case the monitoring banner is never printed,
        # so the assertion below cannot hit an unbound name
        seconds = 0
        while True:
            index = p.expect( opts )
            if index == 0:
                name = p.match.group( 1 )
                pings[ name ] = 0
            elif index == 1:
                name = p.match.group( 1 )
                pings[ name ] += 1
            elif index == 2:
                seconds = int( p.match.group( 1 ) )
            else:
                break
        self.assertTrue( len( pings ) > 0 )
        # make sure we have received at least one ping per second
        for count in pings.values():
            self.assertTrue( count >= seconds )

if __name__ == '__main__':
    unittest.main()
apache-2.0
-5,339,093,238,142,895,000
28.078947
67
0.482353
false
albertomurillo/ansible
lib/ansible/modules/network/f5/bigip_config.py
15
13150
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_config short_description: Manage BIG-IP configuration sections description: - Manages a BIG-IP configuration by allowing TMSH commands that modify running configuration, or merge SCF formatted files into the running configuration. Additionally, this module is of significant importance because it allows you to save your running configuration to disk. Since the F5 module only manipulate running configuration, it is important that you utilize this module to save that running config. version_added: 2.4 options: save: description: - The C(save) argument instructs the module to save the running-config to startup-config. - This operation is performed after any changes are made to the current running config. If no changes are made, the configuration is still saved to the startup config. - This option will always cause the module to return changed. type: bool default: yes reset: description: - Loads the default configuration on the device. - If this option is specified, the default configuration will be loaded before any commands or other provided configuration is run. type: bool default: no merge_content: description: - Loads the specified configuration that you want to merge into the running configuration. This is equivalent to using the C(tmsh) command C(load sys config from-terminal merge). - If you need to read configuration from a file or template, use Ansible's C(file) or C(template) lookup plugins respectively. verify: description: - Validates the specified configuration to see whether they are valid to replace the running configuration. - The running configuration will not be changed. - When this parameter is set to C(yes), no change will be reported by the module. 
type: bool default: no extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Save the running configuration of the BIG-IP bigip_config: save: yes provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost - name: Reset the BIG-IP configuration, for example, to RMA the device bigip_config: reset: yes save: yes provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost - name: Load an SCF configuration bigip_config: merge_content: "{{ lookup('file', '/path/to/config.scf') }}" provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost ''' RETURN = r''' stdout: description: The set of responses from the options returned: always type: list sample: ['...', '...'] stdout_lines: description: The value of stdout split into a list returned: always type: list sample: [['...', '...'], ['...'], ['...']] ''' try: from StringIO import StringIO except ImportError: from io import StringIO import os import tempfile from ansible.module_utils.basic import AnsibleModule try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.icontrol import upload_file except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.icontrol import upload_file class Parameters(AnsibleF5Parameters): returnables = ['stdout', 'stdout_lines'] def to_return(self): result = {} for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) return result class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = Parameters(params=self.module.params) self.changes = Parameters() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = Parameters(params=changed) def _to_lines(self, stdout): lines = list() for item in stdout: if isinstance(item, str): item = str(item).split('\n') lines.append(item) return lines def exec_module(self): result = {} changed = self.execute() result.update(**self.changes.to_return()) result.update(dict(changed=changed)) return result def execute(self): responses = [] if self.want.reset: response = self.reset() responses.append(response) if self.want.merge_content: if self.want.verify: response = self.merge(verify=True) responses.append(response) else: response = self.merge(verify=False) responses.append(response) if self.want.save: response = self.save() responses.append(response) self._detect_errors(responses) changes = { 'stdout': responses, 'stdout_lines': self._to_lines(responses) } self.changes = Parameters(params=changes) if self.want.verify: return False return True def _detect_errors(self, stdout): errors = [ 'Unexpected Error:' ] msg = [x for x in stdout for y in errors if y 
in x] if msg: # Error only contains the lines that include the error raise F5ModuleError(' '.join(msg)) def reset(self): if self.module.check_mode: return True return self.reset_device() def reset_device(self): command = 'tmsh load sys config default' uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='-c "{0}"'.format(command) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) def merge(self, verify=True): temp_name = next(tempfile._get_candidate_names()) remote_path = "/var/config/rest/downloads/{0}".format(temp_name) temp_path = '/tmp/' + temp_name if self.module.check_mode: return True self.upload_to_device(temp_name) self.move_on_device(remote_path) response = self.merge_on_device( remote_path=temp_path, verify=verify ) self.remove_temporary_file(remote_path=temp_path) return response def merge_on_device(self, remote_path, verify=True): command = 'tmsh load sys config file {0} merge'.format( remote_path ) if verify: command += ' verify' uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='-c "{0}"'.format(command) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) def remove_temporary_file(self, remote_path): uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs=remote_path ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def move_on_device(self, remote_path): uri = "https://{0}:{1}/mgmt/tm/util/unix-mv".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='{0} /tmp/{1}'.format( remote_path, os.path.basename(remote_path) ) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def upload_to_device(self, temp_name): template = StringIO(self.want.merge_content) url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format( self.client.provider['server'], self.client.provider['server_port'] ) try: upload_file(self.client, url, template, temp_name) except F5ModuleError: raise F5ModuleError( "Failed to upload the file." 
) def save(self): if self.module.check_mode: return True return self.save_on_device() def save_on_device(self): command = 'tmsh save sys config' uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='-c "{0}"'.format(command) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( reset=dict( type='bool', default=False ), merge_content=dict(), verify=dict( type='bool', default=False ), save=dict( type='bool', default='yes' ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
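# ---------------------------------------------------------------------------
# Hedged playbook sketch, not one of the module's documented EXAMPLES above:
# validate a candidate SCF with C(verify) so the running configuration is
# left untouched. Hostname and credentials are placeholders.
#
#   - name: Validate an SCF without applying it
#     bigip_config:
#       merge_content: "{{ lookup('file', '/path/to/config.scf') }}"
#       verify: yes
#       save: no
#       provider:
#         server: lb.mydomain.com
#         user: admin
#         password: secret
#     delegate_to: localhost
# ---------------------------------------------------------------------------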
gpl-3.0
1,630,164,974,763,817,500
29.868545
91
0.59346
false
Beyond-Imagination/BlubBlub
ChatbotServer/ChatbotEnv/Lib/site-packages/konlpy/tag/_mecab.py
1
3616
#! /usr/bin/python
# -*- coding: utf-8 -*-

import sys

try:
    from MeCab import Tagger
except ImportError:
    pass

from .. import utils


__all__ = ['Mecab']


attrs = ['tags',            # POS tag
         'semantic',        # semantic class
         'has_jongsung',    # presence of a final consonant (jongsung)
         'read',            # reading
         'type',            # type
         'first_pos',       # first POS
         'last_pos',        # last POS
         'original',        # original form
         'indexed']         # indexed representation


def parse(result, allattrs=False):
    def split(elem):
        if not elem: return ('', 'SY')
        s, t = elem.split('\t')
        return (s, t.split(',', 1)[0])

    return [split(elem) for elem in result.splitlines()[:-1]]


class Mecab():
    """Wrapper for MeCab-ko morphological analyzer.

    `MeCab`_, originally a Japanese morphological analyzer and POS tagger
    developed by the Graduate School of Informatics in Kyoto University,
    was modified to MeCab-ko by the `Eunjeon Project`_
    to adapt to the Korean language.

    In order to use MeCab-ko within KoNLPy, follow the directions in
    :ref:`optional-installations`.

    .. code-block:: python
        :emphasize-lines: 1

        >>> # MeCab installation needed
        >>> from konlpy.tag import Mecab
        >>> mecab = Mecab()
        >>> print(mecab.morphs(u'영등포구청역에 있는 맛집 좀 알려주세요.'))
        ['영등포구', '청역', '에', '있', '는', '맛집', '좀', '알려', '주', '세요', '.']
        >>> print(mecab.nouns(u'우리나라에는 무릎 치료를 잘하는 정형외과가 없는가!'))
        ['우리', '나라', '무릎', '치료', '정형외과']
        >>> print(mecab.pos(u'자연주의 쇼핑몰은 어떤 곳인가?'))
        [('자연', 'NNG'), ('주', 'NNG'), ('의', 'JKG'), ('쇼핑몰', 'NNG'), ('은', 'JX'), ('어떤', 'MM'), ('곳', 'NNG'), ('인가', 'VCP+EF'), ('?', 'SF')]

    :param dicpath: The path of the MeCab-ko dictionary.

    .. _MeCab: https://code.google.com/p/mecab/
    .. _Eunjeon Project: http://eunjeon.blogspot.kr/
    """

    # TODO: check whether flattened results equal non-flattened
    def pos(self, phrase, flatten=True):
        """POS tagger.

        :param flatten: If False, preserves eojeols.
        """

        if sys.version_info[0] < 3:
            phrase = phrase.encode('utf-8')
            if flatten:
                result = self.tagger.parse(phrase).decode('utf-8')
                return parse(result)
            else:
                return [parse(self.tagger.parse(eojeol).decode('utf-8'))
                        for eojeol in phrase.split()]
        else:
            if flatten:
                result = self.tagger.parse(phrase)
                return parse(result)
            else:
                # In Python 3 the MeCab bindings already return str,
                # so no extra decoding step is needed here.
                return [parse(self.tagger.parse(eojeol))
                        for eojeol in phrase.split()]

    def morphs(self, phrase):
        """Parse phrase to morphemes."""

        return [s for s, t in self.pos(phrase)]

    def nouns(self, phrase):
        """Noun extractor."""

        tagged = self.pos(phrase)
        return [s for s, t in tagged if t.startswith('N')]

    def __init__(self, dicpath='/usr/local/lib/mecab/dic/mecab-ko-dic'):
        try:
            self.tagger = Tagger('-d %s' % dicpath)
            self.tagset = utils.read_json('%s/data/tagset/mecab.json' % utils.installpath)
        except RuntimeError:
            raise Exception('Invalid MeCab dictionary path: "%s"\nInput the correct path when initializing class: "Mecab(\'/some/dic/path\')"' % dicpath)
gpl-3.0
2,614,760,195,193,435,000
31.019048
154
0.538667
false
CenturylinkTechnology/ansible-modules-extras
network/dnsimple.py
16
11833
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: dnsimple version_added: "1.6" short_description: Interface with dnsimple.com (a DNS hosting service). description: - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)" options: account_email: description: - "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)" required: false default: null account_api_token: description: - Account API token. See I(account_email) for info. required: false default: null domain: description: - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned. - If domain is present but the domain doesn't exist, it will be created. required: false default: null record: description: - Record to add, if blank a record for the domain will be created, supports the wildcard (*) required: false default: null record_ids: description: - List of records to ensure they either exist or don't exist required: false default: null type: description: - The type of DNS record to create required: false choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ] default: null ttl: description: - The TTL to give the new record required: false default: 3600 (one hour) value: description: - Record value - "Must be specified when trying to ensure a record exists" required: false default: null priority: description: - Record priority required: false default: null state: description: - whether the record should exist or not required: false choices: [ 'present', 'absent' ] default: null solo: description: - Whether the record should be the only one for that record type and record name. 
      Only use with state=present on a record
    required: false
    default: null
requirements: [ dnsimple ]
author: "Alex Coomans (@drcapulet)"
'''

EXAMPLES = '''
# authenticate using email and API token
- local_action: dnsimple account_email=[email protected] account_api_token=dummyapitoken

# fetch all domains
- local_action: dnsimple
  register: domains

# fetch my.com domain records
- local_action: dnsimple domain=my.com state=present
  register: records

# delete a domain
- local_action: dnsimple domain=my.com state=absent

# create a test.my.com A record to point to 127.0.0.1
- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
  register: record

# and then delete it
- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} state=absent

# create a my.com CNAME record to example.com
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present

# change its TTL
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present

# and delete the record
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent
'''

import os
try:
    from dnsimple import DNSimple
    from dnsimple.dnsimple import DNSimpleException
    HAS_DNSIMPLE = True
except ImportError:
    HAS_DNSIMPLE = False

def main():
    module = AnsibleModule(
        argument_spec = dict(
            account_email = dict(required=False),
            account_api_token = dict(required=False, no_log=True),
            domain = dict(required=False),
            record = dict(required=False),
            record_ids = dict(required=False, type='list'),
            type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
            ttl = dict(required=False, default=3600, type='int'),
            value = dict(required=False),
            priority = dict(required=False, type='int'),
            state = dict(required=False, choices=['present', 'absent']),
            solo = dict(required=False, type='bool'),
        ),
        required_together = [
            ['record', 'value'],
        ],
        supports_check_mode = True,
    )

    if not HAS_DNSIMPLE:
        module.fail_json(msg="dnsimple required for this module")

    account_email = module.params.get('account_email')
    account_api_token = module.params.get('account_api_token')
    domain = module.params.get('domain')
    record = module.params.get('record')
    record_ids = module.params.get('record_ids')
    record_type = module.params.get('type')
    ttl = module.params.get('ttl')
    value = module.params.get('value')
    priority = module.params.get('priority')
    state = module.params.get('state')
    is_solo = module.params.get('solo')

    if account_email and account_api_token:
        client = DNSimple(email=account_email, api_token=account_api_token)
    elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
        client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
    else:
        client = DNSimple()

    try:
        # Let's figure out what operation we want to do

        # No domain, return a list
        if not domain:
            domains = client.domains()
            module.exit_json(changed=False, result=[d['domain'] for d in domains])

        # Domain & No record
        if domain and record is None and not record_ids:
            domains = [d['domain'] for d in client.domains()]
            if domain.isdigit():
                dr = next((d for d in domains if d['id'] == int(domain)), None)
            else:
                dr = next((d for d in domains if d['name'] == domain), None)
            if state == 'present':
                if dr:
                    module.exit_json(changed=False, result=dr)
                else:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
            elif state == 'absent':
                if dr:
                    if not module.check_mode:
                        client.delete(domain)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

        # need the not none check since record could be an empty string
        if domain and record is not None:
            records = [r['record'] for r in client.records(str(domain))]

            if not record_type:
                module.fail_json(msg="Missing the record type")

            if not value:
                module.fail_json(msg="Missing the record value")

            rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)

            if state == 'present':
                changed = False
                if is_solo:
                    # delete any records that have the same name and record type
                    same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
                    if rr:
                        same_type = [rid for rid in same_type if rid != rr['id']]
                    if same_type:
                        if not module.check_mode:
                            for rid in same_type:
                                client.delete_record(str(domain), rid)
                        changed = True
                if rr:
                    # check if we need to update
                    if rr['ttl'] != ttl or rr['prio'] != priority:
                        data = {}
                        if ttl:
                            data['ttl'] = ttl
                        if priority:
                            data['prio'] = priority
                        if module.check_mode:
                            module.exit_json(changed=True)
                        else:
                            module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
                    else:
                        module.exit_json(changed=changed, result=rr)
                else:
                    # create it
                    data = {
                        'name': record,
                        'record_type': record_type,
                        'content': value,
                    }
                    if ttl:
                        data['ttl'] = ttl
                    if priority:
                        data['prio'] = priority
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
            elif state == 'absent':
                if rr:
                    if not module.check_mode:
                        client.delete_record(str(domain), rr['id'])
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

        # Make sure these record_ids either all exist or none
        if domain and record_ids:
            current_records = [str(r['record']['id']) for r in client.records(str(domain))]
            wanted_records = [str(r) for r in record_ids]
            if state == 'present':
                difference = list(set(wanted_records) - set(current_records))
                if difference:
                    module.fail_json(msg="Missing the following records: %s" % difference)
                else:
                    module.exit_json(changed=False)
            elif state == 'absent':
                difference = list(set(wanted_records) & set(current_records))
                if difference:
                    if not module.check_mode:
                        for rid in difference:
                            client.delete_record(str(domain), rid)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

    except DNSimpleException:
        e = get_exception()
        module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)

    module.fail_json(msg="Could not determine what you wanted me to do")

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception

main()
gpl-3.0
-4,552,723,886,163,365,400
37.543974
246
0.568833
false
cherylyli/stress-aid
env/lib/python3.5/site-packages/pymongo/server_selectors.py
20
5307
# Copyright 2014-2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. """Criteria to select some ServerDescriptions from a TopologyDescription.""" from pymongo.server_type import SERVER_TYPE class Selection(object): """Input or output of a server selector function.""" @classmethod def from_topology_description(cls, topology_description): known_servers = topology_description.known_servers primary = None for sd in known_servers: if sd.server_type == SERVER_TYPE.RSPrimary: primary = sd break return Selection(topology_description, topology_description.known_servers, topology_description.common_wire_version, primary) def __init__(self, topology_description, server_descriptions, common_wire_version, primary): self.topology_description = topology_description self.server_descriptions = server_descriptions self.primary = primary self.common_wire_version = common_wire_version def with_server_descriptions(self, server_descriptions): return Selection(self.topology_description, server_descriptions, self.common_wire_version, self.primary) def secondary_with_max_last_write_date(self): secondaries = secondary_server_selector(self) if secondaries.server_descriptions: return max(secondaries.server_descriptions, key=lambda sd: sd.last_write_date) @property def primary_selection(self): primaries = [self.primary] if self.primary else [] return self.with_server_descriptions(primaries) @property def heartbeat_frequency(self): return self.topology_description.heartbeat_frequency @property def topology_type(self): return self.topology_description.topology_type def __bool__(self): return bool(self.server_descriptions) __nonzero__ = __bool__ # Python 2. def __getitem__(self, item): return self.server_descriptions[item] def any_server_selector(selection): return selection def readable_server_selector(selection): return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.is_readable]) def writable_server_selector(selection): return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.is_writable]) def secondary_server_selector(selection): return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSSecondary]) def arbiter_server_selector(selection): return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSArbiter]) def writable_preferred_server_selector(selection): """Like PrimaryPreferred but doesn't use tags or latency.""" return (writable_server_selector(selection) or secondary_server_selector(selection)) def apply_single_tag_set(tag_set, selection): """All servers matching one tag set. A tag set is a dict. A server matches if its tags are a superset: A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}. The empty tag set {} matches any server. 
""" def tags_match(server_tags): for key, value in tag_set.items(): if key not in server_tags or server_tags[key] != value: return False return True return selection.with_server_descriptions( [s for s in selection.server_descriptions if tags_match(s.tags)]) def apply_tag_sets(tag_sets, selection): """All servers match a list of tag sets. tag_sets is a list of dicts. The empty tag set {} matches any server, and may be provided at the end of the list as a fallback. So [{'a': 'value'}, {}] expresses a preference for servers tagged {'a': 'value'}, but accepts any server if none matches the first preference. """ for tag_set in tag_sets: with_tag_set = apply_single_tag_set(tag_set, selection) if with_tag_set: return with_tag_set return selection.with_server_descriptions([]) def secondary_with_tags_server_selector(tag_sets, selection): """All near-enough secondaries matching the tag sets.""" return apply_tag_sets(tag_sets, secondary_server_selector(selection)) def member_with_tags_server_selector(tag_sets, selection): """All near-enough members matching the tag sets.""" return apply_tag_sets(tag_sets, readable_server_selector(selection))
mit
3,430,962,506,653,999,000
33.019231
76
0.664029
false
mcanthony/rethinkdb
external/v8_3.30.33.16/build/gyp/test/mac/gyptest-installname.py
244
2512
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled correctly. """ import TestGyp import re import subprocess import sys if sys.platform == 'darwin': test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode']) CHDIR = 'installname' test.run_gyp('test.gyp', chdir=CHDIR) test.build('test.gyp', test.ALL, chdir=CHDIR) def GetInstallname(p): p = test.built_file_path(p, chdir=CHDIR) r = re.compile(r'cmd LC_ID_DYLIB.*?name (.*?) \(offset \d+\)', re.DOTALL) proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE) o = proc.communicate()[0] assert not proc.returncode m = r.search(o) assert m return m.group(1) if (GetInstallname('libdefault_installname.dylib') != '/usr/local/lib/libdefault_installname.dylib'): test.fail_test() if (GetInstallname('My Framework.framework/My Framework') != '/Library/Frameworks/My Framework.framework/' 'Versions/A/My Framework'): test.fail_test() if (GetInstallname('libexplicit_installname.dylib') != 'Trapped in a dynamiclib factory'): test.fail_test() if (GetInstallname('libexplicit_installname_base.dylib') != '@executable_path/../../../libexplicit_installname_base.dylib'): test.fail_test() if (GetInstallname('My Other Framework.framework/My Other Framework') != '@executable_path/../../../My Other Framework.framework/' 'Versions/A/My Other Framework'): test.fail_test() if (GetInstallname('libexplicit_installname_with_base.dylib') != '/usr/local/lib/libexplicit_installname_with_base.dylib'): test.fail_test() if (GetInstallname('libexplicit_installname_with_explicit_base.dylib') != '@executable_path/../libexplicit_installname_with_explicit_base.dylib'): test.fail_test() if (GetInstallname('libboth_base_and_installname.dylib') != 'Still trapped in a dynamiclib factory'): test.fail_test() if (GetInstallname('install_name_with_info_plist.framework/' 'install_name_with_info_plist') != '/Library/Frameworks/install_name_with_info_plist.framework/' 'Versions/A/install_name_with_info_plist'): test.fail_test() if ('DYLIB_INSTALL_NAME_BASE:standardizepath: command not found' in test.stdout()): test.fail_test() test.pass_test()
agpl-3.0
1,958,927,814,491,359,500
30.797468
78
0.675557
false
jmptrader/dirigible-spreadsheet
dirigible/sheet/tests/parser/test_fl_cell_reference_parse_node.py
2
6314
# Copyright (c) 2005-2010 Resolver Systems Ltd, PythonAnywhere LLP # See LICENSE.md # try: import unittest2 as unittest except ImportError: import unittest from sheet.parser.parse_node import ParseNode from sheet.parser.fl_cell_reference_parse_node import FLCellReferenceParseNode from sheet.parser.fl_reference_parse_node import FLReferenceParseNode class FLCellReferenceParseNodeTest(unittest.TestCase): def testConstructor(self): flCellReference = FLCellReferenceParseNode(["A1"]) self.assertTrue(isinstance(flCellReference, FLReferenceParseNode), 'should be a parse node') self.assertEquals(flCellReference.type, ParseNode.FL_CELL_REFERENCE, "Node was of the wrong type") self.assertEquals(flCellReference.children, ["A1"], "Node had the wrong children") def testStr(self): node = FLCellReferenceParseNode(["a1"]) self.assertEquals(str(node), "<FLCellReferenceParseNode type=\"FL_CELL_REFERENCE\" children=['a1']>", "Wrong string representation") def testColAbsolute(self): self.assertFalse(FLCellReferenceParseNode(["A1"]).colAbsolute, "Incorrect colAbsolute for A1") self.assertFalse(FLCellReferenceParseNode(["A$1"]).colAbsolute, "Incorrect colAbsolute for A$1") self.assertTrue(FLCellReferenceParseNode(["$A1"]).colAbsolute, "Incorrect colAbsolute for $A1") self.assertTrue(FLCellReferenceParseNode(["$A$1"]).colAbsolute, "Incorrect colAbsolute for $A$1") self.assertFalse(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).colAbsolute, "Incorrect colAbsolute for A1 with worksheet") self.assertTrue(FLCellReferenceParseNode(["SheetSomething", "! ", "$A$1"]).colAbsolute, "Incorrect colAbsolute for $A$1 with worksheet") def testRowAbsolute(self): self.assertFalse(FLCellReferenceParseNode(["A1"]).rowAbsolute, "Incorrect rowAbsolute for A1") self.assertTrue(FLCellReferenceParseNode(["A$1"]).rowAbsolute, "Incorrect rowAbsolute for A$1") self.assertFalse(FLCellReferenceParseNode(["$A1"]).rowAbsolute, "Incorrect rowAbsolute for $A1") self.assertTrue(FLCellReferenceParseNode(["$A$1"]).rowAbsolute, "Incorrect rowAbsolute for $A$1") self.assertFalse(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).rowAbsolute, "Incorrect colAbsolute for A1 with worksheet") self.assertTrue(FLCellReferenceParseNode(["SheetSomething", "! ", "$A$1"]).rowAbsolute, "Incorrect colAbsolute for $A$1 with worksheet") def testPlainCellName(self): self.assertEquals(FLCellReferenceParseNode(["A1"]).plainCellName, "A1", "Incorrect plainCellName for A1") self.assertEquals(FLCellReferenceParseNode(["A$1"]).plainCellName, "A1", "Incorrect plainCellName for A$1") self.assertEquals(FLCellReferenceParseNode(["$A1"]).plainCellName, "A1", "Incorrect plainCellName for $A1") self.assertEquals(FLCellReferenceParseNode(["$A$1"]).plainCellName, "A1", "Incorrect plainCellName for $A$1") self.assertEquals(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).plainCellName, "A1", "Incorrect plainCellName for A1 with worksheet") self.assertEquals(FLCellReferenceParseNode(["SheetSomething", "! 
", "$A$1"]).plainCellName, "A1", "Incorrect plainCellName for $A$1 with worksheet") def testRegisteredWithParse(self): "test registered with ParseNode" self.assertEquals(type(ParseNode.construct_node(ParseNode.FL_CELL_REFERENCE, ['A1'])), FLCellReferenceParseNode, "Class is not registered with ParseNode") def testCellProperty(self): node = FLCellReferenceParseNode(["G8 "]) self.assertEquals(node.localReference, "G8 ", "cellref wrong") node = FLCellReferenceParseNode(["Sheet1", "!", "G8 "]) self.assertEquals(node.localReference, "G8 ", "cellref wrong") node = FLCellReferenceParseNode(["G8 "]) node.localReference = "F5" self.assertEquals(node.localReference, "F5", "should discard whitespace") node = FLCellReferenceParseNode(["G8 "]) node.localReference = "F5 " self.assertEquals(node.localReference, "F5 ", "should not pile whitespace") def testCanonicalise(self): node = FLCellReferenceParseNode(["bertie ", "!", "a1 "]) node.canonicalise(['Bertie']) self.assertEquals(node.localReference, 'A1 ') self.assertEquals(node.worksheetReference, 'Bertie') def testOffset(self): node = FLCellReferenceParseNode(["G8 "]) node.offset(1, 4) self.assertEquals(node.localReference, "H12 ", "offset didnt work") node = FLCellReferenceParseNode(["G8 "]) node.offset(-7, 1) self.assertEquals(node.localReference, "#Invalid! ", "offset didnt work") node = FLCellReferenceParseNode(["G8 "]) node.offset(1, -8) self.assertEquals(node.localReference, "#Invalid! ", "offset didnt work") node = FLCellReferenceParseNode(["G8 "]) node.offset(-6, -7) self.assertEquals(node.localReference, "A1 ", "offset didnt work") node = FLCellReferenceParseNode(["$G8 "]) node.offset(-6, -7) self.assertEquals(node.localReference, "$G1 ", "offset didnt work") node = FLCellReferenceParseNode(["G$8 "]) node.offset(-6, -7) self.assertEquals(node.localReference, "A$8 ", "offset didnt work") node = FLCellReferenceParseNode(["$G$8 "]) node.offset(-6, -7) self.assertEquals(node.localReference, "$G$8 ", "offset didnt work") node = FLCellReferenceParseNode(["$G$8 "]) node.offset(-6, -7, move_absolute=True) self.assertEquals(node.localReference, "$A$1 ", "offset didnt work") node = FLCellReferenceParseNode(["ZZZ9 "]) node.offset(1, -1) self.assertEquals(node.localReference, "#Invalid! ", "offset didnt work") def testCoords(self): node = FLCellReferenceParseNode(["A2"]) self.assertEquals(node.coords, (1, 2)) node = FLCellReferenceParseNode(["B1"]) self.assertEquals(node.coords, (2, 1))
mit
-921,135,956,750,384,800
45.426471
140
0.663605
false
cristiandima/highlights
highlights/extractive/erank.py
1
3576
""" This is in many ways identical to the textrank algorithms. The only difference is that we expand the sentence graph to also include the title of the text, the topics associated with the text, and the named entitites present The output is still an importance score for each sentence in the original text but these new nodes offer extra information and increase the weights of those sentences which are more closely related to the topics/title/named entities associated with the text """ import spacy from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.metrics.pairwise import cosine_similarity from highlights.extractive.textrank import _textrank_scores from highlights.internals.helpers import summary_length, NLP _word_tokenize = TfidfVectorizer(stop_words='english').build_analyzer() def _get_named_entities(nlp_doc): """ Given a spacy document return the top ten most frequent name entities present in the text. Name entities appearing only once are skipped. Args: nlp_doc (spacy document): document to extract named entities from Returns: a list of words, the most frequent named entities present in the document """ ignored_ents = {'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'} ne = [n.text for n in nlp_doc.ents if n.label_ not in ignored_ents] ne = [n.replace('the', '').strip() for n in ne] ne = set(ne) counter = CountVectorizer(ngram_range=(1,2)) counts = counter.fit_transform([nlp_doc.text]) ne_scores = [] for entity in ne: entity = entity.lower() if entity in counter.vocabulary_: ne_scores.append((counts[0, counter.vocabulary_.get(entity)], entity)) ne_scores = sorted([n for n in ne_scores if n[0] != 1], reverse=True)[:10] return [n[1] for n in ne_scores] def _get_topics(nlp_doc, lda, word_dict, topic_terms): """ Given a spacy document, as well as an lda model, this function returns a list of lists where each list holds the string words associated with each topic associated with the document """ doc_bow = word_dict.doc2bow(_word_tokenize(nlp_doc.text)) topics = lda.get_document_topics(doc_bow) topics_as_words = [] for topic_tuple in topics: topic_words = [] for word_tuple in topic_terms[topic_tuple[0]]: topic_words.append(word_dict[word_tuple[0]]) topics_as_words.append(topic_words) return topics_as_words def _erank_scores(nlp_doc, topics, named_entities, title=None): sentences = [sent.text for sent in nlp_doc.sents] original_len = len(sentences) for topic_words in topics: sentences.append(' '.join(topic_words)) if len(named_entities) >= 1: sentences.append(' '.join(named_entities)) if title is not None: sentences.append(' '.join(_word_tokenize(title))) scores = _textrank_scores(sentences) scores = {i: scores.get(i, 0) for i in range(original_len)} return scores def erank(text, lda, word_dict, topic_terms, title=None, len_func=summary_length): nlp_doc = NLP(text) sentences = [sent.text for sent in nlp_doc.sents] topics = _get_topics(nlp_doc, lda, word_dict, topic_terms) named_entities = _get_named_entities(nlp_doc) scores = _erank_scores(nlp_doc, topics, named_entities, title) sum_len = len_func(len(scores)) sent_scores = [(scores[i], s) for i, s in enumerate(sentences)] top_sentences = sorted(sent_scores, reverse=True)[:sum_len] return [s[1] for s in top_sentences]
mit
-583,513,381,419,460,200
35.489796
90
0.691555
false
SerialShadow/SickRage
autoProcessTV/lib/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py
2360
3778
"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" # Note: This file is under the PSF license as the code comes from the python # stdlib. http://docs.python.org/3/license.html import re __version__ = '3.4.0.2' class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found")
gpl-3.0
5,962,808,152,702,438,000
34.980952
80
0.610905
false
jbzdak/edx-platform
cms/djangoapps/contentstore/views/tests/test_course_index.py
5
35844
""" Unit tests for getting the list of courses and the course outline. """ import ddt import json import lxml import datetime import mock import pytz from django.conf import settings from django.core.exceptions import PermissionDenied from django.utils.translation import ugettext as _ from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError from contentstore.tests.utils import CourseTestCase from contentstore.utils import reverse_course_url, reverse_library_url, add_instructor, reverse_usage_url from contentstore.views.course import ( course_outline_initial_state, reindex_course_and_check_access, _deprecated_blocks_info ) from contentstore.views.item import create_xblock_info, VisibilityState from course_action_state.managers import CourseRerunUIStateManager from course_action_state.models import CourseRerunState from opaque_keys.edx.locator import CourseLocator from search.api import perform_search from student.auth import has_course_author_access from student.tests.factories import UserFactory from util.date_utils import get_default_time_display from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.modulestore.django import modulestore from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, LibraryFactory class TestCourseIndex(CourseTestCase): """ Unit tests for getting the list of courses and the course outline. """ def setUp(self): """ Add a course with odd characters in the fields """ super(TestCourseIndex, self).setUp() # had a problem where index showed course but has_access failed to retrieve it for non-staff self.odd_course = CourseFactory.create( org='test.org_1-2', number='test-2.3_course', display_name='dotted.course.name-2', ) def check_index_and_outline(self, authed_client): """ Test getting the list of courses and then pulling up their outlines """ index_url = '/home/' index_response = authed_client.get(index_url, {}, HTTP_ACCEPT='text/html') parsed_html = lxml.html.fromstring(index_response.content) course_link_eles = parsed_html.find_class('course-link') self.assertGreaterEqual(len(course_link_eles), 2) for link in course_link_eles: self.assertRegexpMatches( link.get("href"), 'course/{}'.format(settings.COURSE_KEY_PATTERN) ) # now test that url outline_response = authed_client.get(link.get("href"), {}, HTTP_ACCEPT='text/html') # ensure it has the expected 2 self referential links outline_parsed = lxml.html.fromstring(outline_response.content) outline_link = outline_parsed.find_class('course-link')[0] self.assertEqual(outline_link.get("href"), link.get("href")) course_menu_link = outline_parsed.find_class('nav-course-courseware-outline')[0] self.assertEqual(course_menu_link.find("a").get("href"), link.get("href")) def test_libraries_on_course_index(self): """ Test getting the list of libraries from the course listing page """ # Add a library: lib1 = LibraryFactory.create() index_url = '/home/' index_response = self.client.get(index_url, {}, HTTP_ACCEPT='text/html') parsed_html = lxml.html.fromstring(index_response.content) library_link_elements = parsed_html.find_class('library-link') self.assertEqual(len(library_link_elements), 1) link = library_link_elements[0] self.assertEqual( link.get("href"), reverse_library_url('library_handler', lib1.location.library_key), ) # now test that url outline_response = self.client.get(link.get("href"), {}, HTTP_ACCEPT='text/html') self.assertEqual(outline_response.status_code, 200) def test_is_staff_access(self): """ Test 
that people with is_staff see the courses and can navigate into them """ self.check_index_and_outline(self.client) def test_negative_conditions(self): """ Test the error conditions for the access """ outline_url = reverse_course_url('course_handler', self.course.id) # register a non-staff member and try to delete the course branch non_staff_client, _ = self.create_non_staff_authed_user_client() response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json') self.assertEqual(response.status_code, 403) def test_course_staff_access(self): """ Make and register course_staff and ensure they can access the courses """ course_staff_client, course_staff = self.create_non_staff_authed_user_client() for course in [self.course, self.odd_course]: permission_url = reverse_course_url('course_team_handler', course.id, kwargs={'email': course_staff.email}) self.client.post( permission_url, data=json.dumps({"role": "staff"}), content_type="application/json", HTTP_ACCEPT="application/json", ) # test access self.check_index_and_outline(course_staff_client) def test_json_responses(self): outline_url = reverse_course_url('course_handler', self.course.id) chapter = ItemFactory.create(parent_location=self.course.location, category='chapter', display_name="Week 1") lesson = ItemFactory.create(parent_location=chapter.location, category='sequential', display_name="Lesson 1") subsection = ItemFactory.create( parent_location=lesson.location, category='vertical', display_name='Subsection 1' ) ItemFactory.create(parent_location=subsection.location, category="video", display_name="My Video") resp = self.client.get(outline_url, HTTP_ACCEPT='application/json') json_response = json.loads(resp.content) # First spot check some values in the root response self.assertEqual(json_response['category'], 'course') self.assertEqual(json_response['id'], unicode(self.course.location)) self.assertEqual(json_response['display_name'], self.course.display_name) self.assertTrue(json_response['published']) self.assertIsNone(json_response['visibility_state']) # Now verify the first child children = json_response['child_info']['children'] self.assertTrue(len(children) > 0) first_child_response = children[0] self.assertEqual(first_child_response['category'], 'chapter') self.assertEqual(first_child_response['id'], unicode(chapter.location)) self.assertEqual(first_child_response['display_name'], 'Week 1') self.assertTrue(json_response['published']) self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled) self.assertTrue(len(first_child_response['child_info']['children']) > 0) # Finally, validate the entire response for consistency self.assert_correct_json_response(json_response) def test_notifications_handler_get(self): state = CourseRerunUIStateManager.State.FAILED action = CourseRerunUIStateManager.ACTION should_display = True # try when no notification exists notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={ 'action_state_id': 1, }) resp = self.client.get(notification_url, HTTP_ACCEPT='application/json') # verify that we get an empty dict out self.assertEquals(resp.status_code, 400) # create a test notification rerun_state = CourseRerunState.objects.update_state( course_key=self.course.id, new_state=state, allow_not_found=True ) CourseRerunState.objects.update_should_display( entry_id=rerun_state.id, user=UserFactory(), should_display=should_display ) # try to get information on this notification notification_url = 
reverse_course_url('course_notifications_handler', self.course.id, kwargs={
            'action_state_id': rerun_state.id,
        })
        resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
        json_response = json.loads(resp.content)
        self.assertEquals(json_response['state'], state)
        self.assertEquals(json_response['action'], action)
        self.assertEquals(json_response['should_display'], should_display)

    def test_notifications_handler_dismiss(self):
        state = CourseRerunUIStateManager.State.FAILED
        should_display = True
        rerun_course_key = CourseLocator(org='testx', course='test_course', run='test_run')

        # add an instructor to this course
        user2 = UserFactory()
        add_instructor(rerun_course_key, self.user, user2)

        # create a test notification
        rerun_state = CourseRerunState.objects.update_state(
            course_key=rerun_course_key, new_state=state, allow_not_found=True
        )
        CourseRerunState.objects.update_should_display(
            entry_id=rerun_state.id, user=user2, should_display=should_display
        )

        # dismiss the notification
        notification_dismiss_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
            'action_state_id': rerun_state.id,
        })
        resp = self.client.delete(notification_dismiss_url)
        self.assertEquals(resp.status_code, 200)

        with self.assertRaises(CourseRerunState.DoesNotExist):
            # notifications that have been dismissed are deleted
            CourseRerunState.objects.get(id=rerun_state.id)

        self.assertFalse(has_course_author_access(user2, rerun_course_key))

    def assert_correct_json_response(self, json_response):
        """
        Asserts that the JSON response is syntactically consistent
        """
        self.assertIsNotNone(json_response['display_name'])
        self.assertIsNotNone(json_response['id'])
        self.assertIsNotNone(json_response['category'])
        self.assertTrue(json_response['published'])
        if json_response.get('child_info', None):
            for child_response in json_response['child_info']['children']:
                self.assert_correct_json_response(child_response)

    def test_course_updates_invalid_url(self):
        """
        Tests the error conditions for an invalid course updates URL.
        """
        # Testing the response code by passing a slash-separated course id whose format is valid but no course
        # having this id exists.
        invalid_course_key = '{}_blah_blah_blah'.format(self.course.id)
        course_updates_url = reverse_course_url('course_info_handler', invalid_course_key)
        response = self.client.get(course_updates_url)
        self.assertEqual(response.status_code, 404)

        # Testing the response code by passing a split course id whose format is valid but no course
        # having this id exists.
        split_course_key = CourseLocator(org='orgASD', course='course_01213', run='Run_0_hhh_hhh_hhh')
        course_updates_url_split = reverse_course_url('course_info_handler', split_course_key)
        response = self.client.get(course_updates_url_split)
        self.assertEqual(response.status_code, 404)

        # Testing the response by passing a split course id whose format is invalid.
        invalid_course_id = 'invalid.course.key/{}'.format(split_course_key)
        course_updates_url_split = reverse_course_url('course_info_handler', invalid_course_id)
        response = self.client.get(course_updates_url_split)
        self.assertEqual(response.status_code, 404)

    def test_course_index_invalid_url(self):
        """
        Tests the error conditions for an invalid course index URL.
        """
        # Testing the response code by passing a slash-separated course key; no course
        # having this key exists.
        invalid_course_key = '{}_some_invalid_run'.format(self.course.id)
        course_outline_url = reverse_course_url('course_handler', invalid_course_key)
        response = self.client.get_html(course_outline_url)
        self.assertEqual(response.status_code, 404)

        # Testing the response code by passing a split course key; no course
        # having this key exists.
        split_course_key = CourseLocator(org='invalid_org', course='course_01111', run='Run_0_invalid')
        course_outline_url_split = reverse_course_url('course_handler', split_course_key)
        response = self.client.get_html(course_outline_url_split)
        self.assertEqual(response.status_code, 404)


@ddt.ddt
class TestCourseOutline(CourseTestCase):
    """
    Unit tests for the course outline.
    """
    def setUp(self):
        """
        Set up for the course outline tests.
        """
        super(TestCourseOutline, self).setUp()

        self.chapter = ItemFactory.create(
            parent_location=self.course.location, category='chapter', display_name="Week 1"
        )
        self.sequential = ItemFactory.create(
            parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
        )
        self.vertical = ItemFactory.create(
            parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
        )
        self.video = ItemFactory.create(
            parent_location=self.vertical.location, category="video", display_name="My Video"
        )

    def test_json_responses(self):
        """
        Verify the JSON responses returned for the course.
        """
        outline_url = reverse_course_url('course_handler', self.course.id)
        resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
        json_response = json.loads(resp.content)

        # First spot check some values in the root response
        self.assertEqual(json_response['category'], 'course')
        self.assertEqual(json_response['id'], unicode(self.course.location))
        self.assertEqual(json_response['display_name'], self.course.display_name)
        self.assertTrue(json_response['published'])
        self.assertIsNone(json_response['visibility_state'])

        # Now verify the first child
        children = json_response['child_info']['children']
        self.assertTrue(len(children) > 0)
        first_child_response = children[0]
        self.assertEqual(first_child_response['category'], 'chapter')
        self.assertEqual(first_child_response['id'], unicode(self.chapter.location))
        self.assertEqual(first_child_response['display_name'], 'Week 1')
        self.assertTrue(json_response['published'])
        self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
        self.assertTrue(len(first_child_response['child_info']['children']) > 0)

        # Finally, validate the entire response for consistency
        self.assert_correct_json_response(json_response)

    def assert_correct_json_response(self, json_response):
        """
        Asserts that the JSON response is syntactically consistent
        """
        self.assertIsNotNone(json_response['display_name'])
        self.assertIsNotNone(json_response['id'])
        self.assertIsNotNone(json_response['category'])
        self.assertTrue(json_response['published'])
        if json_response.get('child_info', None):
            for child_response in json_response['child_info']['children']:
                self.assert_correct_json_response(child_response)

    def test_course_outline_initial_state(self):
        course_module = modulestore().get_item(self.course.location)
        course_structure = create_xblock_info(
            course_module,
            include_child_info=True,
            include_children_predicate=lambda xblock: not xblock.category == 'vertical'
        )

        # Verify that None is returned for a non-existent locator
        self.assertIsNone(course_outline_initial_state('no-such-locator', course_structure))

        # Verify that the correct initial state is returned for the test chapter
        chapter_locator = 
unicode(self.chapter.location) initial_state = course_outline_initial_state(chapter_locator, course_structure) self.assertEqual(initial_state['locator_to_show'], chapter_locator) expanded_locators = initial_state['expanded_locators'] self.assertIn(unicode(self.sequential.location), expanded_locators) self.assertIn(unicode(self.vertical.location), expanded_locators) def test_start_date_on_page(self): """ Verify that the course start date is included on the course outline page. """ def _get_release_date(response): """Return the release date from the course page""" parsed_html = lxml.html.fromstring(response.content) return parsed_html.find_class('course-status')[0].find_class('status-release-value')[0].text_content() def _assert_settings_link_present(response): """ Asserts there's a course settings link on the course page by the course release date. """ parsed_html = lxml.html.fromstring(response.content) settings_link = parsed_html.find_class('course-status')[0].find_class('action-edit')[0].find('a') self.assertIsNotNone(settings_link) self.assertEqual(settings_link.get('href'), reverse_course_url('settings_handler', self.course.id)) outline_url = reverse_course_url('course_handler', self.course.id) response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html') # A course with the default release date should display as "Unscheduled" self.assertEqual(_get_release_date(response), 'Unscheduled') _assert_settings_link_present(response) self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc) modulestore().update_item(self.course, ModuleStoreEnum.UserID.test) response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html') self.assertEqual(_get_release_date(response), get_default_time_display(self.course.start)) _assert_settings_link_present(response) def _create_test_data(self, course_module, create_blocks=False, publish=True, block_types=None): """ Create data for test. """ if create_blocks: for block_type in block_types: ItemFactory.create( parent_location=self.vertical.location, category=block_type, display_name='{} Problem'.format(block_type) ) if not publish: self.store.unpublish(self.vertical.location, self.user.id) course_module.advanced_modules.extend(block_types) def _verify_deprecated_info(self, course_id, advanced_modules, info, deprecated_block_types): """ Verify deprecated info. """ expected_blocks = [] for block_type in deprecated_block_types: expected_blocks.append( [ reverse_usage_url('container_handler', self.vertical.location), '{} Problem'.format(block_type) ] ) self.assertEqual(info['block_types'], deprecated_block_types) self.assertEqual( info['block_types_enabled'], any(component in advanced_modules for component in deprecated_block_types) ) self.assertItemsEqual(info['blocks'], expected_blocks) self.assertEqual( info['advance_settings_url'], reverse_course_url('advanced_settings_handler', course_id) ) @ddt.data( {'publish': True}, {'publish': False}, ) @ddt.unpack def test_verify_deprecated_warning_message_with_single_feature(self, publish): """ Verify deprecated warning info for single deprecated feature. 
""" block_types = settings.DEPRECATED_BLOCK_TYPES course_module = modulestore().get_item(self.course.location) self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish) info = _deprecated_blocks_info(course_module, block_types) self._verify_deprecated_info( course_module.id, course_module.advanced_modules, info, block_types ) def test_verify_deprecated_warning_message_with_multiple_features(self): """ Verify deprecated warning info for multiple deprecated features. """ block_types = ['peergrading', 'combinedopenended', 'openassessment'] course_module = modulestore().get_item(self.course.location) self._create_test_data(course_module, create_blocks=True, block_types=block_types) info = _deprecated_blocks_info(course_module, block_types) self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types) @ddt.data( {'delete_vertical': True}, {'delete_vertical': False}, ) @ddt.unpack def test_deprecated_blocks_list_updated_correctly(self, delete_vertical): """ Verify that deprecated blocks list shown on banner is updated correctly. Here is the scenario: This list of deprecated blocks shown on banner contains published and un-published blocks. That list should be updated when we delete un-published block(s). This behavior should be same if we delete unpublished vertical or problem. """ block_types = ['peergrading'] course_module = modulestore().get_item(self.course.location) vertical1 = ItemFactory.create( parent_location=self.sequential.location, category='vertical', display_name='Vert1 Subsection1' ) problem1 = ItemFactory.create( parent_location=vertical1.location, category='peergrading', display_name='peergrading problem in vert1', publish_item=False ) info = _deprecated_blocks_info(course_module, block_types) # info['blocks'] should be empty here because there is nothing # published or un-published present self.assertEqual(info['blocks'], []) vertical2 = ItemFactory.create( parent_location=self.sequential.location, category='vertical', display_name='Vert2 Subsection1' ) ItemFactory.create( parent_location=vertical2.location, category='peergrading', display_name='peergrading problem in vert2', pubish_item=True ) # At this point CourseStructure will contain both the above # published and un-published verticals info = _deprecated_blocks_info(course_module, block_types) self.assertItemsEqual( info['blocks'], [ [reverse_usage_url('container_handler', vertical1.location), 'peergrading problem in vert1'], [reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2'] ] ) # Delete the un-published vertical or problem so that CourseStructure updates its data if delete_vertical: self.store.delete_item(vertical1.location, self.user.id) else: self.store.delete_item(problem1.location, self.user.id) info = _deprecated_blocks_info(course_module, block_types) # info['blocks'] should only contain the info about vertical2 which is published. # There shouldn't be any info present about un-published vertical1 self.assertEqual( info['blocks'], [[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']] ) class TestCourseReIndex(CourseTestCase): """ Unit tests for the course outline. """ SUCCESSFUL_RESPONSE = _("Course has been successfully reindexed.") def setUp(self): """ Set up the for the course outline tests. 
""" super(TestCourseReIndex, self).setUp() self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc) modulestore().update_item(self.course, self.user.id) self.chapter = ItemFactory.create( parent_location=self.course.location, category='chapter', display_name="Week 1" ) self.sequential = ItemFactory.create( parent_location=self.chapter.location, category='sequential', display_name="Lesson 1" ) self.vertical = ItemFactory.create( parent_location=self.sequential.location, category='vertical', display_name='Subsection 1' ) self.video = ItemFactory.create( parent_location=self.vertical.location, category="video", display_name="My Video" ) self.html = ItemFactory.create( parent_location=self.vertical.location, category="html", display_name="My HTML", data="<div>This is my unique HTML content</div>", ) def test_reindex_course(self): """ Verify that course gets reindexed. """ index_url = reverse_course_url('course_search_index_handler', self.course.id) response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json') # A course with the default release date should display as "Unscheduled" self.assertIn(self.SUCCESSFUL_RESPONSE, response.content) self.assertEqual(response.status_code, 200) response = self.client.post(index_url, {}, HTTP_ACCEPT='application/json') self.assertEqual(response.content, '') self.assertEqual(response.status_code, 405) self.client.logout() response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json') self.assertEqual(response.status_code, 302) def test_negative_conditions(self): """ Test the error conditions for the access """ index_url = reverse_course_url('course_search_index_handler', self.course.id) # register a non-staff member and try to delete the course branch non_staff_client, _ = self.create_non_staff_authed_user_client() response = non_staff_client.get(index_url, {}, HTTP_ACCEPT='application/json') self.assertEqual(response.status_code, 403) def test_empty_content_type(self): """ Test json content type is set if '' is selected """ index_url = reverse_course_url('course_search_index_handler', self.course.id) response = self.client.get(index_url, {}, CONTENT_TYPE='') # A course with the default release date should display as "Unscheduled" self.assertIn(self.SUCCESSFUL_RESPONSE, response.content) self.assertEqual(response.status_code, 200) @mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary') def test_reindex_course_search_index_error(self, mock_index_dictionary): """ Test json response with mocked error data for html """ # set mocked exception response err = SearchIndexingError mock_index_dictionary.return_value = err index_url = reverse_course_url('course_search_index_handler', self.course.id) # Start manual reindex and check error in response response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json') self.assertEqual(response.status_code, 500) def test_reindex_json_responses(self): """ Test json response with real data """ # results are indexed because they are published from ItemFactory response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) # Start manual reindex reindex_course_and_check_access(self.course.id, self.user) # Check results remain the same response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) @mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary') def test_reindex_video_error_json_responses(self, 
mock_index_dictionary): """ Test json response with mocked error data for video """ # results are indexed because they are published from ItemFactory response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) # set mocked exception response err = SearchIndexingError mock_index_dictionary.return_value = err # Start manual reindex and check error in response with self.assertRaises(SearchIndexingError): reindex_course_and_check_access(self.course.id, self.user) @mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary') def test_reindex_html_error_json_responses(self, mock_index_dictionary): """ Test json response with mocked error data for html """ # results are indexed because they are published from ItemFactory response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) # set mocked exception response err = SearchIndexingError mock_index_dictionary.return_value = err # Start manual reindex and check error in response with self.assertRaises(SearchIndexingError): reindex_course_and_check_access(self.course.id, self.user) @mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary') def test_reindex_seq_error_json_responses(self, mock_index_dictionary): """ Test json response with mocked error data for sequence """ # results are indexed because they are published from ItemFactory response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) # set mocked exception response err = Exception mock_index_dictionary.return_value = err # Start manual reindex and check error in response with self.assertRaises(SearchIndexingError): reindex_course_and_check_access(self.course.id, self.user) @mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course') def test_reindex_no_item(self, mock_get_course): """ Test system logs an error if no item found. 
""" # set mocked exception response err = ItemNotFoundError mock_get_course.return_value = err # Start manual reindex and check error in response with self.assertRaises(SearchIndexingError): reindex_course_and_check_access(self.course.id, self.user) def test_reindex_no_permissions(self): # register a non-staff member and try to delete the course branch user2 = UserFactory() with self.assertRaises(PermissionDenied): reindex_course_and_check_access(self.course.id, user2) def test_indexing_responses(self): """ Test do_course_reindex response with real data """ # results are indexed because they are published from ItemFactory response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) # Start manual reindex CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id) # Check results are the same following reindex response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) @mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary') def test_indexing_video_error_responses(self, mock_index_dictionary): """ Test do_course_reindex response with mocked error data for video """ # results are indexed because they are published from ItemFactory response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) # set mocked exception response err = Exception mock_index_dictionary.return_value = err # Start manual reindex and check error in response with self.assertRaises(SearchIndexingError): CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id) @mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary') def test_indexing_html_error_responses(self, mock_index_dictionary): """ Test do_course_reindex response with mocked error data for html """ # results are indexed because they are published from ItemFactory response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) # set mocked exception response err = Exception mock_index_dictionary.return_value = err # Start manual reindex and check error in response with self.assertRaises(SearchIndexingError): CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id) @mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary') def test_indexing_seq_error_responses(self, mock_index_dictionary): """ Test do_course_reindex response with mocked error data for sequence """ # results are indexed because they are published from ItemFactory response = perform_search( "unique", user=self.user, size=10, from_=0, course_id=unicode(self.course.id)) self.assertEqual(response['total'], 1) # set mocked exception response err = Exception mock_index_dictionary.return_value = err # Start manual reindex and check error in response with self.assertRaises(SearchIndexingError): CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id) @mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course') def test_indexing_no_item(self, mock_get_course): """ Test system logs an error if no item found. """ # set mocked exception response err = ItemNotFoundError mock_get_course.return_value = err # Start manual reindex and check error in response with self.assertRaises(SearchIndexingError): CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
agpl-3.0
-2,721,059,396,229,328,000
41.070423
119
0.644989
false
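
The edx-platform test file above repeatedly validates nested outline JSON through assert_correct_json_response, which recurses through each node's child_info. Below is a minimal, self-contained sketch of that recursive walk, pulled out as standalone functions; this is illustration rather than code from the file, and the sample payload is invented (only its child_info/children shape mirrors the course_handler JSON).

# Illustrative sketch: the recursive child_info walk the tests above rely on.
def walk_outline(node, visit):
    """Apply `visit` to `node` and then, recursively, to every descendant."""
    visit(node)
    child_info = node.get('child_info') or {}
    for child in child_info.get('children', []):
        walk_outline(child, visit)


def check_node(node):
    # The same per-node invariants the test asserts at every level of the tree.
    assert node['display_name'] is not None
    assert node['id'] is not None
    assert node['category'] is not None
    assert node['published'] is True


# Invented payload for demonstration; a real response comes from course_handler.
sample = {
    'id': 'course-key', 'display_name': 'Demo Course', 'category': 'course', 'published': True,
    'child_info': {'children': [
        {'id': 'chapter-key', 'display_name': 'Week 1', 'category': 'chapter', 'published': True},
    ]},
}
walk_outline(sample, check_node)  # raises AssertionError on any malformed node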
ujenmr/ansible
lib/ansible/modules/database/misc/kibana_plugin.py
52
7252
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2016, Thierno IB. BARRY @barryib # Sponsored by Polyconseil http://polyconseil.fr. # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: kibana_plugin short_description: Manage Kibana plugins description: - This module can be used to manage Kibana plugins. version_added: "2.2" author: Thierno IB. BARRY (@barryib) options: name: description: - Name of the plugin to install. required: True state: description: - Desired state of a plugin. choices: ["present", "absent"] default: present url: description: - Set exact URL to download the plugin from. - For local file, prefix its absolute path with file:// timeout: description: - "Timeout setting: 30s, 1m, 1h etc." default: 1m plugin_bin: description: - Location of the Kibana binary. default: /opt/kibana/bin/kibana plugin_dir: description: - Your configured plugin directory specified in Kibana. default: /opt/kibana/installedPlugins/ version: description: - Version of the plugin to be installed. - If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes. force: description: - Delete and re-install the plugin. Can be useful for plugins update. type: bool default: 'no' ''' EXAMPLES = ''' - name: Install Elasticsearch head plugin kibana_plugin: state: present name: elasticsearch/marvel - name: Install specific version of a plugin kibana_plugin: state: present name: elasticsearch/marvel version: '2.3.3' - name: Uninstall Elasticsearch head plugin kibana_plugin: state: absent name: elasticsearch/marvel ''' RETURN = ''' cmd: description: the launched command during plugin management (install / remove) returned: success type: str name: description: the plugin name to install or remove returned: success type: str url: description: the url from where the plugin is installed from returned: success type: str timeout: description: the timeout for plugin download returned: success type: str stdout: description: the command stdout returned: success type: str stderr: description: the command stderr returned: success type: str state: description: the state for the managed plugin returned: success type: str ''' import os from distutils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule PACKAGE_STATE_MAP = dict( present="--install", absent="--remove" ) def parse_plugin_repo(string): elements = string.split("/") # We first consider the simplest form: pluginname repo = elements[0] # We consider the form: username/pluginname if len(elements) > 1: repo = elements[1] # remove elasticsearch- prefix # remove es- prefix for string in ("elasticsearch-", "es-"): if repo.startswith(string): return repo[len(string):] return repo def is_plugin_present(plugin_dir, working_dir): return os.path.isdir(os.path.join(working_dir, plugin_dir)) def parse_error(string): reason = "reason: " try: return string[string.index(reason) + len(reason):].strip() except ValueError: return string def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'): if LooseVersion(kibana_version) > LooseVersion('4.6'): kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') cmd_args = [kibana_plugin_bin, "install"] if url: cmd_args.append(url) else: cmd_args.append(plugin_name) 
else: cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name] if url: cmd_args.append("--url %s" % url) if timeout: cmd_args.append("--timeout %s" % timeout) cmd = " ".join(cmd_args) if module.check_mode: return True, cmd, "check mode", "" rc, out, err = module.run_command(cmd) if rc != 0: reason = parse_error(out) module.fail_json(msg=reason) return True, cmd, out, err def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'): if LooseVersion(kibana_version) > LooseVersion('4.6'): kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') cmd_args = [kibana_plugin_bin, "remove", plugin_name] else: cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] cmd = " ".join(cmd_args) if module.check_mode: return True, cmd, "check mode", "" rc, out, err = module.run_command(cmd) if rc != 0: reason = parse_error(out) module.fail_json(msg=reason) return True, cmd, out, err def get_kibana_version(module, plugin_bin): cmd_args = [plugin_bin, '--version'] cmd = " ".join(cmd_args) rc, out, err = module.run_command(cmd) if rc != 0: module.fail_json(msg="Failed to get Kibana version : %s" % err) return out.strip() def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), url=dict(default=None), timeout=dict(default="1m"), plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), version=dict(default=None), force=dict(default="no", type="bool") ), supports_check_mode=True, ) name = module.params["name"] state = module.params["state"] url = module.params["url"] timeout = module.params["timeout"] plugin_bin = module.params["plugin_bin"] plugin_dir = module.params["plugin_dir"] version = module.params["version"] force = module.params["force"] changed, cmd, out, err = False, '', '', '' kibana_version = get_kibana_version(module, plugin_bin) present = is_plugin_present(parse_plugin_repo(name), plugin_dir) # skip if the state is correct if (present and state == "present" and not force) or (state == "absent" and not present and not force): module.exit_json(changed=False, name=name, state=state) if version: name = name + '/' + version if state == "present": if force: remove_plugin(module, plugin_bin, name) changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version) elif state == "absent": changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version) module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) if __name__ == '__main__': main()
gpl-3.0
3,521,916,468,770,162,700
26.366038
120
0.634308
false
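
The kibana_plugin module above switches command syntax on the detected Kibana version: releases after 4.6 ship a standalone kibana-plugin binary next to the kibana binary, while older ones use the `kibana plugin --install` subcommand. Here is a standalone sketch of just that switch; the paths, plugin names, and versions are made up for illustration, and note the strict `>` comparison, so 4.6 itself takes the legacy path.

# Illustrative sketch of the version switch install_plugin() performs above.
import os
from distutils.version import LooseVersion


def build_install_cmd(plugin_bin, name, kibana_version):
    if LooseVersion(kibana_version) > LooseVersion('4.6'):
        # Newer Kibana ships a dedicated kibana-plugin binary alongside kibana.
        new_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
        return [new_bin, 'install', name]
    # Older Kibana (including 4.6 itself, because the comparison is strict)
    # goes through the `kibana plugin --install` subcommand.
    return [plugin_bin, 'plugin', '--install', name]


print(build_install_cmd('/opt/kibana/bin/kibana', 'x-pack', '5.6.0'))
# -> ['/opt/kibana/bin/kibana-plugin', 'install', 'x-pack']
print(build_install_cmd('/opt/kibana/bin/kibana', 'elasticsearch/marvel', '4.5.0'))
# -> ['/opt/kibana/bin/kibana', 'plugin', '--install', 'elasticsearch/marvel']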
nhejazi/scikit-learn
sklearn/neural_network/_base.py
50
6856
"""Utilities for the neural network modules """ # Author: Issam H. Laradji <[email protected]> # License: BSD 3 clause import numpy as np from scipy.special import expit as logistic_sigmoid def identity(X): """Simply return the input array. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data, where n_samples is the number of samples and n_features is the number of features. Returns ------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Same as the input data. """ return X def logistic(X): """Compute the logistic function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data. Returns ------- X_new : {array-like, sparse matrix}, shape (n_samples, n_features) The transformed data. """ return logistic_sigmoid(X, out=X) def tanh(X): """Compute the hyperbolic tan function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data. Returns ------- X_new : {array-like, sparse matrix}, shape (n_samples, n_features) The transformed data. """ return np.tanh(X, out=X) def relu(X): """Compute the rectified linear unit function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data. Returns ------- X_new : {array-like, sparse matrix}, shape (n_samples, n_features) The transformed data. """ np.clip(X, 0, np.finfo(X.dtype).max, out=X) return X def softmax(X): """Compute the K-way softmax function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data. Returns ------- X_new : {array-like, sparse matrix}, shape (n_samples, n_features) The transformed data. """ tmp = X - X.max(axis=1)[:, np.newaxis] np.exp(tmp, out=X) X /= X.sum(axis=1)[:, np.newaxis] return X ACTIVATIONS = {'identity': identity, 'tanh': tanh, 'logistic': logistic, 'relu': relu, 'softmax': softmax} def inplace_identity_derivative(Z, delta): """Apply the derivative of the identity function: do nothing. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the identity activation function during the forward pass. delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace. """ # Nothing to do def inplace_logistic_derivative(Z, delta): """Apply the derivative of the logistic sigmoid function. It exploits the fact that the derivative is a simple function of the output value from logistic function. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the logistic activation function during the forward pass. delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace. """ delta *= Z delta *= (1 - Z) def inplace_tanh_derivative(Z, delta): """Apply the derivative of the hyperbolic tanh function. It exploits the fact that the derivative is a simple function of the output value from hyperbolic tangent. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the hyperbolic tangent activation function during the forward pass. delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace. """ delta *= (1 - Z ** 2) def inplace_relu_derivative(Z, delta): """Apply the derivative of the relu function. 
It exploits the fact that the derivative is a simple function of the output value from rectified linear units activation function. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the rectified linear units activation function during the forward pass. delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace. """ delta[Z == 0] = 0 DERIVATIVES = {'identity': inplace_identity_derivative, 'tanh': inplace_tanh_derivative, 'logistic': inplace_logistic_derivative, 'relu': inplace_relu_derivative} def squared_loss(y_true, y_pred): """Compute the squared loss for regression. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) values. y_pred : array-like or label indicator matrix Predicted values, as returned by a regression estimator. Returns ------- loss : float The degree to which the samples are correctly predicted. """ return ((y_true - y_pred) ** 2).mean() / 2 def log_loss(y_true, y_prob): """Compute Logistic loss for classification. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_prob : array-like of float, shape = (n_samples, n_classes) Predicted probabilities, as returned by a classifier's predict_proba method. Returns ------- loss : float The degree to which the samples are correctly predicted. """ y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10) if y_prob.shape[1] == 1: y_prob = np.append(1 - y_prob, y_prob, axis=1) if y_true.shape[1] == 1: y_true = np.append(1 - y_true, y_true, axis=1) return -np.sum(y_true * np.log(y_prob)) / y_prob.shape[0] def binary_log_loss(y_true, y_prob): """Compute binary logistic loss for classification. This is identical to log_loss in binary classification case, but is kept for its use in multilabel case. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_prob : array-like of float, shape = (n_samples, n_classes) Predicted probabilities, as returned by a classifier's predict_proba method. Returns ------- loss : float The degree to which the samples are correctly predicted. """ y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10) return -np.sum(y_true * np.log(y_prob) + (1 - y_true) * np.log(1 - y_prob)) / y_prob.shape[0] LOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss, 'binary_log_loss': binary_log_loss}
bsd-3-clause
-8,872,579,794,825,533,000
26.206349
79
0.618728
false
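
A quick numerical check of the max-shift trick used by softmax() in the scikit-learn module above: subtracting the per-row maximum before exponentiating is mathematically a no-op (the factor exp(-max) cancels in the ratio) but keeps np.exp from overflowing on large logits. The arrays below are invented for demonstration.

# Illustrative check of the shift trick; not code from the module above.
import numpy as np


def softmax_naive(X):
    e = np.exp(X)
    return e / e.sum(axis=1, keepdims=True)


def softmax_shifted(X):
    e = np.exp(X - X.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)


small = np.array([[1.0, 2.0, 3.0]])
assert np.allclose(softmax_naive(small), softmax_shifted(small))

big = np.array([[1000.0, 1001.0, 1002.0]])
# softmax_naive(big) overflows: np.exp(1000) is inf and inf/inf is nan,
# with a RuntimeWarning. The shifted version stays finite:
print(softmax_shifted(big))  # approximately [[0.090 0.245 0.665]]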
isyippee/oslo.messaging
oslo_messaging/_drivers/protocols/amqp/drivertasks.py
7
4058
# Copyright 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import threading import time from oslo_messaging._drivers.protocols.amqp import controller from oslo_messaging import exceptions from six import moves LOG = logging.getLogger(__name__) class SendTask(controller.Task): """A task that sends a message to a target, and optionally waits for a reply message. The caller may block until the remote confirms receipt or the reply message has arrived. """ def __init__(self, target, request, wait_for_reply, deadline): super(SendTask, self).__init__() self._target = target self._request = request self._deadline = deadline self._wait_for_reply = wait_for_reply self._results_queue = moves.queue.Queue() def wait(self, timeout): """Wait for the send to complete, and, optionally, a reply message from the remote. Will raise MessagingTimeout if the send does not complete or no reply is received within timeout seconds. If the request has failed for any other reason, a MessagingException is raised." """ try: result = self._results_queue.get(timeout=timeout) except moves.queue.Empty: if self._wait_for_reply: reason = "Timed out waiting for a reply." else: reason = "Timed out waiting for send to complete." raise exceptions.MessagingTimeout(reason) if result["status"] == "OK": return result.get("response", None) raise result["error"] def execute(self, controller): """Runs on eventloop thread - sends request.""" if not self._deadline or self._deadline > time.time(): controller.request(self._target, self._request, self._results_queue, self._wait_for_reply) else: LOG.warn("Send request to %s aborted: TTL expired.", self._target) class ListenTask(controller.Task): """A task that creates a subscription to the given target. Messages arriving from the target are given to the listener. """ def __init__(self, target, listener, notifications=False): """Create a subscription to the target.""" super(ListenTask, self).__init__() self._target = target self._listener = listener self._notifications = notifications def execute(self, controller): """Run on the eventloop thread - subscribes to target. Inbound messages are queued to the listener's incoming queue. """ if self._notifications: controller.subscribe_notifications(self._target, self._listener.incoming) else: controller.subscribe(self._target, self._listener.incoming) class ReplyTask(controller.Task): """A task that sends 'response' message to 'address'. """ def __init__(self, address, response, log_failure): super(ReplyTask, self).__init__() self._address = address self._response = response self._log_failure = log_failure self._wakeup = threading.Event() def wait(self): """Wait for the controller to send the message. """ self._wakeup.wait() def execute(self, controller): """Run on the eventloop thread - send the response message.""" controller.response(self._address, self._response) self._wakeup.set()
apache-2.0
-7,825,941,226,630,720,000
36.925234
79
0.635042
false
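
SendTask.wait in the oslo.messaging driver above is a thread handshake: the eventloop thread eventually puts a status dict on _results_queue, and the caller blocks in Queue.get(timeout=...), translating queue.Empty into MessagingTimeout. A standalone sketch of the same handshake follows; it uses the Python 3 stdlib queue module where the driver goes through six.moves, and a plain TimeoutError stands in for MessagingTimeout.

# Illustrative sketch of the wait/queue handshake; not driver code.
import queue
import threading
import time

results = queue.Queue()


def eventloop_side():
    time.sleep(0.1)  # pretend the broker has just confirmed receipt
    results.put({"status": "OK", "response": "pong"})


threading.Thread(target=eventloop_side).start()

try:
    result = results.get(timeout=1.0)   # caller blocks here, like SendTask.wait
except queue.Empty:
    raise TimeoutError("Timed out waiting for a reply.")

if result["status"] == "OK":
    print(result.get("response", None))  # -> pong
else:
    raise result["error"]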
amenonsen/ansible
lib/ansible/modules/storage/netapp/netapp_e_facts.py
4
27761
#!/usr/bin/python # (c) 2016, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: netapp_e_facts short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays description: - The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays. - When contacting a storage array directly the collection includes details about the array, controllers, management interfaces, hostside interfaces, driveside interfaces, disks, storage pools, volumes, snapshots, and features. - When contacting a web services proxy the collection will include basic information regarding the storage systems that are under its management. version_added: '2.2' author: - Kevin Hulquest (@hulquest) - Nathan Swartz (@ndswartz) extends_documentation_fragment: - netapp.eseries ''' EXAMPLES = """ --- - name: Get array facts netapp_e_facts: ssid: "{{ netapp_array_id }}" api_url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2" api_username: "{{ netapp_api_username }}" api_password: "{{ netapp_api_password }}" validate_certs: "{{ netapp_api_validate_certs }}" - name: Get array facts netapp_e_facts: ssid: 1 api_url: https://192.168.1.100:8443/devmgr/v2 api_username: myApiUser api_password: myApiPass validate_certs: true """ RETURN = """ msg: description: Success message returned: on success type: str sample: - Gathered facts for storage array. Array ID [1]. - Gathered facts for web services proxy. storage_array_facts: description: provides details about the array, controllers, management interfaces, hostside interfaces, driveside interfaces, disks, storage pools, volumes, snapshots, and features. returned: on successful inquiry from from embedded web services rest api type: complex contains: netapp_controllers: description: storage array controller list that contains basic controller identification and status type: complex sample: - [{"name": "A", "serial": "021632007299", "status": "optimal"}, {"name": "B", "serial": "021632007300", "status": "failed"}] netapp_disks: description: drive list that contains identification, type, and status information for each drive type: complex sample: - [{"available": false, "firmware_version": "MS02", "id": "01000000500003960C8B67880000000000000000", "media_type": "ssd", "product_id": "PX02SMU080 ", "serial_number": "15R0A08LT2BA", "status": "optimal", "tray_ref": "0E00000000000000000000000000000000000000", "usable_bytes": "799629205504" }] netapp_driveside_interfaces: description: drive side interface list that contains identification, type, and speed for each interface type: complex sample: - [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }] - [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }] netapp_enabled_features: description: specifies the enabled features on the storage array. returned: on success type: complex sample: - [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ] netapp_host_groups: description: specifies the host groups on the storage arrays. returned: on success type: complex sample: - [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }] netapp_hosts: description: specifies the hosts on the storage arrays. 
returned: on success type: complex sample: - [{ "id": "8203800000000000000000000000000000000000", "name": "host1", "group_id": "85000000600A098000A4B28D003610705C40B964", "host_type_index": 28, "ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" }, { "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}] netapp_host_types: description: lists the available host types on the storage array. returned: on success type: complex sample: - [{ "index": 0, "type": "FactoryDefault" }, { "index": 1, "type": "W2KNETNCL"}, { "index": 2, "type": "SOL" }, { "index": 5, "type": "AVT_4M" }, { "index": 6, "type": "LNX" }, { "index": 7, "type": "LnxALUA" }, { "index": 8, "type": "W2KNETCL" }, { "index": 9, "type": "AIX MPIO" }, { "index": 10, "type": "VmwTPGSALUA" }, { "index": 15, "type": "HPXTPGS" }, { "index": 17, "type": "SolTPGSALUA" }, { "index": 18, "type": "SVC" }, { "index": 22, "type": "MacTPGSALUA" }, { "index": 23, "type": "WinTPGSALUA" }, { "index": 24, "type": "LnxTPGSALUA" }, { "index": 25, "type": "LnxTPGSALUA_PM" }, { "index": 26, "type": "ONTAP_ALUA" }, { "index": 27, "type": "LnxTPGSALUA_SF" }, { "index": 28, "type": "LnxDHALUA" }, { "index": 29, "type": "ATTOClusterAllOS" }] netapp_hostside_interfaces: description: host side interface list that contains identification, configuration, type, speed, and status information for each interface type: complex sample: - [{"iscsi": [{ "controller": "A", "current_interface_speed": "10g", "ipv4_address": "10.10.10.1", "ipv4_enabled": true, "ipv4_gateway": "10.10.10.1", "ipv4_subnet_mask": "255.255.255.0", "ipv6_enabled": false, "iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76", "link_status": "up", "mtu": 9000, "supported_interface_speeds": [ "10g" ] }]}] netapp_management_interfaces: description: management interface list that contains identification, configuration, and status for each interface type: complex sample: - [{"alias": "ict-2800-A", "channel": 1, "controller": "A", "dns_config_method": "dhcp", "dns_servers": [], "ipv4_address": "10.1.1.1", "ipv4_address_config_method": "static", "ipv4_enabled": true, "ipv4_gateway": "10.113.1.1", "ipv4_subnet_mask": "255.255.255.0", "ipv6_enabled": false, "link_status": "up", "mac_address": "00A098A81B5D", "name": "wan0", "ntp_config_method": "disabled", "ntp_servers": [], "remote_ssh_access": false }] netapp_storage_array: description: provides storage array identification, firmware version, and available capabilities type: dict sample: - {"chassis_serial": "021540006043", "firmware": "08.40.00.01", "name": "ict-2800-11_40", "wwn": "600A098000A81B5D0000000059D60C76", "cacheBlockSizes": [4096, 8192, 16384, 32768], "supportedSegSizes": [8192, 16384, 32768, 65536, 131072, 262144, 524288]} netapp_storage_pools: description: storage pool list that contains identification and capacity information for each pool type: complex sample: - [{"available_capacity": "3490353782784", "id": "04000000600A098000A81B5D000002B45A953A61", "name": "Raid6", "total_capacity": "5399466745856", "used_capacity": "1909112963072" }] netapp_volumes: description: storage volume list that contains identification and capacity information for each volume type: complex sample: - [{"capacity": "5368709120", "id": "02000000600A098000AAC0C3000002C45A952BAA", "is_thin_provisioned": false, "name": "5G", "parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }] netapp_workload_tags: description: workload tag list type: complex sample: - [{"id": 
"87e19568-43fb-4d8d-99ea-2811daaa2b38", "name": "ftp_server", "workloadAttributes": [{"key": "use", "value": "general"}]}] netapp_volumes_by_initiators: description: list of available volumes keyed by the mapped initiators. type: complex sample: - {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E", "meta_data": {"filetype": "xfs", "public": true}, "name": "some_volume", "workload_name": "test2_volumes", "wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]} snapshot_images: description: snapshot image list that contains identification, capacity, and status information for each snapshot image type: complex sample: - [{"active_cow": true, "creation_method": "user", "id": "34000000600A098000A81B5D00630A965B0535AC", "pit_capacity": "5368709120", "reposity_cap_utilization": "0", "rollback_source": false, "status": "optimal" }] """ from re import match from pprint import pformat from ansible.module_utils.netapp import NetAppESeriesModule class Facts(NetAppESeriesModule): def __init__(self): web_services_version = "02.00.0000.0000" super(Facts, self).__init__(ansible_options={}, web_services_version=web_services_version, supports_check_mode=True) def get_controllers(self): """Retrieve a mapping of controller references to their labels.""" controllers = list() try: rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid) except Exception as err: self.module.fail_json( msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." % (self.ssid, str(err))) controllers.sort() controllers_dict = {} i = ord('A') for controller in controllers: label = chr(i) controllers_dict[controller] = label i += 1 return controllers_dict def get_array_facts(self): """Extract particular facts from the storage array graph""" facts = dict(facts_from_proxy=False, ssid=self.ssid) controller_reference_label = self.get_controllers() array_facts = None # Get the storage array graph try: rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid) except Exception as error: self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error))) facts['netapp_storage_array'] = dict( name=array_facts['sa']['saData']['storageArrayLabel'], chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'], firmware=array_facts['sa']['saData']['fwVersion'], wwn=array_facts['sa']['saData']['saId']['worldWideName'], segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'], cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes']) facts['netapp_controllers'] = [ dict( name=controller_reference_label[controller['controllerRef']], serial=controller['serialNumber'].strip(), status=controller['status'], ) for controller in array_facts['controller']] facts['netapp_host_groups'] = [ dict( id=group['id'], name=group['name'] ) for group in array_facts['storagePoolBundle']['cluster']] facts['netapp_hosts'] = [ dict( group_id=host['clusterRef'], hosts_reference=host['hostRef'], id=host['id'], name=host['name'], host_type_index=host['hostTypeIndex'], posts=host['hostSidePorts'] ) for host in array_facts['storagePoolBundle']['host']] facts['netapp_host_types'] = [ dict( type=host_type['hostType'], index=host_type['index'] ) for host_type in array_facts['sa']['hostSpecificVals'] if 'hostType' in host_type.keys() and host_type['hostType'] # This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared. 
] facts['snapshot_images'] = [ dict( id=snapshot['id'], status=snapshot['status'], pit_capacity=snapshot['pitCapacity'], creation_method=snapshot['creationMethod'], reposity_cap_utilization=snapshot['repositoryCapacityUtilization'], active_cow=snapshot['activeCOW'], rollback_source=snapshot['isRollbackSource'] ) for snapshot in array_facts['highLevelVolBundle']['pit']] facts['netapp_disks'] = [ dict( id=disk['id'], available=disk['available'], media_type=disk['driveMediaType'], status=disk['status'], usable_bytes=disk['usableCapacity'], tray_ref=disk['physicalLocation']['trayRef'], product_id=disk['productID'], firmware_version=disk['firmwareVersion'], serial_number=disk['serialNumber'].lstrip() ) for disk in array_facts['drive']] facts['netapp_management_interfaces'] = [ dict(controller=controller_reference_label[controller['controllerRef']], name=iface['ethernet']['interfaceName'], alias=iface['ethernet']['alias'], channel=iface['ethernet']['channel'], mac_address=iface['ethernet']['macAddr'], remote_ssh_access=iface['ethernet']['rloginEnabled'], link_status=iface['ethernet']['linkStatus'], ipv4_enabled=iface['ethernet']['ipv4Enabled'], ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""), ipv4_address=iface['ethernet']['ipv4Address'], ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'], ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'], ipv6_enabled=iface['ethernet']['ipv6Enabled'], dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'], dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []), ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'], ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else []) ) for controller in array_facts['controller'] for iface in controller['netInterfaces']] facts['netapp_hostside_interfaces'] = [ dict( fc=[dict(controller=controller_reference_label[controller['controllerRef']], channel=iface['fibre']['channel'], link_status=iface['fibre']['linkStatus'], current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']), maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed'])) for controller in array_facts['controller'] for iface in controller['hostInterfaces'] if iface['interfaceType'] == 'fc'], ib=[dict(controller=controller_reference_label[controller['controllerRef']], channel=iface['ib']['channel'], link_status=iface['ib']['linkState'], mtu=iface['ib']['maximumTransmissionUnit'], current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']), maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed'])) for controller in array_facts['controller'] for iface in controller['hostInterfaces'] if iface['interfaceType'] == 'ib'], iscsi=[dict(controller=controller_reference_label[controller['controllerRef']], iqn=iface['iscsi']['iqn'], link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'], ipv4_enabled=iface['iscsi']['ipv4Enabled'], ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'], ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'], ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'], 
ipv6_enabled=iface['iscsi']['ipv6Enabled'], mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'], current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData'] ['ethernetData']['currentInterfaceSpeed']), supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData'] ['ethernetData'] ['supportedInterfaceSpeeds'])) for controller in array_facts['controller'] for iface in controller['hostInterfaces'] if iface['interfaceType'] == 'iscsi'], sas=[dict(controller=controller_reference_label[controller['controllerRef']], channel=iface['sas']['channel'], current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']), maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']), link_status=iface['sas']['iocPort']['state']) for controller in array_facts['controller'] for iface in controller['hostInterfaces'] if iface['interfaceType'] == 'sas'])] facts['netapp_driveside_interfaces'] = [ dict( controller=controller_reference_label[controller['controllerRef']], interface_type=interface['interfaceType'], interface_speed=strip_interface_speed( interface[interface['interfaceType']]['maximumInterfaceSpeed'] if (interface['interfaceType'] == 'sata' or interface['interfaceType'] == 'sas' or interface['interfaceType'] == 'fibre') else ( interface[interface['interfaceType']]['currentSpeed'] if interface['interfaceType'] == 'ib' else ( interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed'] if interface['interfaceType'] == 'iscsi' else 'unknown' ))), ) for controller in array_facts['controller'] for interface in controller['driveInterfaces']] facts['netapp_storage_pools'] = [ dict( id=storage_pool['id'], name=storage_pool['name'], available_capacity=storage_pool['freeSpace'], total_capacity=storage_pool['totalRaidedSpace'], used_capacity=storage_pool['usedSpace'] ) for storage_pool in array_facts['volumeGroup']] all_volumes = list(array_facts['volume']) facts['netapp_volumes'] = [ dict( id=v['id'], name=v['name'], parent_storage_pool_id=v['volumeGroupRef'], capacity=v['capacity'], is_thin_provisioned=v['thinProvisioned'], workload=v['metadata'], ) for v in all_volumes] workload_tags = None try: rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid) except Exception as error: self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." 
% self.ssid) facts['netapp_workload_tags'] = [ dict( id=workload_tag['id'], name=workload_tag['name'], attributes=workload_tag['workloadAttributes'] ) for workload_tag in workload_tags] # Create a dictionary of volume lists keyed by host names facts['netapp_volumes_by_initiators'] = dict() for mapping in array_facts['storagePoolBundle']['lunMapping']: for host in facts['netapp_hosts']: if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']: if host['name'] not in facts['netapp_volumes_by_initiators'].keys(): facts['netapp_volumes_by_initiators'].update({host['name']: []}) for volume in all_volumes: if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]: # Determine workload name if there is one workload_name = "" metadata = dict() for volume_tag in volume['metadata']: if volume_tag['key'] == 'workloadId': for workload_tag in facts['netapp_workload_tags']: if volume_tag['value'] == workload_tag['id']: workload_name = workload_tag['name'] metadata = dict((entry['key'], entry['value']) for entry in workload_tag['attributes'] if entry['key'] != 'profileId') facts['netapp_volumes_by_initiators'][host['name']].append( dict(name=volume['name'], id=volume['id'], wwn=volume['wwn'], workload_name=workload_name, meta_data=metadata)) features = [feature for feature in array_facts['sa']['capabilities']] features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures'] if feature['isEnabled']]) features = list(set(features)) # ensure unique features.sort() facts['netapp_enabled_features'] = features return facts def get_facts(self): """Get the embedded or web services proxy information.""" facts = self.get_array_facts() self.module.log("isEmbedded: %s" % self.is_embedded()) self.module.log(pformat(facts)) self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid, storage_array_facts=facts) def strip_interface_speed(speed): """Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'""" if isinstance(speed, list): result = [match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed] result = [sp.group().replace("speed", "") if result else "unknown" for sp in result if sp] result = ["auto" if match(r"auto", sp) else sp for sp in result] else: result = match(r"speed[0-9]{1,3}[gm]", speed) result = result.group().replace("speed", "") if result else "unknown" result = "auto" if match(r"auto", result.lower()) else result return result def main(): facts = Facts() facts.get_facts() if __name__ == "__main__": main()
gpl-3.0
-609,340,091,246,414,700
50.219557
140
0.503224
false
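
strip_interface_speed in the netapp_e_facts module above normalizes SANtricity speed symbols (e.g. 'speed10gig' -> '10g') with a small regex. A few standalone spot checks of the scalar branch follow; the input strings are illustrative, and the module's additional 'auto' mapping and list handling are omitted for brevity.

# Illustrative spot checks for the scalar branch of strip_interface_speed().
from re import match


def normalize(speed):
    # Keep the digits plus the g/m suffix, or fall back to "unknown".
    result = match(r"speed[0-9]{1,3}[gm]", speed)
    return result.group().replace("speed", "") if result else "unknown"


assert normalize("speed10gig") == "10g"      # the documented example
assert normalize("speed100meg") == "100m"    # megabit symbols keep the 'm'
assert normalize("speedAuto") == "unknown"   # non-numeric symbols fall through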
Mactory/easy-thumbnails
easy_thumbnails/south_migrations/0009_auto__del_storage.py
20
2337
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting model 'Storage' db.delete_table('easy_thumbnails_storage') def backwards(self, orm): # Adding model 'Storage' db.create_table('easy_thumbnails_storage', ( ('pickle', self.gf('django.db.models.fields.TextField')()), ('hash', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True, db_index=True)), )) db.send_create_signal('easy_thumbnails', ['Storage']) models = { 'easy_thumbnails.source': { 'Meta': {'object_name': 'Source'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 7, 21, 4, 34, 17, 1330)'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'storage_new': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['easy_thumbnails.StorageNew']"}) }, 'easy_thumbnails.storagenew': { 'Meta': {'object_name': 'StorageNew'}, 'hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'pickle': ('django.db.models.fields.TextField', [], {}) }, 'easy_thumbnails.thumbnail': { 'Meta': {'object_name': 'Thumbnail'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 7, 21, 4, 34, 17, 1330)'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thumbnails'", 'to': "orm['easy_thumbnails.Source']"}), 'storage_new': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['easy_thumbnails.StorageNew']"}) } } complete_apps = ['easy_thumbnails']
bsd-3-clause
5,434,401,292,106,068,000
46.693878
146
0.569961
false
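
The South migration above follows the framework's reversibility contract: backwards() must be the exact inverse of forwards(), so a forward table drop is paired with a backward create_table that restores every column. A stripped-down skeleton of that pattern is sketched below; the app, table, and column names are invented, and like any South migration it only runs inside a configured Django project.

# Skeleton of a reversible drop-table migration; names are hypothetical.
from south.db import db
from south.v2 import SchemaMigration


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Destructive step: drop the obsolete table.
        db.delete_table('exampleapp_obsolete')

    def backwards(self, orm):
        # Exact inverse: recreate the table, columns included, so the
        # migration can be rolled back with `migrate exampleapp <prev>`.
        db.create_table('exampleapp_obsolete', (
            ('hash', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
            ('pickle', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('exampleapp', ['Obsolete'])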
embray/numpy
numpy/core/tests/test_ufunc.py
1
43039
from __future__ import division, absolute_import, print_function import sys import numpy as np from numpy.testing import * import numpy.core.umath_tests as umt import numpy.core.operand_flag_tests as opflag_tests from numpy.compat import asbytes from numpy.core.test_rational import * class TestUfunc(TestCase): def test_pickle(self): import pickle assert pickle.loads(pickle.dumps(np.sin)) is np.sin def test_pickle_withstring(self): import pickle astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n" "(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") assert pickle.loads(astring) is np.cos def test_reduceat_shifting_sum(self) : L = 6 x = np.arange(L) idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) def test_generic_loops(self) : """Test generic loops. The loops to be tested are: PyUFunc_ff_f_As_dd_d PyUFunc_ff_f PyUFunc_dd_d PyUFunc_gg_g PyUFunc_FF_F_As_DD_D PyUFunc_DD_D PyUFunc_FF_F PyUFunc_GG_G PyUFunc_OO_O PyUFunc_OO_O_method PyUFunc_f_f_As_d_d PyUFunc_d_d PyUFunc_f_f PyUFunc_g_g PyUFunc_F_F_As_D_D PyUFunc_F_F PyUFunc_D_D PyUFunc_G_G PyUFunc_O_O PyUFunc_O_O_method PyUFunc_On_Om Where: f -- float d -- double g -- long double F -- complex float D -- complex double G -- complex long double O -- python object It is difficult to assure that each of these loops is entered from the Python level as the special cased loops are a moving target and the corresponding types are architecture dependent. We probably need to define C level testing ufuncs to get at them. For the time being, I've just looked at the signatures registered in the build directory to find relevant functions. Fixme, currently untested: PyUFunc_ff_f_As_dd_d PyUFunc_FF_F_As_DD_D PyUFunc_f_f_As_d_d PyUFunc_F_F_As_D_D PyUFunc_On_Om """ fone = np.exp ftwo = lambda x, y : x**y fone_val = 1 ftwo_val = 1 # check unary PyUFunc_f_f. msg = "PyUFunc_f_f" x = np.zeros(10, dtype=np.single)[0::2] assert_almost_equal(fone(x), fone_val, err_msg=msg) # check unary PyUFunc_d_d. msg = "PyUFunc_d_d" x = np.zeros(10, dtype=np.double)[0::2] assert_almost_equal(fone(x), fone_val, err_msg=msg) # check unary PyUFunc_g_g. msg = "PyUFunc_g_g" x = np.zeros(10, dtype=np.longdouble)[0::2] assert_almost_equal(fone(x), fone_val, err_msg=msg) # check unary PyUFunc_F_F. msg = "PyUFunc_F_F" x = np.zeros(10, dtype=np.csingle)[0::2] assert_almost_equal(fone(x), fone_val, err_msg=msg) # check unary PyUFunc_D_D. msg = "PyUFunc_D_D" x = np.zeros(10, dtype=np.cdouble)[0::2] assert_almost_equal(fone(x), fone_val, err_msg=msg) # check unary PyUFunc_G_G. msg = "PyUFunc_G_G" x = np.zeros(10, dtype=np.clongdouble)[0::2] assert_almost_equal(fone(x), fone_val, err_msg=msg) # check binary PyUFunc_ff_f. msg = "PyUFunc_ff_f" x = np.ones(10, dtype=np.single)[0::2] assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_dd_d. msg = "PyUFunc_dd_d" x = np.ones(10, dtype=np.double)[0::2] assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_gg_g. msg = "PyUFunc_gg_g" x = np.ones(10, dtype=np.longdouble)[0::2] assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_FF_F. msg = "PyUFunc_FF_F" x = np.ones(10, dtype=np.csingle)[0::2] assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_DD_D. msg = "PyUFunc_DD_D" x = np.ones(10, dtype=np.cdouble)[0::2] assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_GG_G. 
msg = "PyUFunc_GG_G" x = np.ones(10, dtype=np.clongdouble)[0::2] assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # class to use in testing object method loops class foo(object): def conjugate(self) : return np.bool_(1) def logical_xor(self, obj) : return np.bool_(1) # check unary PyUFunc_O_O msg = "PyUFunc_O_O" x = np.ones(10, dtype=np.object)[0::2] assert_(np.all(np.abs(x) == 1), msg) # check unary PyUFunc_O_O_method msg = "PyUFunc_O_O_method" x = np.zeros(10, dtype=np.object)[0::2] for i in range(len(x)) : x[i] = foo() assert_(np.all(np.conjugate(x) == True), msg) # check binary PyUFunc_OO_O msg = "PyUFunc_OO_O" x = np.ones(10, dtype=np.object)[0::2] assert_(np.all(np.add(x, x) == 2), msg) # check binary PyUFunc_OO_O_method msg = "PyUFunc_OO_O_method" x = np.zeros(10, dtype=np.object)[0::2] for i in range(len(x)) : x[i] = foo() assert_(np.all(np.logical_xor(x, x)), msg) # check PyUFunc_On_Om # fixme -- I don't know how to do this yet def test_all_ufunc(self) : """Try to check presence and results of all ufuncs. The list of ufuncs comes from generate_umath.py and is as follows: ===== ==== ============= =============== ======================== done args function types notes ===== ==== ============= =============== ======================== n 1 conjugate nums + O n 1 absolute nums + O complex -> real n 1 negative nums + O n 1 sign nums + O -> int n 1 invert bool + ints + O flts raise an error n 1 degrees real + M cmplx raise an error n 1 radians real + M cmplx raise an error n 1 arccos flts + M n 1 arccosh flts + M n 1 arcsin flts + M n 1 arcsinh flts + M n 1 arctan flts + M n 1 arctanh flts + M n 1 cos flts + M n 1 sin flts + M n 1 tan flts + M n 1 cosh flts + M n 1 sinh flts + M n 1 tanh flts + M n 1 exp flts + M n 1 expm1 flts + M n 1 log flts + M n 1 log10 flts + M n 1 log1p flts + M n 1 sqrt flts + M real x < 0 raises error n 1 ceil real + M n 1 trunc real + M n 1 floor real + M n 1 fabs real + M n 1 rint flts + M n 1 isnan flts -> bool n 1 isinf flts -> bool n 1 isfinite flts -> bool n 1 signbit real -> bool n 1 modf real -> (frac, int) n 1 logical_not bool + nums + M -> bool n 2 left_shift ints + O flts raise an error n 2 right_shift ints + O flts raise an error n 2 add bool + nums + O boolean + is || n 2 subtract bool + nums + O boolean - is ^ n 2 multiply bool + nums + O boolean * is & n 2 divide nums + O n 2 floor_divide nums + O n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d n 2 fmod nums + M n 2 power nums + O n 2 greater bool + nums + O -> bool n 2 greater_equal bool + nums + O -> bool n 2 less bool + nums + O -> bool n 2 less_equal bool + nums + O -> bool n 2 equal bool + nums + O -> bool n 2 not_equal bool + nums + O -> bool n 2 logical_and bool + nums + M -> bool n 2 logical_or bool + nums + M -> bool n 2 logical_xor bool + nums + M -> bool n 2 maximum bool + nums + O n 2 minimum bool + nums + O n 2 bitwise_and bool + ints + O flts raise an error n 2 bitwise_or bool + ints + O flts raise an error n 2 bitwise_xor bool + ints + O flts raise an error n 2 arctan2 real + M n 2 remainder ints + real + O n 2 hypot real + M ===== ==== ============= =============== ======================== Types other than those listed will be accepted, but they are cast to the smallest compatible type for which the function is defined. The casting rules are: bool -> int8 -> float32 ints -> double """ pass def test_signature(self): # the arguments to test_signature are: nin, nout, core_signature # pass assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1) # pass. 
empty core signature; treat as plain ufunc (with trivial core) assert_equal(umt.test_signature(2, 1, "(),()->()"), 0) # in the following calls, a ValueError should be raised because # of error in core signature # error: extra parenthesis msg = "core_sig: extra parenthesis" try: ret = umt.test_signature(2, 1, "((i)),(i)->()") assert_equal(ret, None, err_msg=msg) except ValueError: None # error: parenthesis matching msg = "core_sig: parenthesis matching" try: ret = umt.test_signature(2, 1, "(i),)i(->()") assert_equal(ret, None, err_msg=msg) except ValueError: None # error: incomplete signature. letters outside of parenthesis are ignored msg = "core_sig: incomplete signature" try: ret = umt.test_signature(2, 1, "(i),->()") assert_equal(ret, None, err_msg=msg) except ValueError: None # error: incomplete signature. 2 output arguments are specified msg = "core_sig: incomplete signature" try: ret = umt.test_signature(2, 2, "(i),(i)->()") assert_equal(ret, None, err_msg=msg) except ValueError: None # more complicated names for variables assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1) def test_get_signature(self): assert_equal(umt.inner1d.signature, "(i),(i)->()") def test_forced_sig(self): a = 0.5*np.arange(3, dtype='f8') assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1]) assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), casting='unsafe'), [0, 0, 1]) b = np.zeros((3,), dtype='f8') np.add(a, 0.5, out=b) assert_equal(b, [0.5, 1, 1.5]) b[:] = 0 np.add(a, 0.5, sig='i', out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) def test_sum_stability(self): a = np.ones(500, dtype=np.float32) assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) a = np.ones(500, dtype=np.float64) assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) def test_sum(self): for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble): for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, 128, 1024, 1235): tgt = dt(v * (v + 1) / 2) d = np.arange(1, v + 1, dtype=dt) assert_almost_equal(np.sum(d), tgt) assert_almost_equal(np.sum(d[::-1]), tgt) d = np.ones(500, dtype=dt) assert_almost_equal(np.sum(d[::2]), 250.) assert_almost_equal(np.sum(d[1::2]), 250.) assert_almost_equal(np.sum(d[::3]), 167.) assert_almost_equal(np.sum(d[1::3]), 167.) assert_almost_equal(np.sum(d[::-2]), 250.) assert_almost_equal(np.sum(d[-1::-2]), 250.) assert_almost_equal(np.sum(d[::-3]), 167.) assert_almost_equal(np.sum(d[-1::-3]), 167.) # sum with first reduction entry != 0 d = np.ones((1,), dtype=dt) d += d assert_almost_equal(d, 2.) def test_sum_complex(self): for dt in (np.complex64, np.complex128, np.clongdouble): for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, 128, 1024, 1235): tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) *1j) d = np.empty(v, dtype=dt) d.real = np.arange(1, v + 1) d.imag = -np.arange(1, v + 1) assert_almost_equal(np.sum(d), tgt) assert_almost_equal(np.sum(d[::-1]), tgt) d = np.ones(500, dtype=dt) + 1j assert_almost_equal(np.sum(d[::2]), 250. + 250j) assert_almost_equal(np.sum(d[1::2]), 250. + 250j) assert_almost_equal(np.sum(d[::3]), 167. 
+ 167j) assert_almost_equal(np.sum(d[1::3]), 167. + 167j) assert_almost_equal(np.sum(d[::-2]), 250. + 250j) assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) assert_almost_equal(np.sum(d[::-3]), 167. + 167j) assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) # sum with first reduction entry != 0 d = np.ones((1,), dtype=dt) + 1j d += d assert_almost_equal(d, 2. + 2j) def test_inner1d(self): a = np.arange(6).reshape((2, 3)) assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1)) a = np.arange(6) assert_array_equal(umt.inner1d(a, a), np.sum(a*a)) def test_broadcast(self): msg = "broadcast" a = np.arange(4).reshape((2, 1, 2)) b = np.arange(4).reshape((1, 2, 2)) assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) msg = "extend & broadcast loop dimensions" b = np.arange(4).reshape((2, 2)) assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) msg = "broadcast in core dimensions" a = np.arange(8).reshape((4, 2)) b = np.arange(4).reshape((4, 1)) assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) msg = "extend & broadcast core and loop dimensions" a = np.arange(8).reshape((4, 2)) b = np.array(7) assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) msg = "broadcast should fail" a = np.arange(2).reshape((2, 1, 1)) b = np.arange(3).reshape((3, 1, 1)) try: ret = umt.inner1d(a, b) assert_equal(ret, None, err_msg=msg) except ValueError: None def test_type_cast(self): msg = "type cast" a = np.arange(6, dtype='short').reshape((2, 3)) assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) msg = "type cast on one argument" a = np.arange(6).reshape((2, 3)) b = a+0.1 assert_array_almost_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) def test_endian(self): msg = "big endian" a = np.arange(6, dtype='>i4').reshape((2, 3)) assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) msg = "little endian" a = np.arange(6, dtype='<i4').reshape((2, 3)) assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) # Output should always be native-endian Ba = np.arange(1, dtype='>f8') La = np.arange(1, dtype='<f8') assert_equal((Ba+Ba).dtype, np.dtype('f8')) assert_equal((Ba+La).dtype, np.dtype('f8')) assert_equal((La+Ba).dtype, np.dtype('f8')) assert_equal((La+La).dtype, np.dtype('f8')) assert_equal(np.absolute(La).dtype, np.dtype('f8')) assert_equal(np.absolute(Ba).dtype, np.dtype('f8')) assert_equal(np.negative(La).dtype, np.dtype('f8')) assert_equal(np.negative(Ba).dtype, np.dtype('f8')) def test_incontiguous_array(self): msg = "incontiguous memory layout of array" x = np.arange(64).reshape((2, 2, 2, 2, 2, 2)) a = x[:, 0,:, 0,:, 0] b = x[:, 1,:, 1,:, 1] a[0, 0, 0] = -1 msg2 = "make sure it references to the original array" assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2) assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) x = np.arange(24).reshape(2, 3, 4) a = x.T b = x.T a[0, 0, 0] = -1 assert_equal(x[0, 0, 0], -1, err_msg=msg2) assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) def test_output_argument(self): msg = "output argument" a = np.arange(12).reshape((2, 3, 2)) b = np.arange(4).reshape((2, 1, 2)) + 1 c = np.zeros((2, 3), dtype='int') umt.inner1d(a, b, c) assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) c[:] = -1 umt.inner1d(a, b, out=c) assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) msg = "output argument with type cast" c = np.zeros((2, 3), dtype='int16') umt.inner1d(a, b, c) assert_array_equal(c, 
np.sum(a*b, axis=-1), err_msg=msg) c[:] = -1 umt.inner1d(a, b, out=c) assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) msg = "output argument with incontiguous layout" c = np.zeros((2, 3, 4), dtype='int16') umt.inner1d(a, b, c[..., 0]) assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg) c[:] = -1 umt.inner1d(a, b, out=c[..., 0]) assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg) def test_innerwt(self): a = np.arange(6).reshape((2, 3)) b = np.arange(10, 16).reshape((2, 3)) w = np.arange(20, 26).reshape((2, 3)) assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) a = np.arange(100, 124).reshape((2, 3, 4)) b = np.arange(200, 224).reshape((2, 3, 4)) w = np.arange(300, 324).reshape((2, 3, 4)) assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) def test_innerwt_empty(self): """Test generalized ufunc with zero-sized operands""" a = np.array([], dtype='f8') b = np.array([], dtype='f8') w = np.array([], dtype='f8') assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) def test_matrix_multiply(self): self.compare_matrix_multiply_results(np.long) self.compare_matrix_multiply_results(np.double) def compare_matrix_multiply_results(self, tp): d1 = np.array(rand(2, 3, 4), dtype=tp) d2 = np.array(rand(2, 3, 4), dtype=tp) msg = "matrix multiply on type %s" % d1.dtype.name def permute_n(n): if n == 1: return ([0],) ret = () base = permute_n(n-1) for perm in base: for i in range(n): new = perm + [n-1] new[n-1] = new[i] new[i] = n-1 ret += (new,) return ret def slice_n(n): if n == 0: return ((),) ret = () base = slice_n(n-1) for sl in base: ret += (sl+(slice(None),),) ret += (sl+(slice(0, 1),),) return ret def broadcastable(s1, s2): return s1 == s2 or s1 == 1 or s2 == 1 permute_3 = permute_n(3) slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) ref = True for p1 in permute_3: for p2 in permute_3: for s1 in slice_3: for s2 in slice_3: a1 = d1.transpose(p1)[s1] a2 = d2.transpose(p2)[s2] ref = ref and a1.base != None ref = ref and a2.base != None if broadcastable(a1.shape[-1], a2.shape[-2]) and \ broadcastable(a1.shape[0], a2.shape[0]): assert_array_almost_equal( umt.matrix_multiply(a1, a2), np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * a1[..., np.newaxis,:], axis=-1), err_msg = msg+' %s %s' % (str(a1.shape), str(a2.shape))) assert_equal(ref, True, err_msg="reference check") def test_object_logical(self): a = np.array([3, None, True, False, "test", ""], dtype=object) assert_equal(np.logical_or(a, None), np.array([x or None for x in a], dtype=object)) assert_equal(np.logical_or(a, True), np.array([x or True for x in a], dtype=object)) assert_equal(np.logical_or(a, 12), np.array([x or 12 for x in a], dtype=object)) assert_equal(np.logical_or(a, "blah"), np.array([x or "blah" for x in a], dtype=object)) assert_equal(np.logical_and(a, None), np.array([x and None for x in a], dtype=object)) assert_equal(np.logical_and(a, True), np.array([x and True for x in a], dtype=object)) assert_equal(np.logical_and(a, 12), np.array([x and 12 for x in a], dtype=object)) assert_equal(np.logical_and(a, "blah"), np.array([x and "blah" for x in a], dtype=object)) assert_equal(np.logical_not(a), np.array([not x for x in a], dtype=object)) assert_equal(np.logical_or.reduce(a), 3) assert_equal(np.logical_and.reduce(a), None) def test_object_array_reduction(self): # Reductions on object arrays a = np.array(['a', 'b', 'c'], dtype=object) assert_equal(np.sum(a), 'abc') assert_equal(np.max(a), 'c') assert_equal(np.min(a), 'a') a = np.array([True, False, True], 
dtype=object) assert_equal(np.sum(a), 2) assert_equal(np.prod(a), 0) assert_equal(np.any(a), True) assert_equal(np.all(a), False) assert_equal(np.max(a), True) assert_equal(np.min(a), False) def test_zerosize_reduction(self): # Test with default dtype and object dtype for a in [[], np.array([], dtype=object)]: assert_equal(np.sum(a), 0) assert_equal(np.prod(a), 1) assert_equal(np.any(a), False) assert_equal(np.all(a), True) assert_raises(ValueError, np.max, a) assert_raises(ValueError, np.min, a) def test_axis_out_of_bounds(self): a = np.array([False, False]) assert_raises(ValueError, a.all, axis=1) a = np.array([False, False]) assert_raises(ValueError, a.all, axis=-2) a = np.array([False, False]) assert_raises(ValueError, a.any, axis=1) a = np.array([False, False]) assert_raises(ValueError, a.any, axis=-2) def test_scalar_reduction(self): # The functions 'sum', 'prod', etc allow specifying axis=0 # even for scalars assert_equal(np.sum(3, axis=0), 3) assert_equal(np.prod(3.5, axis=0), 3.5) assert_equal(np.any(True, axis=0), True) assert_equal(np.all(False, axis=0), False) assert_equal(np.max(3, axis=0), 3) assert_equal(np.min(2.5, axis=0), 2.5) # Check scalar behaviour for ufuncs without an identity assert_equal(np.power.reduce(3), 3) # Make sure that scalars are coming out from this operation assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) # check if scalars/0-d arrays get cast assert_(type(np.any(0, axis=0)) is np.bool_) # assert that 0-d arrays get wrapped class MyArray(np.ndarray): pass a = np.array(1).view(MyArray) assert_(type(np.any(a)) is MyArray) def test_casting_out_param(self): # Test that it's possible to do casts on output a = np.ones((200, 100), np.int64) b = np.ones((200, 100), np.int64) c = np.ones((200, 100), np.float64) np.add(a, b, out=c) assert_equal(c, 2) a = np.zeros(65536) b = np.zeros(65536, dtype=np.float32) np.subtract(a, 0, out=b) assert_equal(b, 0) def test_where_param(self): # Test that the where= ufunc parameter works with regular arrays a = np.arange(7) b = np.ones(7) c = np.zeros(7) np.add(a, b, out=c, where=(a % 2 == 1)) assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) a = np.arange(4).reshape(2, 2) + 2 np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) assert_equal(a, [[2, 27], [16, 5]]) # Broadcasting the where= parameter np.subtract(a, 2, out=a, where=[True, False]) assert_equal(a, [[0, 27], [14, 5]]) def test_where_param_buffer_output(self): # This test is temporarily skipped because it requires # adding masking features to the nditer to work properly # With casting on output a = np.ones(10, np.int64) b = np.ones(10, np.int64) c = 1.5 * np.ones(10, np.float64) np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) def check_identityless_reduction(self, a): # np.minimum.reduce is a identityless reduction # Verify that it sees the zero at various positions a[...] 
= 1 a[1, 0, 0] = 0 assert_equal(np.minimum.reduce(a, axis=None), 0) assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) assert_equal(np.minimum.reduce(a, axis=0), [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=1), [[1, 1, 1, 1], [0, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=2), [[1, 1, 1], [0, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=()), a) a[...] = 1 a[0, 1, 0] = 0 assert_equal(np.minimum.reduce(a, axis=None), 0) assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) assert_equal(np.minimum.reduce(a, axis=0), [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=1), [[0, 1, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=2), [[1, 0, 1], [1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=()), a) a[...] = 1 a[0, 0, 1] = 0 assert_equal(np.minimum.reduce(a, axis=None), 0) assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) assert_equal(np.minimum.reduce(a, axis=0), [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=1), [[1, 0, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=2), [[0, 1, 1], [1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=()), a) def test_identityless_reduction_corder(self): a = np.empty((2, 3, 4), order='C') self.check_identityless_reduction(a) def test_identityless_reduction_forder(self): a = np.empty((2, 3, 4), order='F') self.check_identityless_reduction(a) def test_identityless_reduction_otherorder(self): a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) self.check_identityless_reduction(a) def test_identityless_reduction_noncontig(self): a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) a = a[1:, 1:, 1:] self.check_identityless_reduction(a) def test_identityless_reduction_noncontig_unaligned(self): a = np.empty((3*4*5*8 + 1,), dtype='i1') a = a[1:].view(dtype='f8') a.shape = (3, 4, 5) a = a[1:, 1:, 1:] self.check_identityless_reduction(a) def test_identityless_reduction_nonreorderable(self): a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]]) res = np.divide.reduce(a, axis=0) assert_equal(res, [8.0, 4.0, 8.0]) res = np.divide.reduce(a, axis=1) assert_equal(res, [2.0, 8.0]) res = np.divide.reduce(a, axis=()) assert_equal(res, a) assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) def test_reduce_zero_axis(self): # If we have a n x m array and do a reduction with axis=1, then we are # doing n reductions, and each reduction takes an m-element array. For # a reduction operation without an identity, then: # n > 0, m > 0: fine # n = 0, m > 0: fine, doing 0 reductions of m-element arrays # n > 0, m = 0: can't reduce a 0-element array, ValueError # n = 0, m = 0: can't reduce a 0-element array, ValueError (for # consistency with the above case) # This test doesn't actually look at return values, it just checks to # make sure that error we get an error in exactly those cases where we # expect one, and assumes the calculations themselves are done # correctly. 
def ok(f, *args, **kwargs): f(*args, **kwargs) def err(f, *args, **kwargs): assert_raises(ValueError, f, *args, **kwargs) def t(expect, func, n, m): expect(func, np.zeros((n, m)), axis=1) expect(func, np.zeros((m, n)), axis=0) expect(func, np.zeros((n // 2, n // 2, m)), axis=2) expect(func, np.zeros((n // 2, m, n // 2)), axis=1) expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) expect(func, np.zeros((m // 3, m // 3, m // 3, n // 2, n //2)), axis=(0, 1, 2)) # Check what happens if the inner (resp. outer) dimensions are a # mix of zero and non-zero: expect(func, np.zeros((10, m, n)), axis=(0, 1)) expect(func, np.zeros((10, n, m)), axis=(0, 2)) expect(func, np.zeros((m, 10, n)), axis=0) expect(func, np.zeros((10, m, n)), axis=1) expect(func, np.zeros((10, n, m)), axis=2) # np.maximum is just an arbitrary ufunc with no reduction identity assert_equal(np.maximum.identity, None) t(ok, np.maximum.reduce, 30, 30) t(ok, np.maximum.reduce, 0, 30) t(err, np.maximum.reduce, 30, 0) t(err, np.maximum.reduce, 0, 0) err(np.maximum.reduce, []) np.maximum.reduce(np.zeros((0, 0)), axis=()) # all of the combinations are fine for a reduction that has an # identity t(ok, np.add.reduce, 30, 30) t(ok, np.add.reduce, 0, 30) t(ok, np.add.reduce, 30, 0) t(ok, np.add.reduce, 0, 0) np.add.reduce([]) np.add.reduce(np.zeros((0, 0)), axis=()) # OTOH, accumulate always makes sense for any combination of n and m, # because it maps an m-element array to an m-element array. These # tests are simpler because accumulate doesn't accept multiple axes. for uf in (np.maximum, np.add): uf.accumulate(np.zeros((30, 0)), axis=0) uf.accumulate(np.zeros((0, 30)), axis=0) uf.accumulate(np.zeros((30, 30)), axis=0) uf.accumulate(np.zeros((0, 0)), axis=0) def test_safe_casting(self): # In old versions of numpy, in-place operations used the 'unsafe' # casting rules. In some future version, 'same_kind' will become the # default. a = np.array([1, 2, 3], dtype=int) # Non-in-place addition is fine assert_array_equal(assert_no_warnings(np.add, a, 1.1), [2.1, 3.1, 4.1]) assert_warns(DeprecationWarning, np.add, a, 1.1, out=a) assert_array_equal(a, [2, 3, 4]) def add_inplace(a, b): a += b assert_warns(DeprecationWarning, add_inplace, a, 1.1) assert_array_equal(a, [3, 4, 5]) # Make sure that explicitly overriding the warning is allowed: assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") assert_array_equal(a, [4, 5, 6]) # There's no way to propagate exceptions from the place where we issue # this deprecation warning, so we must throw the exception away # entirely rather than cause it to be raised at some other point, or # trigger some other unsuspecting if (PyErr_Occurred()) { ...} at some # other location entirely. 
import warnings import sys if sys.version_info[0] >= 3: from io import StringIO else: from StringIO import StringIO with warnings.catch_warnings(): warnings.simplefilter("error") old_stderr = sys.stderr try: sys.stderr = StringIO() # No error, but dumps to stderr a += 1.1 # No error on the next bit of code executed either 1 + 1 assert_("Implicitly casting" in sys.stderr.getvalue()) finally: sys.stderr = old_stderr def test_ufunc_custom_out(self): # Test ufunc with built in input types and custom output type a = np.array([0, 1, 2], dtype='i8') b = np.array([0, 1, 2], dtype='i8') c = np.empty(3, dtype=rational) # Output must be specified so numpy knows what # ufunc signature to look for result = test_add(a, b, c) assert_equal(result, np.array([0, 2, 4], dtype=rational)) # no output type should raise TypeError assert_raises(TypeError, test_add, a, b) def test_operand_flags(self): a = np.arange(16, dtype='l').reshape(4, 4) b = np.arange(9, dtype='l').reshape(3, 3) opflag_tests.inplace_add(a[:-1, :-1], b) assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) a = np.array(0) opflag_tests.inplace_add(a, 3) assert_equal(a, 3) opflag_tests.inplace_add(a, [3, 4]) assert_equal(a, 10) def test_struct_ufunc(self): import numpy.core.struct_ufunc_test as struct_ufunc a = np.array([(1, 2, 3)], dtype='u8,u8,u8') b = np.array([(1, 2, 3)], dtype='u8,u8,u8') result = struct_ufunc.add_triplet(a, b) assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) def test_custom_ufunc(self): a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)], dtype=rational); b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)], dtype=rational); result = test_add_rationals(a, b) expected = np.array([rational(1), rational(2, 3), rational(1, 2)], dtype=rational); assert_equal(result, expected); def test_custom_array_like(self): class MyThing(object): __array_priority__ = 1000 rmul_count = 0 getitem_count = 0 def __init__(self, shape): self.shape = shape def __len__(self): return self.shape[0] def __getitem__(self, i): MyThing.getitem_count += 1 if not isinstance(i, tuple): i = (i,) if len(i) > len(self.shape): raise IndexError("boo") return MyThing(self.shape[len(i):]) def __rmul__(self, other): MyThing.rmul_count += 1 return self np.float64(5)*MyThing((3, 3)) assert_(MyThing.rmul_count == 1, MyThing.rmul_count) assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) def test_inplace_fancy_indexing(self): a = np.arange(10) np.add.at(a, [2, 5, 2], 1) assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) a = np.arange(10) b = np.array([100, 100, 100]) np.add.at(a, [2, 5, 2], b) assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) a = np.arange(9).reshape(3, 3) b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) np.add.at(a, (slice(None), [1, 2, 1]), b) assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) a = np.arange(27).reshape(3, 3, 3) b = np.array([100, 200, 300]) np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) assert_equal(a, [[[0, 401, 202], [3, 404, 205], [6, 407, 208]], [[9, 410, 211], [12, 413, 214], [15, 416, 217]], [[18, 419, 220], [21, 422, 223], [24, 425, 226]]]) a = np.arange(9).reshape(3, 3) b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) np.add.at(a, ([1, 2, 1], slice(None)), b) assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) a = np.arange(27).reshape(3, 3, 3) b = np.array([100, 200, 300]) np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) assert_equal(a, [[[0, 1, 2 ], [203, 404, 605], 
[106, 207, 308]], [[9, 10, 11 ], [212, 413, 614], [115, 216, 317]], [[18, 19, 20 ], [221, 422, 623], [124, 225, 326]]]) a = np.arange(9).reshape(3, 3) b = np.array([100, 200, 300]) np.add.at(a, (0, [1, 2, 1]), b) assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) a = np.arange(27).reshape(3, 3, 3) b = np.array([100, 200, 300]) np.add.at(a, ([1, 2, 1], 0, slice(None)), b) assert_equal(a, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[209, 410, 611], [12, 13, 14], [15, 16, 17]], [[118, 219, 320], [21, 22, 23], [24, 25, 26]]]) a = np.arange(27).reshape(3, 3, 3) b = np.array([100, 200, 300]) np.add.at(a, (slice(None), slice(None), slice(None)), b) assert_equal(a, [[[100, 201, 302], [103, 204, 305], [106, 207, 308]], [[109, 210, 311], [112, 213, 314], [115, 216, 317]], [[118, 219, 320], [121, 222, 323], [124, 225, 326]]]) a = np.arange(10) np.negative.at(a, [2, 5, 2]) assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9]) # Test 0-dim array a = np.array(0) np.add.at(a, (), 1) assert_equal(a, 1) assert_raises(IndexError, np.add.at, a, 0, 1) assert_raises(IndexError, np.add.at, a, [], 1) # Test mixed dtypes a = np.arange(10) np.power.at(a, [1, 2, 3, 2], 3.5) assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) # Test boolean indexing and boolean ufuncs a = np.arange(10) index = a % 2 == 0 np.equal.at(a, index, [0, 2, 4, 6, 8]) assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) # Test unary operator a = np.arange(10, dtype='u4') np.invert.at(a, [2, 5, 2]) assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) # Test empty subspace orig = np.arange(4) a = orig[:, None][:, 0:0] np.add.at(a, [0, 1], 3) assert_array_equal(orig, np.arange(4)) # Test with swapped byte order index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) np.add.at(values, index, 3) assert_array_equal(values, [1, 8, 6, 4]) # Test exception thrown values = np.array(['a', 1], dtype=np.object) self.assertRaises(TypeError, np.add.at, values, [0, 1], 1) assert_array_equal(values, np.array(['a', 1], dtype=np.object)) if __name__ == "__main__": run_module_suite()
bsd-3-clause
5,173,443,560,941,612,000
38.777264
81
0.490299
false
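A self-contained sketch (an editorial illustration, not part of the record above) of two behaviors the test_ufunc.py record exercises: unbuffered in-place addition via np.add.at, where a repeated index accumulates, and shifting-window sums via np.add.reduceat. Only numpy is assumed; the literal values mirror the test assertions.

import numpy as np

# np.add.at applies the ufunc unbuffered, so the repeated index 2 is hit twice
a = np.arange(10)
np.add.at(a, [2, 5, 2], 1)
assert a[2] == 4 and a[5] == 6

# overlapping (i, i+2) index pairs turn reduceat into shifting window sums
x = np.arange(6)
idx = np.array([0, 2, 1, 3, 2, 4, 3, 5])
assert list(np.add.reduceat(x, idx)[::2]) == [1, 3, 5, 7]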
diorcety/intellij-community
plugins/hg4idea/testData/bin/mercurial/help.py
91
18018
# help.py - help data for mercurial # # Copyright 2006 Matt Mackall <[email protected]> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from i18n import gettext, _ import itertools, sys, os, error import extensions, revset, fileset, templatekw, templatefilters, filemerge import encoding, util, minirst import cmdutil def listexts(header, exts, indent=1): '''return a text listing of the given extensions''' rst = [] if exts: rst.append('\n%s\n\n' % header) for name, desc in sorted(exts.iteritems()): rst.append('%s:%s: %s\n' % (' ' * indent, name, desc)) return rst def extshelp(): rst = loaddoc('extensions')().splitlines(True) rst.extend(listexts(_('enabled extensions:'), extensions.enabled())) rst.extend(listexts(_('disabled extensions:'), extensions.disabled())) doc = ''.join(rst) return doc def optrst(options, verbose): data = [] multioccur = False for option in options: if len(option) == 5: shortopt, longopt, default, desc, optlabel = option else: shortopt, longopt, default, desc = option optlabel = _("VALUE") # default label if _("DEPRECATED") in desc and not verbose: continue so = '' if shortopt: so = '-' + shortopt lo = '--' + longopt if default: desc += _(" (default: %s)") % default if isinstance(default, list): lo += " %s [+]" % optlabel multioccur = True elif (default is not None) and not isinstance(default, bool): lo += " %s" % optlabel data.append((so, lo, desc)) rst = minirst.maketable(data, 1) if multioccur: rst.append(_("\n[+] marked option can be specified multiple times\n")) return ''.join(rst) def indicateomitted(rst, omitted, notomitted=None): rst.append('\n\n.. container:: omitted\n\n %s\n\n' % omitted) if notomitted: rst.append('\n\n.. container:: notomitted\n\n %s\n\n' % notomitted) def topicmatch(kw): """Return help topics matching kw. Returns {'section': [(name, summary), ...], ...} where section is one of topics, commands, extensions, or extensioncommands. 
""" kw = encoding.lower(kw) def lowercontains(container): return kw in encoding.lower(container) # translated in helptable results = {'topics': [], 'commands': [], 'extensions': [], 'extensioncommands': [], } for names, header, doc in helptable: if (sum(map(lowercontains, names)) or lowercontains(header) or lowercontains(doc())): results['topics'].append((names[0], header)) import commands # avoid cycle for cmd, entry in commands.table.iteritems(): if cmd.startswith('debug'): continue if len(entry) == 3: summary = entry[2] else: summary = '' # translate docs *before* searching there docs = _(getattr(entry[0], '__doc__', None)) or '' if kw in cmd or lowercontains(summary) or lowercontains(docs): doclines = docs.splitlines() if doclines: summary = doclines[0] cmdname = cmd.split('|')[0].lstrip('^') results['commands'].append((cmdname, summary)) for name, docs in itertools.chain( extensions.enabled().iteritems(), extensions.disabled().iteritems()): # extensions.load ignores the UI argument mod = extensions.load(None, name, '') if lowercontains(name) or lowercontains(docs): # extension docs are already translated results['extensions'].append((name, docs.splitlines()[0])) for cmd, entry in getattr(mod, 'cmdtable', {}).iteritems(): if kw in cmd or (len(entry) > 2 and lowercontains(entry[2])): cmdname = cmd.split('|')[0].lstrip('^') if entry[0].__doc__: cmddoc = gettext(entry[0].__doc__).splitlines()[0] else: cmddoc = _('(no help text available)') results['extensioncommands'].append((cmdname, cmddoc)) return results def loaddoc(topic): """Return a delayed loader for help/topic.txt.""" def loader(): if util.mainfrozen(): module = sys.executable else: module = __file__ base = os.path.dirname(module) for dir in ('.', '..'): docdir = os.path.join(base, dir, 'help') if os.path.isdir(docdir): break path = os.path.join(docdir, topic + ".txt") doc = gettext(util.readfile(path)) for rewriter in helphooks.get(topic, []): doc = rewriter(topic, doc) return doc return loader helptable = sorted([ (["config", "hgrc"], _("Configuration Files"), loaddoc('config')), (["dates"], _("Date Formats"), loaddoc('dates')), (["patterns"], _("File Name Patterns"), loaddoc('patterns')), (['environment', 'env'], _('Environment Variables'), loaddoc('environment')), (['revisions', 'revs'], _('Specifying Single Revisions'), loaddoc('revisions')), (['multirevs', 'mrevs'], _('Specifying Multiple Revisions'), loaddoc('multirevs')), (['revsets', 'revset'], _("Specifying Revision Sets"), loaddoc('revsets')), (['filesets', 'fileset'], _("Specifying File Sets"), loaddoc('filesets')), (['diffs'], _('Diff Formats'), loaddoc('diffs')), (['merge-tools', 'mergetools'], _('Merge Tools'), loaddoc('merge-tools')), (['templating', 'templates', 'template', 'style'], _('Template Usage'), loaddoc('templates')), (['urls'], _('URL Paths'), loaddoc('urls')), (["extensions"], _("Using Additional Features"), extshelp), (["subrepos", "subrepo"], _("Subrepositories"), loaddoc('subrepos')), (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb')), (["glossary"], _("Glossary"), loaddoc('glossary')), (["hgignore", "ignore"], _("Syntax for Mercurial Ignore Files"), loaddoc('hgignore')), (["phases"], _("Working with Phases"), loaddoc('phases')), ]) # Map topics to lists of callable taking the current topic help and # returning the updated version helphooks = {} def addtopichook(topic, rewriter): helphooks.setdefault(topic, []).append(rewriter) def makeitemsdoc(topic, doc, marker, items): """Extract docstring from the items key to function mapping, build a 
.single documentation block and use it to overwrite the marker in doc """ entries = [] for name in sorted(items): text = (items[name].__doc__ or '').rstrip() if not text: continue text = gettext(text) lines = text.splitlines() doclines = [(lines[0])] for l in lines[1:]: # Stop once we find some Python doctest if l.strip().startswith('>>>'): break doclines.append(' ' + l.strip()) entries.append('\n'.join(doclines)) entries = '\n\n'.join(entries) return doc.replace(marker, entries) def addtopicsymbols(topic, marker, symbols): def add(topic, doc): return makeitemsdoc(topic, doc, marker, symbols) addtopichook(topic, add) addtopicsymbols('filesets', '.. predicatesmarker', fileset.symbols) addtopicsymbols('merge-tools', '.. internaltoolsmarker', filemerge.internals) addtopicsymbols('revsets', '.. predicatesmarker', revset.symbols) addtopicsymbols('templates', '.. keywordsmarker', templatekw.dockeywords) addtopicsymbols('templates', '.. filtersmarker', templatefilters.filters) def help_(ui, name, unknowncmd=False, full=True, **opts): ''' Generate the help for 'name' as unformatted restructured text. If 'name' is None, describe the commands available. ''' import commands # avoid cycle def helpcmd(name): try: aliases, entry = cmdutil.findcmd(name, commands.table, strict=unknowncmd) except error.AmbiguousCommand, inst: # py3k fix: except vars can't be used outside the scope of the # except block, nor can be used inside a lambda. python issue4617 prefix = inst.args[0] select = lambda c: c.lstrip('^').startswith(prefix) rst = helplist(select) return rst rst = [] # check if it's an invalid alias and display its error if it is if getattr(entry[0], 'badalias', False): if not unknowncmd: ui.pushbuffer() entry[0](ui) rst.append(ui.popbuffer()) return rst # synopsis if len(entry) > 2: if entry[2].startswith('hg'): rst.append("%s\n" % entry[2]) else: rst.append('hg %s %s\n' % (aliases[0], entry[2])) else: rst.append('hg %s\n' % aliases[0]) # aliases if full and not ui.quiet and len(aliases) > 1: rst.append(_("\naliases: %s\n") % ', '.join(aliases[1:])) rst.append('\n') # description doc = gettext(entry[0].__doc__) if not doc: doc = _("(no help text available)") if util.safehasattr(entry[0], 'definition'): # aliased command if entry[0].definition.startswith('!'): # shell alias doc = _('shell alias for::\n\n %s') % entry[0].definition[1:] else: doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc) doc = doc.splitlines(True) if ui.quiet or not full: rst.append(doc[0]) else: rst.extend(doc) rst.append('\n') # check if this command shadows a non-trivial (multi-line) # extension help text try: mod = extensions.find(name) doc = gettext(mod.__doc__) or '' if '\n' in doc.strip(): msg = _('use "hg help -e %s" to show help for ' 'the %s extension') % (name, name) rst.append('\n%s\n' % msg) except KeyError: pass # options if not ui.quiet and entry[1]: rst.append('\n%s\n\n' % _("options:")) rst.append(optrst(entry[1], ui.verbose)) if ui.verbose: rst.append('\n%s\n\n' % _("global options:")) rst.append(optrst(commands.globalopts, ui.verbose)) if not ui.verbose: if not full: rst.append(_('\nuse "hg help %s" to show the full help text\n') % name) elif not ui.quiet: omitted = _('use "hg -v help %s" to show more complete' ' help and the global options') % name notomitted = _('use "hg -v help %s" to show' ' the global options') % name indicateomitted(rst, omitted, notomitted) return rst def helplist(select=None): # list of commands if name == "shortlist": header = _('basic commands:\n\n') else: header = _('list of 
commands:\n\n') h = {} cmds = {} for c, e in commands.table.iteritems(): f = c.split("|", 1)[0] if select and not select(f): continue if (not select and name != 'shortlist' and e[0].__module__ != commands.__name__): continue if name == "shortlist" and not f.startswith("^"): continue f = f.lstrip("^") if not ui.debugflag and f.startswith("debug"): continue doc = e[0].__doc__ if doc and 'DEPRECATED' in doc and not ui.verbose: continue doc = gettext(doc) if not doc: doc = _("(no help text available)") h[f] = doc.splitlines()[0].rstrip() cmds[f] = c.lstrip("^") rst = [] if not h: if not ui.quiet: rst.append(_('no commands defined\n')) return rst if not ui.quiet: rst.append(header) fns = sorted(h) for f in fns: if ui.verbose: commacmds = cmds[f].replace("|",", ") rst.append(" :%s: %s\n" % (commacmds, h[f])) else: rst.append(' :%s: %s\n' % (f, h[f])) if not name: exts = listexts(_('enabled extensions:'), extensions.enabled()) if exts: rst.append('\n') rst.extend(exts) rst.append(_("\nadditional help topics:\n\n")) topics = [] for names, header, doc in helptable: topics.append((names[0], header)) for t, desc in topics: rst.append(" :%s: %s\n" % (t, desc)) optlist = [] if not ui.quiet: if ui.verbose: optlist.append((_("global options:"), commands.globalopts)) if name == 'shortlist': optlist.append((_('use "hg help" for the full list ' 'of commands'), ())) else: if name == 'shortlist': msg = _('use "hg help" for the full list of commands ' 'or "hg -v" for details') elif name and not full: msg = _('use "hg help %s" to show the full help ' 'text') % name else: msg = _('use "hg -v help%s" to show builtin aliases and ' 'global options') % (name and " " + name or "") optlist.append((msg, ())) if optlist: for title, options in optlist: rst.append('\n%s\n' % title) if options: rst.append('\n%s\n' % optrst(options, ui.verbose)) return rst def helptopic(name): for names, header, doc in helptable: if name in names: break else: raise error.UnknownCommand(name) rst = [minirst.section(header)] # description if not doc: rst.append(" %s\n" % _("(no help text available)")) if util.safehasattr(doc, '__call__'): rst += [" %s\n" % l for l in doc().splitlines()] if not ui.verbose: omitted = (_('use "hg help -v %s" to show more complete help') % name) indicateomitted(rst, omitted) try: cmdutil.findcmd(name, commands.table) rst.append(_('\nuse "hg help -c %s" to see help for ' 'the %s command\n') % (name, name)) except error.UnknownCommand: pass return rst def helpext(name): try: mod = extensions.find(name) doc = gettext(mod.__doc__) or _('no help text available') except KeyError: mod = None doc = extensions.disabledext(name) if not doc: raise error.UnknownCommand(name) if '\n' not in doc: head, tail = doc, "" else: head, tail = doc.split('\n', 1) rst = [_('%s extension - %s\n\n') % (name.split('.')[-1], head)] if tail: rst.extend(tail.splitlines(True)) rst.append('\n') if not ui.verbose: omitted = (_('use "hg help -v %s" to show more complete help') % name) indicateomitted(rst, omitted) if mod: try: ct = mod.cmdtable except AttributeError: ct = {} modcmds = set([c.split('|', 1)[0] for c in ct]) rst.extend(helplist(modcmds.__contains__)) else: rst.append(_('use "hg help extensions" for information on enabling ' 'extensions\n')) return rst def helpextcmd(name): cmd, ext, mod = extensions.disabledcmd(ui, name, ui.configbool('ui', 'strict')) doc = gettext(mod.__doc__).splitlines()[0] rst = listexts(_("'%s' is provided by the following " "extension:") % cmd, {ext: doc}, indent=4) rst.append('\n') rst.append(_('use "hg help 
extensions" for information on enabling ' 'extensions\n')) return rst rst = [] kw = opts.get('keyword') if kw: matches = topicmatch(kw) for t, title in (('topics', _('Topics')), ('commands', _('Commands')), ('extensions', _('Extensions')), ('extensioncommands', _('Extension Commands'))): if matches[t]: rst.append('%s:\n\n' % title) rst.extend(minirst.maketable(sorted(matches[t]), 1)) rst.append('\n') elif name and name != 'shortlist': i = None if unknowncmd: queries = (helpextcmd,) elif opts.get('extension'): queries = (helpext,) elif opts.get('command'): queries = (helpcmd,) else: queries = (helptopic, helpcmd, helpext, helpextcmd) for f in queries: try: rst = f(name) i = None break except error.UnknownCommand, inst: i = inst if i: raise i else: # program name if not ui.quiet: rst = [_("Mercurial Distributed SCM\n"), '\n'] rst.extend(helplist()) return ''.join(rst)
apache-2.0
3,370,541,433,408,960,500
34.75
80
0.513542
false
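A minimal standalone sketch (an editorial illustration, not part of the record above) of the topic-hook pattern help.py uses: addtopichook() registers per-topic rewriters, and loaddoc() pipes the raw topic text through each one. The names below mirror that flow for illustration only and are not imports from mercurial.

helphooks = {}

def addtopichook(topic, rewriter):
    helphooks.setdefault(topic, []).append(rewriter)

def add_footer(topic, doc):
    # a rewriter receives (topic, doc) and returns the updated text
    return doc + "\nSee also: hg help -v %s\n" % topic

addtopichook('dates', add_footer)

doc = "Date Formats\n"
for rewriter in helphooks.get('dates', []):
    doc = rewriter('dates', doc)
print(doc)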
toslunar/chainerrl
chainerrl/links/empirical_normalization.py
1
3685
from __future__ import print_function from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from builtins import * # NOQA from future import standard_library standard_library.install_aliases() # NOQA import chainer import numpy as np class EmpiricalNormalization(chainer.Link): """Normalize mean and variance of values based on empirical values. Args: shape (int or tuple of int): Shape of input values except batch axis. batch_axis (int): Batch axis. eps (float): Small value for stability. dtype (dtype): Dtype of input values. until (int or None): If this arg is specified, the link learns input values until the sum of batch sizes exceeds it. """ def __init__(self, shape, batch_axis=0, eps=1e-2, dtype=np.float32, until=None, clip_threshold=None): super(EmpiricalNormalization, self).__init__() dtype = np.dtype(dtype) self.batch_axis = batch_axis self.eps = dtype.type(eps) self.until = until self.clip_threshold = clip_threshold self._mean = np.expand_dims(np.zeros(shape, dtype=dtype), batch_axis) self._var = np.expand_dims(np.ones(shape, dtype=dtype), batch_axis) self.count = 0 self.register_persistent('_mean') self.register_persistent('_var') self.register_persistent('count') # cache self._cached_std_inverse = None @property def mean(self): return self.xp.squeeze(self._mean, self.batch_axis).copy() @property def std(self): xp = self.xp return xp.sqrt(xp.squeeze(self._var, self.batch_axis)) @property def _std_inverse(self): if self._cached_std_inverse is None: self._cached_std_inverse = (self._var + self.eps) ** -0.5 return self._cached_std_inverse def experience(self, x): """Learn input values without computing their output values.""" if self.until is not None and self.count >= self.until: return if isinstance(x, chainer.Variable): x = x.array count_x = x.shape[self.batch_axis] if count_x == 0: return xp = self.xp self.count += count_x rate = x.dtype.type(count_x / self.count) mean_x = xp.mean(x, axis=self.batch_axis, keepdims=True) var_x = xp.var(x, axis=self.batch_axis, keepdims=True) delta_mean = mean_x - self._mean self._mean += rate * delta_mean self._var += rate * ( var_x - self._var + delta_mean * (mean_x - self._mean) ) # clear cache self._cached_std_inverse = None def __call__(self, x, update=True): """Normalize mean and variance of values based on empirical values. Args: x (ndarray or Variable): Input values update (bool): Flag to learn the input values Returns: ndarray or Variable: Normalized output values """ xp = self.xp mean = xp.broadcast_to(self._mean, x.shape) std_inv = xp.broadcast_to(self._std_inverse, x.shape) if update: self.experience(x) normalized = (x - mean) * std_inv if self.clip_threshold is not None: normalized = xp.clip( normalized, -self.clip_threshold, self.clip_threshold) return normalized def inverse(self, y): xp = self.xp mean = xp.broadcast_to(self._mean, y.shape) std = xp.broadcast_to(xp.sqrt(self._var + self.eps), y.shape) return y * std + mean
mit
1,164,823,547,270,884,000
30.495726
77
0.593487
false
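A short usage sketch (an editorial illustration, not part of the record above) for the EmpiricalNormalization link, assuming chainerrl is installed and importable from the record's path; the batch shape and constants are illustrative only.

import numpy as np
from chainerrl.links.empirical_normalization import EmpiricalNormalization

en = EmpiricalNormalization(3)   # track per-feature stats over batch_axis=0
x = (np.random.randn(100, 3) * 5.0 + 2.0).astype(np.float32)
y = en(x)                        # learns this batch, then returns normalized values
print(en.mean, en.std)           # running estimates, roughly [2 2 2] and [5 5 5]
z = en(x, update=False)          # normalize without touching the statistics
x_back = en.inverse(z)           # inverse() maps normalized values back to input scale
assert np.allclose(x_back, x, atol=1e-3)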
jgiannuzzi/pypi-server
pypi_server/handlers/pypi/proxy/client.py
2
4592
# encoding: utf-8 import hashlib import logging from copy import copy from slimurl import URL from tornado.gen import coroutine, Return from tornado.httpclient import AsyncHTTPClient from tornado.ioloop import IOLoop from tornado.locks import Lock from tornado.options import options from tornado_xmlrpc.client import ServerProxy from pypi_server.cache import Cache, HOUR, MONTH from pypi_server.hash_version import HashVersion log = logging.getLogger(__name__) def normalize_package_name(name): return name.lower().replace("_", "-").replace(".", "-") class PYPIClient(object): CLIENT = None BACKEND = None THREAD_POOL = None INDEX = None XMLRPC = None LOCK = None @classmethod def configure(cls, backend, thread_pool): cls.CLIENT = AsyncHTTPClient(io_loop=IOLoop.current()) cls.BACKEND = backend cls.THREAD_POOL = thread_pool cls.XMLRPC = ServerProxy( str(copy(backend)(path="/pypi")), ) cls.LOCK = Lock() @classmethod @coroutine @Cache(HOUR, files_cache=True, ignore_self=True) def packages(cls): with (yield cls.LOCK.acquire()): index = dict( map( lambda x: (normalize_package_name(x), x), (yield cls.XMLRPC.list_packages()) ) ) log.info("Remote PYPI index updated: %d packages", len(index)) raise Return(index) @classmethod @coroutine @Cache(4 * HOUR, files_cache=True, ignore_self=True) def search(cls, names, descriptions, operator="or"): assert operator in ('or', 'and') result = yield cls.XMLRPC.search({'name': names, 'description': descriptions}, operator) raise Return(result) @classmethod @coroutine def exists(cls, name): try: real_name = yield cls.find_real_name(name) except LookupError: raise Return(False) releases = yield cls.releases(real_name) if not releases: raise Return(False) raise Return(True) @classmethod @coroutine def find_real_name(cls, name): if not options.pypi_proxy: raise LookupError("Proxying to PyPI disabled") name = normalize_package_name(name).lower() packages = yield cls.packages() real_name = packages.get(name) if real_name is None: raise LookupError("Package not found") raise Return(real_name) @classmethod @coroutine @Cache(4 * HOUR, files_cache=True, ignore_self=True) def releases(cls, name): process_versions = lambda x: set(HashVersion(i) for i in x) all_releases, current_releases = yield [ cls.XMLRPC.package_releases(name, True), cls.XMLRPC.package_releases(name) ] all_releases = process_versions(all_releases) current_releases = process_versions(current_releases) hidden_releases = all_releases - current_releases res = [] for x in current_releases: x.hidden = False res.append(x) for x in hidden_releases: x.hidden = True res.append(x) raise Return(set(res)) @classmethod @coroutine @Cache(MONTH, files_cache=True, ignore_self=True) def release_data(cls, name, version): info, files = yield [ cls.XMLRPC.release_data(str(name), str(version)), cls.XMLRPC.release_urls(str(name), str(version)) ] download_url = info.get('download_url') if download_url and not files: try: url = URL(download_url) filename = url.path.split('/')[-1] if "#" in filename: filename = filename.split("#")[0] response = yield cls.CLIENT.fetch(download_url) files = [{ 'filename': filename, 'md5_digest': hashlib.md5(response.body).hexdigest(), 'downloads': -1, 'url': download_url, 'size': len(response.body), 'comment_text': None, }] except Exception as e: files = [] log.error("Error when trying to download version %s of package %s", version, name) log.exception(e) else: files = sorted( files, key=lambda x: x['filename'] ) raise Return((info, files))
mit
4,552,339,786,821,855,700
27.7
98
0.567291
false
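The proxy client's normalize_package_name() is easy to sanity-check standalone (an editorial illustration, not part of the record above); the function body is copied verbatim from the record, and the example package names are made up.

def normalize_package_name(name):
    return name.lower().replace("_", "-").replace(".", "-")

assert normalize_package_name("Zope.Interface") == "zope-interface"
assert normalize_package_name("My_Package") == "my-package"
assert normalize_package_name("requests") == "requests"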
talha81/TACTIC-DEV
src/client/tactic_client_lib/tactic_server_stub.py
4
146424
########################################################### # # Copyright (c) 2005, Southpaw Technology # All Rights Reserved # # PROPRIETARY INFORMATION. This software is proprietary to # Southpaw Technology, and is not to be reproduced, transmitted, # or disclosed in any way without written permission. # # # # This is a stub for accessing the TACTIC server. It simplifies the access for # scripts using the client api. Thin wrapper to the client API. # These are meant to be copied to client directories. import datetime import re import xmlrpclib, os, getpass, shutil, httplib, sys, urllib, types, hashlib class TacticApiException(Exception): pass ''' Class: TacticServerStub It allows a client to send commands to and receive information from the TACTIC server.''' class TacticServerStub(object): ''' Constructor: TacticServerStub ''' def __init__(my, login=None, setup=True, protocol=None, server=None, project=None, ticket=None, user=None, password=""): '''Function: __init__(login=None, setup=True, protocol=None, server=None, project=None, ticket=None, user=None, password="") Initialize the TacticServerStub @keyparam: login - login_code setup - if set to True, it runs the protocol set-up protocol - xmlrpc or local. it defaults to xmlrpc server - tactic server project - targeted project ticket - login ticket key user - tactic login_code that overrides the login password - password for login''' # initialize some variables if user: login = user my.login = login my.project_code = None my.server = None my.has_server = False my.server_name = None my.ticket = None # the ticket sent to the server my.login_ticket = None my.transaction_ticket = None # autodetect protocol if not protocol: protocol = 'xmlrpc' try: import tactic from pyasm.web import WebContainer web = WebContainer.get_web() if web: server_name = web.get_http_host() if server_name: protocol = 'local' except ImportError: pass my.protocol = protocol # if all of the necessary parameters are set, then connect if server and (ticket or login) and project: my.set_server(server) my.set_project(project) if ticket: my.set_ticket(ticket) elif login: # else try with no password (api_require_password) ticket = my.get_ticket(login, password) my.set_ticket(ticket) elif setup: my._setup(protocol) # cached handoff dir my.handoff_dir = None '''if the function does not exist, call this and make an attempt ''' def _call_missing_method(my, *args): # convert from tuple to sequence args = [x for x in args] args.insert(0, my.ticket) return my.server.missing_method(my.missing_method_name, args) ''' DISABLING for now def __getattr__(my, attr): my.missing_method_name = attr return my._call_missing_method ''' def test_error(my): return my.server.test_error(my.ticket) def get_protocol(my): '''Function: get_protocol() @return: string - local or xmlrpc''' return my.protocol def set_protocol(my, protocol): '''Function: set_protocol(protocol) @param: protocol - local or xmlrpc''' my.protocol = protocol def set_ticket(my, ticket): '''set the login ticket''' my.set_login_ticket(ticket) # reset the handoff_dir my.handoff_dir = None def set_login_ticket(my, ticket): '''Function: set_login_ticket(ticket) Set the login ticket with the ticket key''' my.login_ticket = ticket my.set_transaction_ticket(ticket) def set_transaction_ticket(my, ticket): if not my.project_code: my.project_code = '' my.ticket = { 'ticket': ticket, 'project': my.project_code, 'language': 'python' } """ if my.project_code: my.ticket = { 'ticket': ticket, 'project': my.project_code, 'language': 'python' } else: raise
TacticApiException("No project has been set. Please set a project using method TacticServerStub.set_project()") """ my.transaction_ticket = ticket def get_transaction_ticket(my): return my.transaction_ticket def get_login_ticket(my): return my.login_ticket def get_login(my): return my.login def set_server(my, server_name): '''Function: set_server(server_name) Set the server name for this XML-RPC server''' my.server_name = server_name if my.protocol == "local": from pyasm.prod.service import ApiXMLRPC my.server = ApiXMLRPC() my.server.set_protocol('local') my.has_server = True return if (my.server_name.startswith("http://") or my.server_name.startswith("https://")): url = "%s/tactic/default/Api/" % my.server_name else: url = "http://%s/tactic/default/Api/" % my.server_name #url = "http://localhost:8081/" # TODO: Not implmeneted: This is needed for isolation of transactions #if my.transaction_ticket: # url = '%s%s' % (url, my.transaction_ticket) my.server = xmlrpclib.Server(url, allow_none=True) try: pass #print my.server.test(my.ticket) except httplib.InvalidURL: raise TacticApiException("You have supplied an invalid server name [%s]" % my.server_name) my.has_server = True # WARNING: this is changing code in the xmlrpclib library. This # library is not sending a proper user agent. Hacking it in # so that at least the OS is sent if os.name == "nt": user_agent = 'xmlrpclib.py (Windows)' else: user_agent = 'xmlrpclib.py (Linux)' xmlrpclib.Transport.user_agent = user_agent def get_server_name(my): return my.server_name def get_server(my): return my.server def set_project(my, project_code): '''Function: set_project(project_code) Set the project code''' my.project_code = project_code if my.protocol == 'local': from pyasm.biz import Project Project.set_project(project_code) #my.set_project_state(project_code) # switch the project code on the ticket my.set_transaction_ticket(my.transaction_ticket) def get_project(my): return my.project_code def set_palette(my, palette): my.server.set_palette(palette) #----------------------------------- # API FUNCTIONS # # # # Building earch type functions # def build_search_type(my, search_type, project_code=None): '''API Function: build_search_type(search_type, project_code=None) Convenience method to build a search type from its components. It is a simple method that build the proper format for project scoped search types. A full search type has the form: prod/asset?project=bar. It uniquely defines a type of sobject in a project. @param: search_type - the unique identifier of a search type: ie prod/asset project_code (optional) - an optional project code. If this is not included, the project from get_ticket() is added. @return: search type string @example [code] search_type = "prod/asset" full_search_type = server.build_search_type(search_type) [/code] ''' # do not append project for sthpw/* search_type if search_type.startswith('sthpw/'): return search_type if not project_code: project_code = my.project_code assert project_code return "%s?project=%s" % (search_type, project_code) def build_search_key(my, search_type, code, project_code=None, column='code'): '''API Function: build_search_key(search_type, code, project_code=None, column='code') Convenience method to build a search key from its components. A search_key uniquely indentifies a specific sobject. 
The returned string is heavily used as an argument in the API to identify an sobject to operate on. A search key has the form: "prod/shot?project=bar&code=XG001" where search_type = "prod/shot", project_code = "bar" and code = "XG001" @param: search_type - the unique identifier of a search type: ie prod/asset code - the unique code of the sobject @keyparam: project_code - an optional project code. If this is not included, the project from get_ticket() is added. @return: string - search key @example: [code] search_type = "prod/asset" code = "chr001" search_key = server.build_search_key(search_type, code) e.g. search_key = prod/asset?project=<project_code>&code=chr001 [/code] [code] search_type = "sthpw/login" code = "admin" search_key = server.build_search_key(search_type, code, column='code') e.g. search_key = sthpw/login?code=admin [/code] ''' if not project_code: if not search_type.startswith("sthpw/"): project_code = my.project_code assert project_code if search_type.find('?') == -1: if search_type.startswith('sthpw/'): search_key = "%s?%s=%s" %(search_type, column, code) else: search_key = "%s?project=%s&%s=%s" % (search_type, project_code, column, code) else: search_key = "%s&%s=%s" %(search_type, column, code) return search_key def split_search_key(my, search_key): '''API Function: split_search_key(search_key) Convenience method to split a search_key into its search_type and search_code/id components. Note: only accepts the new form prod/asset?project=sample3d&code=chr001 @param: search_key - the unique identifier of a sobject @return: tuple - search type, search code/id ''' if search_key.find('&') != -1: search_type, code = search_key.split('&') else: # non project-based search_key search_type, code = search_key.split('?') codes = code.split('=') assert len(codes) == 2; return search_type, codes[1] def get_home_dir(my): '''API Function: get_home_dir() OS-independent method to get the home directory of the current user.
        @return:
        string - home directory
        '''
        if os.name == "nt":
            dir = "%s%s" % (os.environ.get('HOMEDRIVE'), os.environ.get('HOMEPATH'))
            if os.path.exists(dir):
                return dir
        return os.path.expanduser('~')

    def create_resource_path(my, login=None):
        '''DEPRECATED: use create_resource_paths() or get_resource_path()
        Create the resource path'''
        # get the current user
        if not login:
            login = getpass.getuser()
        filename = "%s.tacticrc" % login

        # first check home directory
        dir = my.get_home_dir()
        is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir)

        # if the home directory does not exist or is not writable,
        # use the temp directory
        if not os.path.exists(dir) or not is_dir_writeable:
            if os.name == "nt":
                dir = "C:/sthpw/etc"
            else:
                dir = "/tmp/sthpw/etc"
            if not os.path.exists(dir):
                os.makedirs(dir)
        else:
            dir = "%s/.tactic/etc" % dir
            if not os.path.exists(dir):
                os.makedirs(dir)

        # if an old resource path does exist, then remove it
        if os.name == "nt":
            old_dir = "C:/sthpw/etc"
        else:
            old_dir = "/tmp/sthpw/etc"
        old_path = "%s/%s" % (old_dir, filename)
        if os.path.exists(old_path):
            os.unlink(old_path)
            print "Removing deprecated resource file [%s]" % old_path

        path = "%s/%s" % (dir, filename)
        return path

    def create_resource_paths(my, login=None):
        '''Get the 1 or possibly 2 resource paths for creation'''
        # get the current user
        os_login = getpass.getuser()
        if not login:
            login = os_login
        filename = "%s.tacticrc" % login
        filename2 = "%s.tacticrc" % os_login

        # first check home directory
        dir = my.get_home_dir()
        is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir)

        # if the home directory does not exist or is not writable,
        # use the temp directory
        if not os.path.exists(dir) or not is_dir_writeable:
            if os.name == "nt":
                dir = "C:/sthpw/etc"
            else:
                dir = "/tmp/sthpw/etc"
            if not os.path.exists(dir):
                os.makedirs(dir)
        else:
            dir = "%s/.tactic/etc" % dir
            if not os.path.exists(dir):
                os.makedirs(dir)

        # if an old resource path does exist, then remove it
        if os.name == "nt":
            old_dir = "C:/sthpw/etc"
        else:
            old_dir = "/tmp/sthpw/etc"
        old_path = "%s/%s" % (old_dir, filename)
        if os.path.exists(old_path):
            os.unlink(old_path)
            print "Removing deprecated resource file [%s]" % old_path

        path = "%s/%s" % (dir, filename)
        path2 = "%s/%s" % (dir, filename2)
        paths = [path]
        if path2 != path:
            paths.append(path2)
        return paths

    def get_resource_path(my, login=None):
        '''API Function: get_resource_path(login=None)
        Get the resource path of the current user. It differs from
        create_resource_paths(), which actually creates the directories.
        The resource path identifies the location of the file which is used
        to cache connection information.

        An example of the contents is shown below:

        [code]
        login=admin
        server=localhost
        ticket=30818057bf561429f97af59243e6ef21
        project=unittest
        [/code]

        The contents in the resource file represent the defaults to use
        when connecting to the TACTIC server, but may be overridden by the
        API methods: set_ticket(), set_server(), set_project() or the
        environment variables: TACTIC_TICKET, TACTIC_SERVER, and TACTIC_PROJECT

        Typically this method is not explicitly called by API developers and
        is used automatically by the API server stub.  It attempts to get
        from the home dir first and then from the temp dir if that fails.

        @param:
        login (optional) - login code.
If not provided, it gets the current system user @return: string - resource file path ''' # get the current user if not login: login = getpass.getuser() filename = "%s.tacticrc" % login # first check home directory dir = my.get_home_dir() is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir) path = "%s/.tactic/etc/%s" % (dir,filename) # if the home directory path does not exist, check the temp directory if not is_dir_writeable or not os.path.exists(path): if os.name == "nt": dir = "C:/sthpw/etc" else: dir = "/tmp/sthpw/etc" else: dir = "%s/.tactic/etc" % dir path = "%s/%s" % (dir,filename) return path def get_ticket(my, login, password): '''API Function: get_ticket(login, password) Get an authentication ticket based on a login and password. This function first authenticates the user and the issues a ticket. The returned ticket is used on subsequent calls to the client api @param: login - the user that is used for authentications password - the password of that user @return: string - ticket key ''' return my.server.get_ticket(login, password) def get_info_from_user(my, force=False): '''API Function: get_info_from_user(force=False) Get input from the user about the users environment. Questions asked pertain to the location of the tactic server, the project worked on and the user's login and password. This information is stored in an .<login>.tacticrc file. @keyparam: force - if set to True, it will always ask for new infomation from the command prompt again ''' if my.protocol == "local": return old_server_name = my.server_name old_project_code = my.project_code old_ticket = my.login_ticket old_login = my.login default_login = getpass.getuser() if not force and old_server_name and old_project_code: return print print "TACTIC requires the following connection information:" print server_name = raw_input("Enter name of TACTIC server (%s): " % old_server_name) if not server_name: server_name = old_server_name print login = raw_input("Enter user name (%s): " % default_login) if not login: login = default_login print if login == old_login and old_ticket: password = getpass.getpass( "Enter password (or use previous ticket): ") else: password = getpass.getpass("Enter password: ") print project_code = raw_input("Project (%s): " % old_project_code) if not project_code: project_code = old_project_code my.set_server(server_name) # do the actual work if login != old_login or password: ticket = my.get_ticket(login, password) print "Got ticket [%s] for [%s]" % (ticket, login) else: ticket = old_ticket # commit info to a file paths = my.create_resource_paths(login) # this is needed when running get_ticket.py my.login = login for path in paths: file = open(path, 'w') file.write("login=%s\n" % login) file.write("server=%s\n" % server_name) file.write("ticket=%s\n" % ticket) if project_code: file.write("project=%s\n" % project_code) file.close() print "Saved to [%s]" % path # set up the server with the new information my._setup(my.protocol) # # Simple Ping Test # def ping(my): return my.server.ping(my.ticket) def fast_ping(my): return my.server.fast_ping(my.ticket) def fast_query(my, search_type, filters=[], limit=None): results = my.server.fast_query(my.ticket, search_type, filters, limit) return eval(results) def test_speed(my): return my.server.test_speed(my.ticket) def get_connection_info(my): '''simple test to get connection info''' return my.server.get_connection_info(my.ticket) # # Logging facilities # def log(my, level, message, category="default"): '''API Function: log(level, message, 
category="default") Log a message in the logging queue. It is often difficult to see output of a trigger unless you are running the server in debug mode. In production mode, the server sends the output to log files. The log files are general buffered. It cannot be predicted exactly when buffered output will be dumped to a file. This log() method will make a request to the server. The message will be immediately stored in the database in the debug log table. @param: level - critical|error|warning|info|debug - arbitrary debug level category message - freeform string describing the entry @keyparam: category - a label for the type of message being logged. It defaults to "default" ''' return my.server.log(my.ticket, level,message, category) def log_message(my, key, message, status="", category="default"): '''API Function: log_message(key, message, status=None, category="default") Log a message which will be seen by all who are subscribed to the message "key". Messages are often JSON strings of data. @params key - unique key for this message message - the message to be sent @keyparam status - arbitrary status for this message category - value to categorize this message @return string - "OK" ''' return my.server.log_message(my.ticket, key, message, status, category) def subscribe(my, key, category="default"): '''API Function: subscribe(key, category="default") Allow a user to subscribe to this message key. All messages belonging to the corresponding key will be available to users subscribed to it. @params key - unique key for this message @keyparam category - value to categorize this message @return subscription sobject ''' return my.server.subscribe(my.ticket, key, category) # # Transaction methods # def set_state(my, name, value): '''Set a state for this transaction @params name: name of state variable value: value of state variable ''' return my.server.set_state(my.ticket, name, value) def set_project_state(my, project): '''Covenience function to set the project state @params project: code of the project to set the state to ''' return my.set_state("project", project) def generate_ticket(my): '''API Function: generate_ticket() Ask the server to generate a ticket explicity used for your own commands @return - a string representing the transaction ticket ''' return my.server.generate_ticket(my.ticket) def start(my, title='', description='', transaction_ticket=''): '''API Function: start(title, description='') Start a transaction. All commands using the client API are bound in a transaction. The combination of start(), finish() and abort() makes it possible to group a series of API commands in a single transaction. The start/finish commands are not necessary for query operations (like query(...), get_snapshot(...), etc). @keyparam: title - the title of the command to be executed. This will show up on transaction log description - the description of the command. This is more detailed. transaction_ticket - optionally, one can provide the transaction ticket sequence @example: A full transaction inserting 10 shots. If an error occurs, all 10 inserts will be aborted. [code] server.start('Start adding shots') try: for i in range(0,10): server.insert("prod/shot", { 'code': 'XG%0.3d'%i } ) except: server.abort() else: server.finish("10 shots added") [/code] ''' my.get_info_from_user() if not my.has_server: raise TacticApiException("No server connected. 
If running a command line script, please execute get_ticket.py") ticket = my.server.start(my.login_ticket, my.project_code, \ title, description, transaction_ticket) my.set_transaction_ticket(ticket) #client_version = my.get_client_version() #server_version = my.get_server_version() # Switch to using api versions client_api_version = my.get_client_api_version() server_api_version = my.get_server_api_version() if client_api_version != server_api_version: raise TacticApiException("Server version [%s] does not match client api version [%s]" % (server_api_version, client_api_version)) my.set_server(my.server_name) # clear the handoff dir my.handoff_dir = None return ticket def finish(my, description=''): '''API Function: finish() End the current transaction and cleans it up @params: description: this will be recorded in the transaction log as the description of the transction @example: A full transaction inserting 10 shots. If an error occurs, all 10 inserts will be aborted. [code] server.start('Start adding shots') try: for i in range(0,10): server.insert("prod/shot", { 'code': 'XG%0.3d'%i } ) except: server.abort() else: server.finish("10 shots added") [/code] ''' if my.protocol == "local": return result = my.server.finish(my.ticket, description) my.set_login_ticket(my.login_ticket) #my.ticket = None #my.transaction_ticket = None return result def abort(my, ignore_files=False): '''API Function: abort(ignore_files=False) Abort the transaction. This undos all commands that occurred from the beginning of the transactions @keyparam: ignore_files: (boolean) - determines if any files moved into the repository are left as is. This is useful for very long processes where it is desireable to keep the files in the repository even on abort. @example: A full transaction inserting 10 shots. If an error occurs, all 10 inserts will be aborted. [code] server.start('Start adding shots') try: for i in range(0,10): server.insert("prod/shot", { 'code': 'XG%0.3d'%i } ) except: server.abort() else: server.finish("10 shots added") [/code] ''' if my.protocol == "local": return result = my.server.abort(my.ticket, ignore_files) my.ticket = None my.transaction_ticket = None return result # FIXME: have to fix these because these are post transaction!! def undo(my, transaction_ticket=None, transaction_id=None, ignore_files=False): '''API Function: undo(transaction_ticket=None, transaction_id=None, ignore_files=False) undo an operation. If no transaction id is given, then the last operation of this user on this project is undone @keyparam: transaction_ticket - explicitly undo a specific transaction transaction_id - explicitly undo a specific transaction by id ignore_files - flag which determines whether the files should also be undone. Useful for large preallcoated checkins. ''' if my.protocol == "local": return return my.server.undo(my.ticket, transaction_ticket, transaction_id, ignore_files) def redo(my, transaction_ticket=None, transaction_id=None): '''API Function: redo(transaction_ticket=None, transaction_id=None) Redo an operation. 
If no transaction id is given, then the last undone operation of this user on this project is redone @keyparam: transaction_ticket - explicitly redo a specific transaction transaction_id - explicitly redo a specific transaction by id ''' if my.protocol == "local": return return my.server.redo(my.ticket, transaction_ticket, transaction_id) # # Low Level Database methods # def get_column_info(my, search_type): '''API Function: get_column_info(search_type) Get column information of the table given a search type @param: search_type - the key identifying a type of sobject as registered in the search_type table. @return - a dictionary of info for each column ''' results = my.server.get_column_info(my.ticket, search_type) return results def get_table_info(my, search_type): '''API Function: get_table_info(search_type) Get column information of the table given a search type @param: search_type - the key identifying a type of sobject as registered in the search_type table. @return - a dictionary of info for each column ''' results = my.server.get_table_info(my.ticket, search_type) return results def get_related_types(my, search_type): '''API Function: get_related_types(search_type) Get related search types given a search type @param: search_type - the key identifying a type of sobject as registered in the search_type table. @return - list of search_types ''' results = my.server.get_related_types(my.ticket, search_type) return results def query(my, search_type, filters=[], columns=[], order_bys=[], show_retired=False, limit=None, offset=None, single=False, distinct=None, return_sobjects=False): '''API Function: query(search_type, filters=[], columns=[], order_bys=[], show_retired=False, limit=None, offset=None, single=False, distinct=None, return_sobjects=False) General query for sobject information @param: search_type - the key identifying a type of sobject as registered in the search_type table. @keyparam: filters - an array of filters to alter the search columns - an array of columns whose values should be retrieved order_bys - an array of order_by to alter the search show_retired - sets whether retired sobjects are also returned limit - sets the maximum number of results returned single - returns only a single object distinct - specify a distinct column return_sobjects - return sobjects instead of dictionary. This works only when using the API on the server. @return: list of dictionary/sobjects - Each array item represents an sobject and is a dictionary of name/value pairs @example: [code] filters = [] filters.append( ("code", "XG002") ) order_bys = ['timestamp desc'] columns = ['code'] server.query(ticket, "prod/shot", filters, columns, order_bys) [/code] The arguments "filters", "columns", and "order_bys" are optional The "filters" argument is a list. Each list item represents an individual filter. 
        The forms are as follows:
        [code]
        (column, value) -> where column = value
        (column, (value1, value2)) -> where column in (value1, value2)
        (column, op, value) -> where column op value
            where op is ('like', '<=', '>=', '>', '<', 'is', '~', '!~', '~*', '!~*')
        (value) -> where value
        [/code]
        '''
        #return my.server.query(my.ticket, search_type, filters, columns, order_bys, show_retired, limit, offset, single, return_sobjects)
        results = my.server.query(my.ticket, search_type, filters, columns, order_bys, show_retired, limit, offset, single, distinct, return_sobjects)
        if not return_sobjects and isinstance(results, basestring):
            results = eval(results)
        return results

    def insert(my, search_type, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True):
        '''API Function: insert(search_type, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)

        General insert for creating a new sobject

        @param:
        search_type - the search_type attribute of the sType
        data - a dictionary of name/value pairs which will be used to update
            the sobject defined by the search_key.
        parent_key - set the parent key for this sobject

        @keyparam:
        metadata - a dictionary of values that will be stored in the metadata attribute if available
        info - a dictionary of info to pass to the ApiClientCmd
        use_id - use id in the returned search key
        triggers - boolean to fire trigger on insert

        @return:
        dictionary - represents the sobject with its current data

        @example:
        insert a new asset
        [code]
        search_type = "prod/asset"

        data = {
            'code': 'chr001',
            'description': 'Main Character'
        }

        server.insert( search_type, data )
        [/code]

        insert a new note with a shot parent
        [code]
        # get shot key
        shot_key = server.build_search_key(search_type='prod/shot', code='XG001')

        search_type = "sthpw/note"

        data = {
            'context': 'model',
            'note': 'This is a modelling note',
            'login': server.get_login()
        }

        server.insert( search_type, data, parent_key=shot_key)
        [/code]

        insert a note without firing triggers
        [code]
        search_type = "sthpw/note"

        data = {
            'process': 'roto',
            'context': 'roto',
            'note': 'The keys look good.',
            'project_code': 'art'
        }

        server.insert( search_type, data, triggers=False )
        [/code]
        '''
        return my.server.insert(my.ticket, search_type, data, metadata, parent_key, info, use_id, triggers)

    def update(my, search_key, data={}, metadata={}, parent_key=None, info={}, use_id=False, triggers=True):
        '''API Function: update(search_key, data={}, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)

        General update for updating an sobject

        @param:
        search_key - a unique identifier key representing an sobject.
            Note: this can also be an array, in which case, the data will
            be updated to each sobject represented by this search key

        @keyparam:
        data - a dictionary of name/value pairs which will be used to update
            the sobject defined by the search_key
            Note: this can also be an array.  Each data dictionary element in
            the array will be applied to the corresponding search key
        parent_key - set the parent key for this sobject
        info - a dictionary of info to pass to the ApiClientCmd
        metadata - a dictionary of values that will be stored in the metadata attribute if available
        use_id - use id in the returned search key
        triggers - boolean to fire trigger on update

        @return:
        dictionary - represents the sobject with its current data.
        If search_key is an array, this will be an array of dictionaries
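
        @example:
        A short sketch (assumes an existing shot with code XG001 in the
        current project):
        [code]
        search_key = server.build_search_key('prod/shot', 'XG001')
        data = { 'description': 'Updated description' }
        server.update(search_key, data)
        [/code]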
        '''
        return my.server.update(my.ticket, search_key, data, metadata, parent_key, info, use_id, triggers)

    def update_multiple(my, data, triggers=True):
        '''API Function: update_multiple(data, triggers=True)

        Update several sobjects with different data in one function call.
        The data structure contains all the information needed to update
        and is formatted as follows:

        data = {
            search_key1: { column1: value1, column2: value2 },
            search_key2: { column1: value1, column2: value2 }
        }

        @param:
        data - data structure containing update information for all sobjects

        @keyparam:
        triggers - boolean to fire trigger on update

        @return:
        None
        '''
        return my.server.update_multiple(my.ticket, data, triggers)

    def insert_multiple(my, search_type, data, metadata=[], parent_key=None, use_id=False, triggers=True):
        '''API Function: insert_multiple(data, metadata=[], parent_key=None, use_id=False, triggers=True)

        Insert several sobjects in one function call.  The data structure
        contains all the info needed to insert and is formatted as follows:

        data = [
            { column1: value1, column2: value2, column3: value3 },
            { column1: value1, column2: value2, column3: value3 }
        ]

        metadata = [
            { color: blue, height: 180 },
            { color: orange, height: 170 }
        ]

        @param:
        search_type - the search_type attribute of the sType
        data - a list of dictionaries of name/value pairs.  Each dictionary
            element in the list creates one new sobject.

        @keyparam:
        parent_key - set the parent key for these sobjects
        use_id - boolean to control if id is used in the search_key in returning sobject dict
        triggers - boolean to fire trigger on insert

        @return:
        a list of all the inserted sobjects
        '''
        return my.server.insert_multiple(my.ticket, search_type, data, metadata, parent_key, use_id, triggers)

    def insert_update(my, search_key, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True):
        '''API Function: insert_update(search_key, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)

        Insert if the entry does not exist, update otherwise

        @param:
        search_key - a unique identifier key representing an sobject.
        data - a dictionary of name/value pairs which will be used to update
            the sobject defined by the search_key

        @keyparam:
        metadata - a dictionary of values that will be stored in the metadata attribute if available
        parent_key - set the parent key for this sobject
        info - a dictionary of info to pass to the ApiClientCmd
        use_id - use id in the returned search key
        triggers - boolean to fire trigger on insert

        @return:
        dictionary - represents the sobject with its current data.
        '''
        return my.server.insert_update(my.ticket, search_key, data, metadata, parent_key, info, use_id, triggers)

    def get_unique_sobject(my, search_type, data={}):
        '''API Function: get_unique_sobject(search_type, data={})

        This is a special convenience function which will query for an
        sobject and, if it doesn't exist, create it.  It assumes that this
        object should exist and spares the developer the logic of having to
        query for the sobject, test if it doesn't exist and then create it.
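        For example (a sketch, assuming a 'prod/asset' sType exists in the
        current project):
        [code]
        data = { 'code': 'chr001' }
        asset = server.get_unique_sobject('prod/asset', data)
        # 'asset' is the existing sobject if found, or a newly created one
        [/code]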
@param: search_type - the type of the sobject data - a dictionary of name/value pairs that uniquely identify this sobject @return: sobject - unique sobject matching the critieria in data ''' results = my.server.get_unique_sobject(my.ticket, search_type, data) return results def get_column_names(my, search_type): '''API Function: get_column_names(search_type) This method will get all of the column names associated with a search type @param: search_type - the search type used to query the columns for @return list of columns names ''' return my.server.get_column_names(my.ticket, search_type) # # Expression methods # def eval(my, expression, search_keys=[], mode=None, single=False, vars={}, show_retired=False): '''API Function: eval(expression, search_keys=[], mode=None, single=False, vars={}, show_retired=False) Evaluate the expression. This expression uses the TACTIC expression language to retrieve results. For more information, refer to the expression language documentation. @param: expression - string expression @keyparam: search_keys - the starting point for the expression. mode - string|expression - determines the starting mode of the expression single - True|False - True value forces a single return value vars - user defined variable show_retired - defaults to False to not return retired items @return: results of the expression. The results depend on the exact nature of the expression. @example: #1. Search for snapshots with context beginning with 'model' for the asset with the search key 'prod/asset?project=sample3d&id=96' [code] server = TacticServerStub.get() exp = "@SOBJECT(sthpw/snapshot['context','EQ','^model'])" result = server.eval(exp, search_keys=['prod/asset?project=sample3d&id=96']) [/code] Please refer to the expression language documentation for numerous examples on how to use the expression language. ''' #return my.server.eval(my.ticket, expression, search_keys, mode, single, vars) results = my.server.eval(my.ticket, expression, search_keys, mode, single, vars, show_retired) try: return eval(results) except: return results # # Higher Level Object methods # def create_search_type(my, search_type, title, description="", has_pipeline=False): '''API Function: create_search_type(search_type, title, description="", has_pipeline=False) Create a new search type @param: search_type - Newly defined search_type title - readable title to display this search type as @keyparam: description - a brief description of this search type has_pipeline - determines whether this search type goes through a pipeline. Simply puts a pipeline_code column in the table. @return string - the newly created search type ''' return my.server.create_search_type(my.ticket, search_type, title, description, has_pipeline) def add_column_to_search_type(my, search_type, column_name, column_type): '''Adds a new column to the search type @params search_type - the search type that the new column will be added to column_name - the name of the column to add to the database column_type - the type of the column to add to the database @return True if column was created, False if column exists ''' return my.server.add_column_to_search_type(my.ticket, search_type, column_name, column_type) def get_by_search_key(my, search_key): '''API Function: get_by_search_key(search_key) Get the info on an sobject based on search key @param: search_key - the key identifying a type of sobject as registered in the search_type table. 
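        For example (a sketch, assuming an asset with code chr001 exists in
        project sample3d):
        [code]
        search_key = 'prod/asset?project=sample3d&code=chr001'
        sobject = server.get_by_search_key(search_key)
        [/code]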
        @return:
        list of dictionary - sobjects that represent values of the sobject in
        the form of name:value pairs
        '''
        return my.server.get_by_search_key(my.ticket, search_key)

    def get_by_code(my, search_type, code):
        '''API Function: get_by_code(search_type, code)
        Get the info on an sobject based on search code

        @param:
        search_type - the search_type of the sobject to search for
        code - the code of the sobject to search for

        @return:
        sobject - a dictionary that represents values of the sobject in the
        form name/value pairs
        '''
        return my.server.get_by_code(my.ticket, search_type, code)

    def delete_sobject(my, search_key, include_dependencies=False):
        '''API Function: delete_sobject(search_key, include_dependencies=False)
        Invoke the delete method.  Note: this function may fail due to
        dependencies.  TACTIC will not cascade delete.  This function should
        be used with extreme caution because, if successful, it will
        permanently remove the existence of an sobject

        @param:
        search_key - a unique identifier key representing an sobject.
        Note: this can also be an array.

        @keyparam:
        include_dependencies - True/False

        @return:
        dictionary - an sobject that represents values of the sobject in the
        form name:value pairs
        '''
        return my.server.delete_sobject(my.ticket, search_key, include_dependencies)

    def retire_sobject(my, search_key):
        '''API Function: retire_sobject(search_key)
        Invoke the retire method. This is preferred over delete_sobject if
        you are not sure whether other sobjects have dependencies on this one.

        @param:
        search_key - the unique key identifying the sobject.

        @return:
        dictionary - sobject that represents values of the sobject in the
        form name:value pairs
        '''
        return my.server.retire_sobject(my.ticket, search_key)

    def reactivate_sobject(my, search_key):
        '''API Function: reactivate_sobject(search_key)
        Invoke the reactivate method.

        @param:
        search_key - the unique key identifying the sobject.

        @return:
        dictionary - sobject that represents values of the sobject in the
        form name:value pairs
        '''
        return my.server.reactivate_sobject(my.ticket, search_key)

    def set_widget_setting(my, key, value):
        '''API Function: set_widget_setting(key, value)
        Set a widget setting for the current user and project

        @param:
        key - unique key to identify this setting
        value - value the setting should be set to

        @return:
        None
        '''
        my.server.set_widget_setting(my.ticket, key, value)

    def get_widget_setting(my, key):
        '''API Function: get_widget_setting(key)
        Get a widget setting for the current user and project

        @param:
        key - unique key to identify this setting

        @return:
        value of setting
        '''
        return my.server.get_widget_setting(my.ticket, key)

    #
    # sType Hierarchy methods
    #
    def get_parent(my, search_key, columns=[], show_retired=False):
        '''API Function: get_parent(search_key, columns=[], show_retired=False)
        Get the parent of an sobject.
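        For example (a sketch, assuming a note whose search key is already
        known):
        [code]
        note_key = 'sthpw/note?code=NOTE00001'   # hypothetical search key
        parent = server.get_parent(note_key, columns=['code'])
        [/code]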
        @param:
        search_key - a unique identifier key representing an sobject

        @keyparam:
        columns - the columns that will be returned in the sobject
        show_retired - defaults to False so it does not show a retired parent
            if that's the case

        @return:
        dictionary - the parent sobject
        '''
        return my.server.get_parent(my.ticket, search_key, columns, show_retired)

    def get_all_children(my, search_key, child_type, filters=[], columns=[]):
        '''API Function: get_all_children(search_key, child_type, filters=[], columns=[])
        Get all children of a particular child type of an sobject

        @param:
        search_key - a unique identifier key representing an sobject
        child_type - the search_type of the children to search for

        @keyparam:
        filters - extra filters on the query : see query method for examples
        columns - list of column names to be included in the returned dictionary

        @return:
        list of dictionary - a list of sobject dictionaries
        '''
        #filters = []
        return my.server.get_all_children(my.ticket, search_key, child_type, filters, columns)

    def get_parent_type(my, search_key):
        '''API Function: get_parent_type(search_key)
        Get the parent search type

        @param:
        search_key - a unique identifier key representing an sobject

        @return:
        string - the parent search_type
        '''
        return my.server.get_parent_type(my.ticket, search_key)

    def get_child_types(my, search_key):
        '''API Function: get_child_types(search_key)
        Get all the child search types

        @param:
        search_key - a unique identifier key representing an sobject

        @return:
        list - the child search types
        '''
        return my.server.get_child_types(my.ticket, search_key)

    def get_types_from_instance(my, instance_type):
        '''API Function: get_types_from_instance(instance_type)
        Get the connector types from an instance type

        @param:
        instance_type - the search type of the instance

        @return:
        tuple - (from_type, parent_type)
        a tuple with the from_type and the parent_type.
The from_type is the connector type and the parent type is the search type of the parent of the instance ''' return my.server.get_types_from_instance(my.ticket, instance_type) def connect_sobjects(my, src_sobject, dst_sobject, context='default'): '''API Function: connect_sobjects(src_sobject, dst_sobject, context='default') Connect two sobjects together @param: src_sobject - the original sobject from which the connection starts dst_sobject - the sobject to which the connection connects to @keyparam: context - an arbirarty parameter which defines type of connection @return: dictionary - the last connection sobject created ''' return my.server.connect_sobjects(my.ticket, src_sobject, dst_sobject, context) def get_connected_sobjects(my, src_sobject, context='default'): '''API Function: get_connected_sobjects(src_sobject, context='default') Get all of the connected sobjects @param: src_sobject - the original sobject from which the connection starts @keyparam: context - an arbitrary parameter which defines type of connection @return: list - a list of connected sobjects ''' return my.server.get_connected_sobjects(my.ticket, src_sobject, context) def get_connected_sobject(my, src_sobject, context='default'): '''API Function: get_connected_sobject(src_sobject, context='default') Get the connected sobject @params src_sobject - the original sobject from which the connection starts @keyparam: context - an arbirarty parameter which defines type of connection @return: dict - a single connected sobject ''' return my.server.get_connected_sobject(my.ticket, src_sobject, context) # # upload/download methods # def download(my, url, to_dir=".", filename='', md5_checksum=""): '''API Function: download(my, url, to_dir=".", filename='', md5_checksum="") Download a file from a given url @param: url - the url source location of the file @keyparam: to_dir - the directory to download to filename - the filename to download to, defaults to original filename md5_checksum - an md5 checksum to match the file against @return: string - path of the file donwloaded ''' # use url filename by default if not filename: filename = os.path.basename(url) # download to temp_dir #if not to_dir: # to_dir = my.get_tmpdir() # make sure the directory exists if not os.path.exists(to_dir): os.makedirs(to_dir) to_path = "%s/%s" % (to_dir, filename) # check if this file is already downloaded. if so, skip if os.path.exists(to_path): # if it exists, check the MD5 checksum if md5_checksum: if my._md5_check(to_path, md5_checksum): print "skipping '%s', already exists" % to_path return to_path else: # always download if no md5_checksum available pass f = urllib.urlopen(url) file = open(to_path, "wb") file.write( f.read() ) file.close() f.close() # check for downloaded file # COMMENTED OUT for now since it does not work well with icons #if md5_checksum and not my._md5_check(to_path, md5_checksum): # raise TacticException('Downloaded file [%s] in local repo failed md5 check. This file may be missing on the server or corrupted.'%to_path) """ print "starting download" try: import urllib2 file = open(to_path, "wb") req = urllib2.urlopen(url) try: while True: buffer = req.read(1024*100) print "read: ", len(buffer) if not buffer: break file.write( buffer ) finally: print "closing ...." req.close() file.close() except urllib2.URLError, e: raise Exception('%s - %s' % (e,url)) print "... 
done download" """ return to_path def upload_file(my, path, base_dir=None): '''API Function: upload_file(path) Use http protocol to upload a file through http @param: path - the name of the file that will be uploaded ''' from common import UploadMultipart upload = UploadMultipart() upload.set_ticket(my.transaction_ticket) if my.server_name.startswith("http://") or my.server_name.startswith("https://"): upload_server_url = "%s/tactic/default/UploadServer/" % my.server_name else: upload_server_url = "http://%s/tactic/default/UploadServer/" % my.server_name if base_dir: basename = os.path.basename(path) dirname = os.path.dirname(path) if not path.startswith(dirname): raise TacticApiException("Path [%s] does not start with base_dir [%s]" % (path, base_dir)) base_dir = base_dir.rstrip("/") sub_dir = dirname.replace("%s/" % base_dir, "") if sub_dir: upload.set_subdir(sub_dir) upload.set_upload_server(upload_server_url) #upload.set_subdir("blah") upload.execute(path) # upload a file #filename = os.path.basename(path) #file = open(path, 'rb') #data = xmlrpclib.Binary( file.read() ) #file.close() #return my.server.upload_file(my.transaction_ticket, filename, data) def upload_group(my, path, file_range): '''uses http protocol to upload a sequences of files through HTTP @params path - the name of the file that will be uploaded file_range - string describing range of frames in the form '1-5/1' ''' start, end = file_range.split("-") start = int(start) end = int(end) if path.find("####") != -1: path = path.replace("####", "%0.4d") # TODO: add full range functionality here for frame in range(start, end+1): full_path = path % frame my.upload_file(full_path) # file group functions def _get_file_range(my, file_range): '''get the file_range''' frame_by = 1 if file_range.find("/") != -1: file_range, frame_by = file_range.split("/") frame_by = int(frame_by) frame_start, frame_end = file_range.split("-") frame_start = int(frame_start) frame_end = int(frame_end) return frame_start, frame_end, frame_by def _expand_paths(my, file_path, file_range): '''expands the file paths, replacing # as specified in the file_range @param - file_path with #### or %0.4d notation @file_range - a tuple''' file_paths = [] frame_start, frame_end, frame_by = my._get_file_range(file_range) # support %0.4d notation if file_path.find('#') == -1: for i in range(frame_start, frame_end+1, frame_by): expanded = file_path % i file_paths.append( expanded ) else: # find out the number of #'s in the path padding = len( file_path[file_path.index('#'):file_path.rindex('#') ])+1 for i in range(frame_start, frame_end+1, frame_by): expanded = file_path.replace( '#'*padding, str(i).zfill(padding) ) file_paths.append(expanded) return file_paths # # Checkin/out methods # def create_snapshot(my, search_key, context, snapshot_type="file", description="No description", is_current=True, level_key=None, is_revision=False, triggers=True): '''API Function: create_snapshot(search_key, context, snapshot_type="file", description="No description", is_current=True, level_key=None, is_revision=False, triggers=True ) Create an empty snapshot @param: search_key - a unique identifier key representing an sobject context - the context of the checkin @keyparam: snapshot_type - [optional] descibes what kind of a snapshot this is. 
More information about a snapshot type can be found in the prod/snapshot_type sobject description - [optional] optional description for this checkin is_current - flag to determine if this checkin is to be set as current is_revision - flag to set this as a revision instead of a version level_key - the unique identifier of the level that this is to be checked into triggers - boolean to fire triggers on insert @return: dictionary - representation of the snapshot created for this checkin ''' return my.server.create_snapshot(my.ticket, search_key, context, snapshot_type, description, is_current, level_key, is_revision, triggers) def simple_checkin(my, search_key, context, file_path, snapshot_type="file", description="No description", use_handoff_dir=False, file_type="main", is_current=True, level_key=None, breadcrumb=False, metadata={}, mode='upload', is_revision=False, info={} , keep_file_name=False, create_icon=True, checkin_cls='pyasm.checkin.FileCheckin', context_index_padding=None, checkin_type="", source_path=None, version=None ): '''API Function: simple_checkin( search_key, context, file_path, snapshot_type="file", description="No description", use_handoff_dir=False, file_type="main", is_current=True, level_key=None, breadcrumb=False, metadata={}, mode=None, is_revision=False, info={}, keep_file_name=False, create_icon=True, checkin_cls='pyasm.checkin.FileCheckin', context_index_padding=None, checkin_type="strict", source_path=None, version=None ) Simple method that checks in a file. @param: search_key - a unique identifier key representing an sobject context - the context of the checkin file_path - path of the file that was previously uploaded @keyparam: snapshot_type - [optional] descibes what kind of a snapshot this is. More information about a snapshot type can be found in the prod/snapshot_type sobject description - [optional] optional description for this checkin file_type - [optional] optional description for this file_type is_current - flag to determine if this checkin is to be set as current level_key - the unique identifier of the level that this is to be checked into breadcrumb - flag to leave a .snapshot breadcrumb file containing information about what happened to a checked in file metadata - a dictionary of values that will be stored as metadata on the snapshot mode - inplace, upload, copy, move is_revision - flag to set this as a revision instead of a version create_icon - flag to create an icon on checkin info - dict of info to pass to the ApiClientCmd keep_file_name - keep the original file name checkin_cls - checkin class context_index_padding - determines the padding used for context indexing: ie: design/0001 checkin_type - auto or strict which controls whether to auto create versionless source_path - explicitly give the source path version - force a version for this check-in @return: dictionary - representation of the snapshot created for this checkin ''' mode_options = ['upload', 'uploaded', 'copy', 'move', 'local','inplace'] if mode: if mode not in mode_options: raise TacticApiException('Mode must be in %s' % mode_options) if mode == 'upload': my.upload_file(file_path) elif mode == 'uploaded': # remap file path: this mode is only used locally. 
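                # (illustrative sketch) with a server-side upload directory
                # of, say, /home/apache/tactic/temp/upload, a bare file name
                # like "plate.0001.jpg" is remapped below to
                # "/home/apache/tactic/temp/upload/plate.0001.jpg"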
from pyasm.common import Environment upload_dir = Environment.get_upload_dir() file_path = "%s/%s" % (upload_dir, file_path) elif mode in ['copy', 'move']: handoff_dir = my.get_handoff_dir() use_handoff_dir = True # make sure that handoff dir is empty try: shutil.rmtree(handoff_dir) os.makedirs(handoff_dir) os.chmod(handoff_dir, 0777) except OSError, e: sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s" % (handoff_dir, e.__str__())) # copy or move the tree basename = os.path.basename(file_path) if mode == 'move': shutil.move(file_path, "%s/%s" % (handoff_dir, basename)) mode = 'create' elif mode == 'copy': shutil.copy(file_path, "%s/%s" % (handoff_dir, basename)) # it moves to repo from handoff dir later mode = 'create' elif mode in ['local']: # do nothing pass # check in the file to the server snapshot = my.server.simple_checkin(my.ticket, search_key, context, file_path, snapshot_type, description, use_handoff_dir, file_type, is_current, level_key, metadata, mode, is_revision, info, keep_file_name, create_icon, checkin_cls, context_index_padding, checkin_type, source_path, version) if mode == 'local': # get the naming conventions and move the file to the local repo files = my.server.eval(my.ticket, "@SOBJECT(sthpw/file)", snapshot) # FIXME: this only works on the python implementation .. should # use JSON files = eval(files) # TODO: maybe cache this?? base_dirs = my.server.get_base_dirs(my.ticket) if os.name == 'nt': client_repo_dir = base_dirs.get("win32_local_repo_dir") else: client_repo_dir = base_dirs.get("linux_local_repo_dir") if not client_repo_dir: raise TacticApiException('No local_repo_dir defined in server config file') for file in files: rel_path = "%s/%s" %( file.get('relative_dir'), file.get('file_name')) repo_path = "%s/%s" % (client_repo_dir, rel_path) repo_dir = os.path.dirname(repo_path) if not os.path.exists(repo_dir): os.makedirs(repo_dir) basename = os.path.basename(repo_path) dirname = os.path.dirname(repo_path) temp_repo_path = "%s/.%s.temp" % (dirname, basename) shutil.copy(file_path, temp_repo_path) shutil.move(temp_repo_path, repo_path) # leave a breadcrumb if breadcrumb: snapshot_code = snapshot.get('code') full_snapshot_xml = my.get_full_snapshot_xml(snapshot_code) snapshot_path = "%s.snapshot" % file_path file = open(snapshot_path, 'wb') file.write(full_snapshot_xml) file.close() return snapshot def group_checkin(my, search_key, context, file_path, file_range, snapshot_type="sequence", description="", file_type='main', metadata={}, mode=None, is_revision=False , info={} ): '''API Function: group_checkin(search_key, context, file_path, file_range, snapshot_type="sequence", description="", file_type='main', metadata={}, mode=None, is_revision=False, info={} ) Check in a range of files. A range of file is defined as any group of files that have some sequence of numbers grouping them together. An example of this includes a range frames that are rendered. Although it is possible to add each frame in a range using add_file, adding them as as sequence is lightweight, often significantly reducing the number of database entries required. Also, it is understood that test files form a range of related files, so that other optimizations and manipulations can be operated on these files accordingly. @param: search_key - a unique identifier key representing an sobject file_path - expression for file range: ./blah.####.jpg file_type - the typ of file this is checked in as. 
Default = 'main' file_range - string describing range of frames in the form '1-5/1' @keyparam: snapshot_type - type of snapshot this checkin will have description - description related to this checkin file_type - the type of file that will be associated with this group metadata - add metadata to snapshot mode - determines whether the files passed in should be copied, moved or uploaded. By default, this is a manual process (for backwards compatibility) is_revision - flag to set this as a revision instead of a version info - dict of info to pass to the ApiClientCmd @return: dictionary - snapshot ''' mode_options = ['upload', 'copy', 'move', 'inplace'] if mode: if mode not in mode_options: raise TacticApiException('Mode must be in %s' % mode_options) # brute force method if mode == 'move': handoff_dir = my.get_handoff_dir() expanded_paths = my._expand_paths(file_path, file_range) for path in expanded_paths: basename = os.path.basename(path) shutil.move(path, '%s/%s' %(handoff_dir, basename)) use_handoff_dir = True mode = 'create' elif mode == 'copy': handoff_dir = my.get_handoff_dir() expanded_paths = my._expand_paths(file_path, file_range) for path in expanded_paths: basename = os.path.basename(path) shutil.copy(path, '%s/%s' %(handoff_dir, basename)) use_handoff_dir = True # it moves to repo from handoff dir later mode = 'create' elif mode == 'upload': expanded_paths = my._expand_paths(file_path, file_range) for path in expanded_paths: my.upload_file(path) use_handoff_dir = False elif mode == 'inplace': use_handoff_dir = False # get the absolute path file_path = os.path.abspath(file_path) return my.server.group_checkin(my.ticket, search_key, context, file_path, file_range, snapshot_type, description, file_type, metadata, mode, is_revision, info ) def directory_checkin(my, search_key, context, dir, snapshot_type="directory", description="No description", file_type='main', is_current=True, level_key=None, metadata={}, mode="copy", is_revision=False, checkin_type='strict'): '''API Function: directory_checkin(search_key, context, dir, snapshot_type="directory", description="No description", file_type='main', is_current=True, level_key=None, metadata={}, mode="copy", is_revision=False, checkin_type="strict") Check in a directory of files. This informs TACTIC to treat the entire directory as single entity without regard to the structure of the contents. TACTIC will not know about the individual files and the directory hierarchy within the base directory and it it left up to the and external program to intepret and understand this. This is often used when logic on the exact file structure exists in some external source outside of TACTIC and it is deemed too complicated to map this into TACTIC's snapshot definition. @param: search_key - a unique identifier key representing an sobject dir - the directory that needs to be checked in @keyparam: snapshot_type - type of snapshot this checkin will have description - description related to this checkin file_type - the type of file that will be associated with this group is_current - makes this snapshot current level_key - the search key of the level if used metadata - add metadata to snapshot mode - determines whether the files passed in should be copied, moved or uploaded. 
            By default, this is 'copy'
        is_revision - flag to set this as a revision instead of a version
        checkin_type - auto or strict which controls whether to auto create versionless

        @return:
        dictionary - snapshot
        '''
        if mode not in ['copy', 'move', 'inplace', 'local']:
            raise TacticApiException('Mode must be in [copy, move, inplace, local]')

        handoff_dir = my.get_handoff_dir()
        # make sure that handoff dir is empty
        try:
            shutil.rmtree(handoff_dir)
            os.makedirs(handoff_dir)
            os.chmod(handoff_dir, 0777)
        except OSError, e:
            sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s" % (handoff_dir, e.__str__()))

        # strip the trailing / or \ if any
        m = re.match(r'(.*)([/|\\]$)', dir)
        if m:
            dir = m.groups()[0]

        # copy or move the tree to the handoff directory
        basename = os.path.basename(dir)
        if mode == 'move':
            shutil.move(dir, "%s/%s" % (handoff_dir, basename))
            mode = 'create'
        elif mode == 'copy':
            shutil.copytree(dir, "%s/%s" % (handoff_dir, basename))
            # it moves to repo from handoff dir later
            mode = 'create'

        use_handoff_dir = True

        # some default data
        info = {}
        keep_file_name = False
        create_icon = False
        checkin_cls = "pyasm.checkin.FileCheckin"
        context_index_padding = None
        source_path = None
        version = None

        snapshot = my.server.simple_checkin(my.ticket, search_key, context, dir, snapshot_type, description, use_handoff_dir, file_type, is_current, level_key, metadata, mode, is_revision, info, keep_file_name, create_icon, checkin_cls, context_index_padding, checkin_type, source_path, version)

        if mode == 'local':
            # get the naming conventions and move the file to the local repo
            files = my.server.eval(my.ticket, "@SOBJECT(sthpw/file)", snapshot)

            # FIXME: this only works on the python implementation
            files = eval(files)

            for file in files:
                rel_path = "%s/%s" % (file.get('relative_dir'), file.get('file_name'))
                base_dirs = my.server.get_base_dirs(my.ticket)
                if os.name == 'nt':
                    client_repo_dir = base_dirs.get("win32_local_base_dir")
                else:
                    client_repo_dir = base_dirs.get("linux_local_base_dir")

                repo_path = "%s/%s" % (client_repo_dir, rel_path)
                repo_dir = os.path.dirname(repo_path)
                if not os.path.exists(repo_dir):
                    os.makedirs(repo_dir)
                shutil.copytree(dir, repo_path)

        return snapshot

    def add_dependency(my, snapshot_code, file_path, type='ref', tag='main'):
        '''API Function: add_dependency(snapshot_code, file_path, type='ref')

        Append a dependency reference to an existing check-in.  All files
        are uniquely contained by a particular snapshot.  Presently, this
        method does a reverse lookup by file name.  This assumes that the
        filename is unique within the system, so it is not recommended
        unless it is known that naming conventions will produce unique file
        names for this particular file.  If this is not the case, it is
        recommended that add_dependency_by_code() is used.

        @param:
        snapshot_code - the unique code identifier of a snapshot
        file_path - the path of the dependent file.  This function is able
            to reverse-map the file_path to the appropriate snapshot

        @keyparam:
        type - type of dependency.
Values include 'ref' and 'input_ref' ref = hierarchical reference: ie A contains B input_ref = input reference: ie: A was used to create B tag - a tagged keyword can be added to a dependency to categorize the different dependencies that exist in a snapshot @return: dictionary - the resulting snapshot ''' return my.server.add_dependency(my.ticket, snapshot_code, file_path, type, tag) def add_dependency_by_code(my, to_snapshot_code, from_snapshot_code, type='ref', tag='main'): '''API Function: add_dependency_by_code(to_snapshot_code, from_snapshot_code, type='ref') Append a dependency reference to an existing checkin. This dependency is used to connect various checkins together creating a separate dependency tree for each checkin. @param: to_snapshot_code: the snapshot code which the dependency will be connected to from_snapshot_code: the snapshot code which the dependency will be connected from type - type of dependency. Values include 'ref' and 'input_ref' ref = hierarchical reference: ie A contains B input_ref - input reference: ie: A was used to create B tag - a tagged keyword can be added to a dependency to categorize the different dependencies that exist in a snapshot @return: dictionary - the resulting snapshot ''' return my.server.add_dependency_by_code(my.ticket, to_snapshot_code, from_snapshot_code, type, tag) def add_file(my, snapshot_code, file_path, file_type='main', use_handoff_dir=False, mode=None, create_icon=False, dir_naming='', file_naming='', checkin_type='strict'): '''API Function: add_file(snapshot_code, file_path, file_type='main', use_handoff_dir=False, mode=None, create_icon=False) Add a file to an already existing snapshot. This method is used in piecewise checkins. A blank snapshot can be created using create_snapshot(). This method can then be used to successively add files to the snapshot. In order to check in the file, the server will need to have access to these files. There are a number of ways of getting the files to the server. When using copy or move mode, the files are either copied or moved to the "handoff_dir". This directory is an agreed upon directory in which to handoff the files to the server. This mode is generally used for checking in user files. For heavy bandwidth checkins, it is recommended to user preallocated checkins. @param: snapshot_code - the unique code identifier of a snapshot file_path - path of the file to add to the snapshot. Optional: this can also be an array to add multiple files at once. This has much faster performance that adding one file at a time. Also, note that in this case, file_types must be an array of equal size. @keyparam: file_type - type of the file to be added. Optional: this can also be an array. See file_path argument for more information. use_handoff_dir - DEPRECATED: (use mode arg) use handoff dir to checkin file. The handoff dir is an agreed upon directory between the client and server to transfer files. mode - upload|copy|move|manual|inplace - determine the protocol which delievers the file to the server. create_icon - (True|False) determine whether to create an icon for this appended file. Only 1 icon should be created for each snapshot. 
        dir_naming - explicitly set a dir_naming expression to use
        file_naming - explicitly set a file_naming expression to use
        checkin_type - auto or strict which controls whether to auto create
            versionless and adopt some default dir/file naming

        @return:
        dictionary - the resulting snapshot

        @example:
        This will create a blank model snapshot for character chr001 and
        add a file

        [code]
        search_type = 'prod/asset'
        code = 'chr001'
        search_key = server.build_search_key(search_type, code)

        context = 'model'
        path = "./my_model.ma"

        snapshot = server.create_snapshot(search_key, context)
        server.add_file( snapshot.get('code'), path )
        [/code]

        Different files should be separated by file type.  For example,
        to check in both a maya and houdini file in the same snapshot:

        [code]
        maya_path = "./my_model.ma"
        houdini_path = "./my_model.hip"

        server.add_file( snapshot_code, maya_path, file_type='maya' )
        server.add_file( snapshot_code, houdini_path, file_type='houdini' )
        [/code]

        To transfer files by uploading (using http protocol):

        [code]
        server.add_file( snapshot_code, maya_path, mode='upload' )
        [/code]

        To create an icon for this file

        [code]
        path = 'image.jpg'
        server.add_file( snapshot_code, path, mode='upload', create_icon=True )
        [/code]

        To add multiple files at once

        [code]
        file_paths = [maya_path, houdini_path]
        file_types = ['maya', 'houdini']
        server.add_file( snapshot_code, file_paths, file_type=file_types, mode='upload')
        [/code]
        '''
        if type(file_path) != types.ListType:
            file_paths = [file_path]
        else:
            file_paths = file_path
        if type(file_type) != types.ListType:
            file_types = [file_type]
        else:
            file_types = file_type

        for path in file_paths:
            if os.path.isdir(path):
                raise TacticApiException('[%s] is a directory. Use add_directory() instead' % path)

        mode_options = ['upload', 'copy', 'move', 'preallocate', 'inplace']
        if mode:
            if mode in ['copy', 'move']:
                handoff_dir = my.get_handoff_dir()
                use_handoff_dir = True
                # make sure that handoff dir is empty
                try:
                    shutil.rmtree(handoff_dir)
                    os.makedirs(handoff_dir)
                except OSError, e:
                    sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s" % (handoff_dir, e.__str__()))

            for i, file_path in enumerate(file_paths):
                file_type = file_types[i]

                if mode not in mode_options:
                    raise TacticApiException('Mode must be in %s' % mode_options)

                if mode == 'upload':
                    my.upload_file(file_path)
                    use_handoff_dir = False
                elif mode in ['copy', 'move']:
                    # copy or move the tree
                    basename = os.path.basename(file_path)
                    if mode == 'move':
                        shutil.move(file_path, "%s/%s" % (handoff_dir, basename))
                    elif mode == 'copy':
                        shutil.copy(file_path, "%s/%s" % (handoff_dir, basename))
                    mode = 'create'

        return my.server.add_file(my.ticket, snapshot_code, file_paths, file_types, use_handoff_dir, mode, create_icon, dir_naming, file_naming, checkin_type)

    def remove_file(my, snapshot_code, file_type):
        return my.server.remove_file(my.ticket, snapshot_code, file_type)

    def add_group(my, snapshot_code, file_path, file_type, file_range, use_handoff_dir=False, mode=None):
        '''API Function: add_group(snapshot_code, file_path, file_type, file_range, use_handoff_dir=False, mode=None)
        Add a file range to an already existing snapshot

        @param:
        snapshot_code - the unique code identifier of a snapshot
        file_path - path of the file to add to the snapshot
        file_type - type of the file to be added.
        file_range - range with format s-e/b

        @keyparam:
        use_handoff_dir - use handoff dir to checkin file
        mode - one of 'copy','move','preallocate'

        @return:
        dictionary - the resulting snapshot
        '''
        mode_options = ['upload', 'copy', 'move', 'preallocate']
        if mode:
            if mode not in mode_options:
                raise TacticApiException('Mode must be in %s' % mode_options)

            #dir = os.path.dirname(file_path)
            handoff_dir = my.get_handoff_dir()
            if mode == 'move':
                expanded_paths = my._expand_paths(file_path, file_range)
                for path in expanded_paths:
                    basename = os.path.basename(path)
                    shutil.move(path, '%s/%s' % (handoff_dir, basename))
                use_handoff_dir = True
                mode = 'create'
            elif mode == 'copy':
                expanded_paths = my._expand_paths(file_path, file_range)
                for path in expanded_paths:
                    basename = os.path.basename(path)
                    shutil.copy(path, '%s/%s' % (handoff_dir, basename))
                use_handoff_dir = True
                mode = 'create'
            elif mode == 'upload':
                my.upload_group(file_path, file_range)
                use_handoff_dir = False
            elif mode == 'preallocate':
                use_handoff_dir = True

        return my.server.add_group(my.ticket, snapshot_code, file_path, file_type, file_range, use_handoff_dir, mode)

    def add_directory(my, snapshot_code, dir, file_type='main', mode="copy", dir_naming='', file_naming=''):
        '''API Function: add_directory(snapshot_code, dir, file_type='main', mode="copy", dir_naming='', file_naming='')
        Add a full directory to an already existing checkin.  This informs
        TACTIC to treat the entire directory as a single entity without regard
        to the structure of the contents.  TACTIC will not know about the
        individual files and the directory hierarchy within the base directory
        and it is left up to an external program to interpret and understand
        this.

        This is often used when logic on the exact file structure exists in
        some external source outside of TACTIC and it is deemed too
        complicated to map this into TACTIC's snapshot definition.
        @param:
        snapshot_code - a unique identifier key representing an sobject
        dir - the directory that needs to be checked in

        @keyparam:
        file_type - file type is used more as snapshot type here
        mode - copy, move, preallocate, manual, inplace
        dir_naming - explicitly set a dir_naming expression to use
        file_naming - explicitly set a file_naming expression to use

        @return:
        dictionary - snapshot

        @example:
        This will create a new snapshot for a search_key and add a directory using manual mode

        [code]
        dir = 'C:/images'
        handoff_dir = my.server.get_handoff_dir()
        shutil.copytree('%s/subfolder' % dir, '%s/images/subfolder' % handoff_dir)

        snapshot_dict = my.server.create_snapshot(search_key, context='render')
        snapshot_code = snapshot_dict.get('code')
        my.server.add_directory(snapshot_code, dir, file_type='dir', mode='manual')
        [/code]
        '''
        if mode not in ['copy', 'move', 'preallocate', 'manual', 'inplace']:
            raise TacticApiException('Mode must be one of [copy, move, preallocate, manual, inplace]')

        if mode in ['copy', 'move']:
            handoff_dir = my.get_handoff_dir()
            # make sure that handoff dir is empty
            try:
                shutil.rmtree(handoff_dir)
                os.makedirs(handoff_dir)
            except OSError, e:
                sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s" % (handoff_dir, e.__str__()))

            # copy or move the tree
            basename = os.path.basename(dir)
            if mode == 'move':
                shutil.move(dir, "%s/%s" % (handoff_dir, basename))
            elif mode == 'copy':
                shutil.copytree(dir, "%s/%s" % (handoff_dir, basename))
            mode = 'create'

        use_handoff_dir = True
        create_icon = False
        return my.server.add_file(my.ticket, snapshot_code, dir, file_type, use_handoff_dir, mode, create_icon, dir_naming, file_naming)

    def checkout(my, search_key, context="publish", version=-1, file_type='main', to_dir=".", level_key=None, to_sandbox_dir=False, mode='copy'):
        '''API Function: checkout(search_key, context="publish", version=-1, file_type='main', to_dir=".", level_key=None, to_sandbox_dir=False, mode='copy')
        Check out files defined in a snapshot from the repository.  This will
        copy files to a particular directory so that a user can work on them.

        @param:
        search_key - a unique identifier key representing an sobject
        context - context of the snapshot

        @keyparam:
        version - version of the snapshot
        file_type - file type defaults to 'main'.  If set to '*', all paths are checked out
        level_key - the unique identifier of the level in the form of a search key
        to_dir - destination directory defaults to '.'
        to_sandbox_dir - (True|False) destination directory defaults to sandbox_dir (overrides "to_dir" arg)
        mode - (copy|download) - determines the protocol that will be used to copy the files to the destination location

        @return:
        list - a list of paths that were checked out
        '''
        if not os.path.isdir(to_dir):
            raise TacticApiException("[%s] does not exist or is not a directory" % to_dir)

        to_dir = to_dir.replace("\\", "/")
        #repo_paths = my.server.checkout(my.ticket, search_key, context, version, file_type, level_key)
        paths = my.server.checkout(my.ticket, search_key, context, version, file_type, level_key)

        client_lib_paths = paths['client_lib_paths']
        sandbox_paths = paths['sandbox_paths']
        web_paths = paths['web_paths']

        to_paths = []
        for i, client_lib_path in enumerate(client_lib_paths):
            if to_sandbox_dir:
                to_path = sandbox_paths[i]
                filename = os.path.basename(to_path)
            else:
                filename = os.path.basename(client_lib_path)
                to_path = "%s/%s" % (to_dir, filename)

            to_paths.append(to_path)

            # copy the file from the repo
            to_dir = os.path.dirname(to_path)
            if not os.path.exists(to_dir):
                os.makedirs(to_dir)

            if mode == 'copy':
                if os.path.exists(client_lib_path):
                    if os.path.isdir(client_lib_path):
                        shutil.copytree(client_lib_path, to_path)
                    else:
                        shutil.copy(client_lib_path, to_path)
                else:
                    raise TacticApiException("Path [%s] does not exist" % client_lib_path)
            elif mode == 'download':
                web_path = web_paths[i]
                my.download(web_path, to_dir=to_dir, filename=filename)
            else:
                raise TacticApiException("Checkout mode [%s] not supported" % mode)

        return to_paths

    def lock_sobject(my, search_key, context):
        '''Locks the context for checking in and out.  Locking a context
        prevents the ability to checkout or checkin to that context for a
        particular sobject.

        @params
        search_key - the search key of the sobject
        context - the context that will be blocked

        @return
        None
        '''
        return my.server.lock_sobject(my.ticket, search_key, context)

    def unlock_sobject(my, search_key, context):
        '''Unlocks the context for checking in and out.  Locking a context
        prevents the ability to checkout or checkin to that context for a
        particular sobject.
        @params
        search_key - the search key of the sobject
        context - the context that will be unblocked

        @return
        None
        '''
        return my.server.unlock_sobject(my.ticket, search_key, context)

    def query_snapshots(my, filters=None, columns=None, order_bys=[], show_retired=False, limit=None, offset=None, single=False, include_paths=False, include_full_xml=False, include_paths_dict=False, include_parent=False, include_files=False):
        '''API Function: query_snapshots(filters=None, columns=None, order_bys=[], show_retired=False, limit=None, offset=None, single=False, include_paths=False, include_full_xml=False, include_paths_dict=False, include_parent=False, include_files=False)
        Thin wrapper around query(), but specific to querying snapshots, with
        some useful included flags that are specific to snapshots

        @params:
        ticket - authentication ticket
        filters - (optional) an array of filters to alter the search
        columns - (optional) an array of columns whose values should be retrieved
        order_bys - (optional) an array of order_by to alter the search
        show_retired - (optional) - sets whether retired sobjects are also returned
        limit - sets the maximum number of results returned
        offset - sets the offset of the first result returned
        single - returns a single sobject that is not wrapped up in an array
        include_paths - flag to specify whether to include a __paths__ property
            containing a list of all paths in the dependent snapshots
        include_paths_dict - flag to specify whether to include a
            __paths_dict__ property containing a dict of all paths in the
            dependent snapshots
        include_full_xml - flag to return the full xml definition of a snapshot
        include_parent - includes all of the parent attributes in a __parent__ dictionary
        include_files - includes all of the file objects referenced in the snapshots

        @return:
        list of snapshots
        '''
        return my.server.query_snapshots(my.ticket, filters, columns, order_bys, show_retired, limit, offset, single, include_paths, include_full_xml, include_paths_dict, include_parent, include_files)

    def get_snapshot(my, search_key, context="publish", version='-1', revision=None, level_key=None, include_paths=False, include_full_xml=False, include_paths_dict=False, include_files=False, include_web_paths_dict=False, versionless=False, process=None):
        '''API Function: get_snapshot(search_key, context="publish", version='-1', level_key=None, include_paths=False, include_full_xml=False, include_paths_dict=False, include_files=False, include_web_paths_dict=False, versionless=False)
        Method to retrieve an sobject's snapshot.
        Retrieves the latest snapshot by default.

        @param:
        search_key - unique identifier of sobject whose snapshot we are looking for

        @keyparam:
        process - the process of the snapshot
        context - the context of the snapshot
        version - snapshot version
        revision - snapshot revision
        level_key - the unique identifier of the level in the form of a search key
        include_paths - flag to include a list of paths to the files in this snapshot.
        include_full_xml - whether to include full xml in the return
        include_paths_dict - flag to specify whether to include a
            __paths_dict__ property containing a dict of all paths in the
            dependent snapshots
        include_web_paths_dict - flag to specify whether to include a
            __web_paths_dict__ property containing a dict of all web paths in
            the returned snapshots
        include_files - includes all of the file objects referenced in the snapshots
        versionless - boolean to return the versionless snapshot, which takes
            a version of -1 (latest) or 0 (current)

        @return:
        dictionary - the resulting snapshot

        @example:
        [code]
        search_key = 'prod/asset?project=sample3d&code=chr001'
        snapshot = server.get_snapshot(search_key, context='icon', include_files=True)
        [/code]

        [code]
        # get the versionless snapshot
        search_key = 'prod/asset?project=sample3d&code=chr001'
        snapshot = server.get_snapshot(search_key, context='anim', include_paths_dict=True, versionless=True)
        [/code]
        '''
        return my.server.get_snapshot(my.ticket, search_key, context, version, revision, level_key, include_paths, include_full_xml, include_paths_dict, include_files, include_web_paths_dict, versionless, process)

    def get_full_snapshot_xml(my, snapshot_code):
        '''API Function: get_full_snapshot_xml(snapshot_code)
        Retrieve a full snapshot xml.  This snapshot definition contains all
        the information about a snapshot in xml

        @param:
        snapshot_code - unique code of snapshot

        @return:
        string - the resulting snapshot xml
        '''
        return my.server.get_full_snapshot_xml(my.ticket, snapshot_code)

    def set_current_snapshot(my, snapshot_code):
        '''API Function: set_current_snapshot(snapshot_code)
        Set this snapshot as a "current" snapshot

        @param:
        snapshot_code - unique code of snapshot

        @return:
        string - the resulting snapshot xml
        '''
        return my.server.set_current_snapshot(my.ticket, snapshot_code)

    def get_dependencies(my, snapshot_code, mode='explicit', tag='main', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo', show_retired=False):
        '''API Function: get_dependencies(snapshot_code, mode='explicit', tag='main', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo', show_retired=False)
        Return the dependent snapshots of a certain tag

        @params:
        snapshot_code - unique code of a snapshot

        @keyparams:
        mode - explicit (get version as defined in snapshot)
             - latest
             - current
        tag - retrieve only dependencies that have this named tag
        include_paths - flag to specify whether to include a __paths__ property
            containing all of the paths in the dependent snapshots
        include_paths_dict - flag to specify whether to include a
            __paths_dict__ property containing a dict of all paths in the
            dependent snapshots
        include_files - includes all of the file objects referenced in the snapshots
        repo_mode - client_repo, web, lib, relative
        show_retired - defaults to False so that it doesn't show retired dependencies

        @return:
        a list of snapshots
        '''
        return my.server.get_dependencies(my.ticket, snapshot_code, mode, tag, include_paths, include_paths_dict, include_files, repo_mode, show_retired)

    def get_all_dependencies(my, snapshot_code, mode='explicit', type='ref', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo', show_retired=False):
        '''API Function: get_all_dependencies(snapshot_code, mode='explicit', type='ref', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo', show_retired=False)
        Retrieve the latest dependent snapshots of the given snapshot

        @param:
        snapshot_code - the unique code of the snapshot

        @keyparam:
        mode - explicit (get version as defined in snapshot)
             - latest
             - current
        type - one of ref or input_ref
        include_paths - flag to specify whether to include a __paths__ property
            containing all of the paths in the dependent snapshots
        include_paths_dict - flag to specify whether to include a
            __paths_dict__ property containing a dict of all paths in the
            dependent snapshots
        include_files - includes all of the file objects referenced in the snapshots
        repo_mode - client_repo, web, lib, relative
        show_retired - defaults to False so that it doesn't show retired dependencies

        @return:
        list - snapshots
        '''
        return my.server.get_all_dependencies(my.ticket, snapshot_code, mode, type, include_paths, include_paths_dict, include_files, repo_mode, show_retired)

    #
    # Task methods
    #
    def create_task(my, search_key, process="publish", subcontext=None, description=None, bid_start_date=None, bid_end_date=None, bid_duration=None, assigned=None):
        '''API Function: create_task(search_key, process="publish", subcontext=None, description=None, bid_start_date=None, bid_end_date=None, bid_duration=None, assigned=None)
        Create a task for a particular sobject

        @param:
        search_key - the key identifying a type of sobject as registered in
            the search_type table.

        @keyparam:
        process - process that this task belongs to
        subcontext - the subcontext of the process (context = process/subcontext)
        description - detailed description of the task
        bid_start_date - the expected start date for this task
        bid_end_date - the expected end date for this task
        bid_duration - the expected duration for this task
        assigned - the user assigned to this task

        @return:
        dictionary - task created
        '''
        return my.server.create_task(my.ticket, search_key, process, subcontext, description, bid_start_date, bid_end_date, bid_duration, assigned)

    def add_initial_tasks(my, search_key, pipeline_code=None, processes=[], skip_duplicate=True, offset=0):
        '''API Function: add_initial_tasks(search_key, pipeline_code=None, processes=[], skip_duplicate=True, offset=0)
        Add initial tasks to an sobject

        @param:
        search_key - the key identifying a type of sobject as registered in
            the search_type table.

        @keyparam:
        pipeline_code - override the sobject's pipeline and use this one instead
        processes - create tasks for the given list of processes
        skip_duplicate - boolean to skip duplicated tasks
        offset - a number to offset the start date from today's date

        @return:
        list - tasks created
        '''
        return my.server.add_initial_tasks(my.ticket, search_key, pipeline_code, processes, skip_duplicate, offset)

    def get_input_tasks(my, search_key):
        '''API Function: get_input_tasks(search_key)
        Get the input tasks of a task based on the pipeline associated with
        the sobject parent of the task

        @param:
        search_key - the key identifying an sobject as registered in the
            search_type table.

        @return:
        list of input tasks
        '''
        return my.server.get_input_tasks(my.ticket, search_key)

    def get_output_tasks(my, search_key):
        '''API Function: get_output_tasks(search_key)
        Get the output tasks of a task based on the pipeline associated with
        the sobject parent of the task

        @param:
        search_key - the key identifying an sobject as registered in the
            search_type table.
        @return:
        list of output tasks
        '''
        return my.server.get_output_tasks(my.ticket, search_key)

    #
    # Note methods
    #
    def create_note(my, search_key, note, process="publish", subcontext=None, user=None):
        '''API Function: create_note(search_key, note, process="publish", subcontext=None, user=None)
        Add a note for a particular sobject

        @params:
        search_key - the key identifying a type of sobject as registered in
            the search_type table.
        note - detailed description of the task
        process - process that this task belongs to
        subcontext - the subcontext of the process (context = process/subcontext)
        user - the user the note is attached to

        @return
        note that was created
        '''
        return my.server.create_note(my.ticket, search_key, process, subcontext, note, user)

    #
    # Pipeline methods
    #
    def get_pipeline_xml(my, search_key):
        '''API Function: get_pipeline_xml(search_key)
        DEPRECATED: use get_pipeline_xml_info()

        Retrieve the pipeline of a specific sobject.  The pipeline returned
        is an xml document and an optional dictionary of information.

        @param:
        search_key - a unique identifier key representing an sobject

        @return:
        dictionary - xml and the optional hierarchy info
        '''
        return my.server.get_pipeline_xml(my.ticket, search_key)

    def get_pipeline_processes(my, search_key, recurse=False):
        '''API Function: get_pipeline_processes(search_key, recurse=False)
        DEPRECATED: use get_pipeline_processes_info()

        Retrieve the pipeline processes information of a specific sobject.

        @param:
        search_key - a unique identifier key representing an sobject

        @keyparams:
        recurse - boolean to control whether to display sub pipeline processes

        @return:
        list - process names of the pipeline
        '''
        return my.server.get_pipeline_processes(my.ticket, search_key, recurse)

    def get_pipeline_xml_info(my, search_key, include_hierarchy=False):
        '''API Function: get_pipeline_xml_info(search_key, include_hierarchy=False)
        Retrieve the pipeline of a specific sobject.  The pipeline returned
        is an xml document and an optional dictionary of information.

        @param:
        search_key - a unique identifier key representing an sobject

        @keyparam:
        include_hierarchy - include a list of dictionary with key info on each process of the pipeline

        @return:
        dictionary - xml and the optional hierarchy info
        '''
        return my.server.get_pipeline_xml_info(my.ticket, search_key, include_hierarchy)

    def get_pipeline_processes_info(my, search_key, recurse=False, related_process=None):
        '''API Function: get_pipeline_processes_info(search_key, recurse=False, related_process=None)
        Retrieve the pipeline processes information of a specific sobject.  It
        provides information from the perspective of a particular process if
        related_process is specified.

        @param:
        search_key - a unique identifier key representing an sobject

        @keyparams:
        recurse - boolean to control whether to display sub pipeline processes
        related_process - given a process, it shows the input and output processes and contexts

        @return:
        dictionary - process names of the pipeline or a dictionary if related_process is specified
        '''
        return my.server.get_pipeline_processes_info(my.ticket, search_key, recurse, related_process)

    def execute_pipeline(my, pipeline_xml, package):
        '''API Function: execute_pipeline(pipeline_xml, package)
        Spawn an execution of a pipeline as delivered from
        'get_pipeline_xml()'.  The pipeline is an xml document that describes
        a set of processes and their handlers

        @param:
        pipeline_xml - an xml document describing a standard Tactic pipeline.
        package - a dictionary of data delivered to the handlers

        @return:
        instance - a reference to the interpreter
        '''
        # execute the pipeline
        from interpreter import PipelineInterpreter
        interpreter = PipelineInterpreter(pipeline_xml)
        interpreter.set_server(my)
        interpreter.set_package(package)
        interpreter.execute()

        return interpreter

    def commit_session(my, session_xml, pid):
        '''Takes a session xml and commits it.  Also handles transfer to old
        style xml data.  Generally, this is executed through the application
        package: tactic_client_lib/application/common/introspect.py.  However,
        this can be done manually if the proper session xml is provided.

        @params
        ticket - authentication ticket
        session_xml - an xml document representing the session.  This document
            format is described below

        @return
        session_content object

        The session_xml takes the form:

        <session>
          <ref search_key="prod/shot?project=bar&code=joe" context="model" version="3" revision="2" tactic_node="tactic_joe"/>
        </session>
        '''
        return my.server.commit_session(my.ticket, session_xml, pid)

    #
    # Directory methods
    #
    def get_paths(my, search_key, context="publish", version=-1, file_type='main', level_key=None, single=False, versionless=False):
        '''API Function: get_paths(search_key, context="publish", version=-1, file_type='main', level_key=None, single=False, versionless=False)
        Get paths from an sobject

        @params:
        search_key - a unique identifier key representing an sobject

        @keyparams:
        context - context of the snapshot
        version - version of the snapshot
        file_type - file type defined for the file node in the snapshot
        level_key - the unique identifier of the level that this was checked into
        single - If set to True, the first of each path set is returned
        versionless - boolean to return the versionless snapshot, which takes
            a version of -1 (latest) or 0 (current)

        @return
        A dictionary of lists representing various paths.
        The paths returned are as follows:
        - client_lib_paths: all the paths to the repository relative to the client
        - lib_paths: all the paths to the repository relative to the server
        - sandbox_paths: all of the paths mapped to the sandbox
        - web: all of the paths relative to the http server
        '''
        return my.server.get_paths(my.ticket, search_key, context, version, file_type, level_key, single, versionless)

    def get_base_dirs(my):
        '''API Function: get_base_dirs()
        Get all of the base directories defined on the server

        @return:
        dictionary of all the important configured base directories with their keys
        '''
        return my.server.get_base_dirs(my.ticket)

    def get_plugin_dir(my, plugin):
        '''API Function: get_plugin_dir(plugin)
        Return the web path for the specified plugin

        @params:
        plugin - plugin name

        @return:
        string - the web path for the specified plugin
        '''
        return my.server.get_plugin_dir(my.ticket, plugin)

    def get_handoff_dir(my):
        '''API Function: get_handoff_dir()
        Return a temporary path that files can be copied to

        @return:
        string - the directory to copy a file to handoff to TACTIC
        without having to go through http protocol
        '''
        if my.handoff_dir:
            return my.handoff_dir

        handoff_dir = my.server.get_handoff_dir(my.ticket)
        if not os.path.exists(handoff_dir):
            os.makedirs(handoff_dir)

        my.handoff_dir = handoff_dir
        return handoff_dir

    def clear_upload_dir(my):
        '''API Function: clear_upload_dir()
        Clear the upload directory to ensure clean checkins

        @param:
        None

        @keyparam:
        None

        @return:
        None
        '''
        return my.server.clear_upload_dir(my.ticket)

    def get_client_dir(my, snapshot_code, file_type='main', mode='client_repo'):
        '''API Function: get_client_dir(snapshot_code, file_type='main', mode='client_repo')
        Get a dir segment from a snapshot

        @param:
        snapshot_code - the unique code of the snapshot

        @keyparam:
        file_type - each file in a snapshot is identified by a file type.
            This parameter specifies which type.  Defaults to 'main'
        mode - Forces the type of folder path returned to use the value from the
            appropriate tactic_<SERVER_OS>-conf.xml configuration file.
            Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
            lib = the NFS asset directory from the server point of view
            web = the http asset directory from the client point of view
            local_repo = the local sync of the TACTIC repository
            sandbox = the local sandbox (work area) designated by TACTIC
            client_repo (default) = the asset directory from the client point of view
            If there is no value for win32_client_repo_dir or linux_client_repo_dir
            in the config, then the value for asset_base_dir will be used instead.
            relative = the relative directory without any base

        @return:
        string - directory segment for a snapshot and file type

        @example:
        If the tactic_<SERVER_OS>-conf.xml configuration file contains the following:
        [code]
        <win32_client_repo_dir>T:/assets</win32_client_repo_dir>
        [/code]

        and if the call to the method is as follows:
        [code]
        snapshot = server.create_snapshot(search_key, context)
        code = snapshot.get('code')
        server.get_client_dir(snapshot.get('code'))
        [/code]

        Then, on a Windows client, get_client_dir() will return:
        [code]
        T:/assets/sample3d/asset/chr/chr003/scenes
        [/code]
        '''
        return my.server.get_client_dir(my.ticket, snapshot_code, file_type, mode)

    def get_path_from_snapshot(my, snapshot_code, file_type='main', mode='client_repo'):
        '''API Function: get_path_from_snapshot(snapshot_code, file_type='main', mode='client_repo')
        Get a full path from a snapshot

        @param:
        snapshot_code - the unique code / search_key of the snapshot

        @keyparam:
        file_type - each file in a snapshot is identified by a file type.
            This parameter specifies which type.  Defaults to 'main'
        mode - Forces the type of folder path returned to use the value from the
            appropriate tactic_<SERVER_OS>-conf.xml configuration file.
            Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
            lib = the NFS asset directory from the server point of view
            web = the http asset directory from the client point of view
            local_repo = the local sync of the TACTIC repository
            sandbox = the local sandbox (work area) designated by TACTIC
            client_repo (default) = the asset directory from the client point of view
            If there is no value for win32_client_repo_dir or linux_client_repo_dir
            in the config, then the value for asset_base_dir will be used instead.
            relative = the relative directory without any base

        @return:
        string - the full path to the file in the snapshot

        @example:
        If the tactic_<SERVER_OS>-conf.xml configuration file contains the following:
        [code]
        <win32_client_repo_dir>T:/assets</win32_client_repo_dir>
        [/code]

        and if the call to the method is as follows:
        [code]
        snapshot = server.create_snapshot(search_key, context)
        code = snapshot.get('code')
        server.get_path_from_snapshot(snapshot.get('code'))

        # in a trigger
        snapshot_key = my.get_input_value("search_key")
        server.get_path_from_snapshot(snapshot_key)
        [/code]

        Then, on a Windows client, get_path_from_snapshot() will return:
        [code]
        T:/assets/sample3d/asset/chr/chr003/scenes/chr003_rig_v003.txt
        [/code]
        '''
        return my.server.get_path_from_snapshot(my.ticket, snapshot_code, file_type, mode)

    def get_expanded_paths_from_snapshot(my, snapshot_code, file_type='main'):
        '''API Function: get_expanded_paths_from_snapshot(snapshot_code, file_type='main')
        Return the expanded path of a snapshot (used for ranges of files)

        @param:
        snapshot_code - the unique code of the snapshot

        @keyparam:
        file_type - each file in a snapshot is identified by a file type.
            This parameter specifies which type.
            Defaults to 'main'

        @return:
        string - path
        '''
        return my.server.get_expanded_paths_from_snapshot(my.ticket, snapshot_code, file_type)

    def get_all_paths_from_snapshot(my, snapshot_code, mode='client_repo', expand_paths=False, filename_mode='', file_types=[]):
        '''API Function: get_all_paths_from_snapshot(snapshot_code, mode='client_repo', expand_paths=False, filename_mode='', file_types=[])
        Get all paths from snapshot

        @param:
        snapshot_code - the unique code of the snapshot

        @keyparam:
        mode - forces the type of folder path returned to use the value from the
            appropriate tactic_<SERVER_OS>-conf.xml configuration file.
            Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
            lib = the NFS asset directory from the server point of view
            web = the http asset directory from the client point of view
            local_repo = the local sync of the TACTIC repository
            sandbox = the local sandbox (work area) designated by TACTIC
            client_repo (default) = the asset directory from the client point of view
            If there is no value for win32_client_repo_dir or linux_client_repo_dir
            in the config, then the value for asset_base_dir will be used instead.
            relative = the relative directory without any base
        expand_paths - expand the paths of a sequence check-in or, for a
            directory check-in, list the contents of the directory as well
        filename_mode - source or '', where source reveals the source_path of the check-in
        file_types - list: only return files in snapshot with these types

        @return:
        list - paths
        '''
        return my.server.get_all_paths_from_snapshot(my.ticket, snapshot_code, mode, expand_paths, filename_mode, file_types)

    def get_preallocated_path(my, snapshot_code, file_type='main', file_name='', mkdir=True, protocol='client_repo', ext='', checkin_type='strict'):
        '''API Function: get_preallocated_path(snapshot_code, file_type='main', file_name='', mkdir=True, protocol='client_repo', ext='', checkin_type='strict')
        Get the preallocated path for this snapshot.  It assumes that this
        checkin actually exists in the repository and will create virtual
        entities to simulate a checkin.  This method can be used to determine
        where a checkin will go.  However, the snapshot must exist using
        create_snapshot() or some other method.  For a pure virtual naming
        simulator, use get_virtual_snapshot_path().

        @param:
        snapshot_code - the code of a preallocated snapshot.  This can be
            created by get_snapshot()

        @keyparam:
        file_type - the type of file that will be checked in.  Some naming
            conventions make use of this information to separate directories
            for different file types
        file_name - the desired file name of the preallocation.  This
            information may be ignored by the naming convention or it may use
            this as a base for the final file name
        mkdir - an option which determines whether the directory of the
            preallocation should be created
        protocol - It's either client_repo, sandbox, or None.  It determines
            whether the path is from a client or server perspective
        ext - force the extension of the file name returned
        checkin_type - strict, auto, or '' can be used.  A naming entry in the
            naming, if found, will be used to determine the checkin type

        @return:
        string - the path where add_file() expects the file to be checked into

        @example:
        it saves time if you get the path and copy it to the final destination first.
        [code]
        snapshot = my.server.create_snapshot(search_key, context)
        snapshot_code = snapshot.get('code')

        file_name = 'input_file_name.txt'
        orig_path = 'C:/input_file_name.txt'
        path = my.server.get_preallocated_path(snapshot_code, file_type, file_name)

        # the path where it is supposed to go is generated
        new_dir = os.path.dirname(path)
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        shutil.copy(orig_path, path)
        my.server.add_file(snapshot_code, path, file_type, mode='preallocate')
        [/code]
        '''
        return my.server.get_preallocated_path(my.ticket, snapshot_code, file_type, file_name, mkdir, protocol, ext, checkin_type)

    def get_virtual_snapshot_path(my, search_key, context="publish", snapshot_type="file", level_key=None, file_type='main', file_name='', mkdirs=False, protocol='client_repo', ext='', checkin_type='strict'):
        '''API Function: get_virtual_snapshot_path(search_key, context, snapshot_type="file", level_key=None, file_type='main', file_name='', mkdirs=False, protocol='client_repo', ext='', checkin_type='strict')
        Create a virtual snapshot and return a path that this snapshot would
        generate through the naming conventions.  This is most useful for
        testing naming conventions.

        @param:
        snapshot creation:
        -----------------
        search_key - a unique identifier key representing an sobject
        context - the context of the checkin

        @keyparam:
        snapshot_type - [optional] describes what kind of a snapshot this is.
            More information about a snapshot type can be found in the
            prod/snapshot_type sobject
        description - [optional] optional description for this checkin
        level_key - the unique identifier of the level that this
            is to be checked into

        @keyparam:
        path creation:
        --------------
        file_type - the type of file that will be checked in.  Some naming
            conventions make use of this information to separate directories
            for different file types
        file_name - the desired file name of the preallocation.  This
            information may be ignored by the naming convention or it may use
            this as a base for the final file name
        mkdirs - an option which determines whether the directory of the
            preallocation should be created
        protocol - It's either client_repo, sandbox, or None.
            It determines whether the path is from a client or server perspective
        ext - force the extension of the file name returned
        checkin_type - strict, auto, or '' can be used to preset the checkin_type

        @return:
        string - path as determined by the naming conventions
        '''
        return my.server.get_virtual_snapshot_path(my.ticket, search_key, context, snapshot_type, level_key, file_type, file_name, mkdirs, protocol, ext, checkin_type)

    # NOTE: this is very specific to the Maya tools and can be considered
    # deprecated
    def get_md5_info(my, md5_list, new_paths, parent_code, texture_cls, file_group_dict, project_code, mode):
        '''API Function: get_md5_info(md5_list, new_paths, parent_code, texture_cls, file_group_dict, project_code, mode)
        Get md5 info for a given list of texture paths, mainly returning
        whether each md5 is a match or not

        @param:
        md5_list - md5_list
        new_paths - list of file_paths
        parent_code - parent code
        texture_cls - Texture or ShotTexture
        file_group_dict - file group dictionary storing all the file groups
        project_code - project_code
        mode - texture matching mode (md5, file_name)

        @return:
        dictionary - a dictionary of path and a subdictionary of is_match,
            repo_file_code, repo_path, repo_file_range
        '''
        return my.server.get_md5_info(my.ticket, md5_list, new_paths, parent_code, texture_cls, file_group_dict, project_code, mode)

    #
    # UI methods
    #
    def get_widget(my, class_name, args={}, values={}):
        '''API Function: get_widget(class_name, args={}, values={})
        Get a defined widget

        @params:
        class_name - the fully qualified class name of the widget

        @keyparams:
        args - keyword arguments required to create a specific widget
        values - form values that are passed in from the interface

        @return:
        string - html form of the widget

        @example:
        class_name = 'tactic.ui.panel.TableLayoutWdg'
        args = {
            'view': 'task_list',
            'search_type': 'sthpw/task',
        }

        filter = [{"prefix":"main_body","main_body_enabled":"on","main_body_column":"project_code","main_body_relation":"is","main_body_value":"{$PROJECT}"}, {"prefix":"main_body","main_body_enabled":"on","main_body_column":"search_type","main_body_relation":"is not","main_body_value":"sthpw/project"}]

        from simplejson import dumps
        values = {'json': dumps(filter)}

        widget_html = server.get_widget(class_name, args, values)
        '''
        return my.server.get_widget(my.ticket, class_name, args, values)

    def class_exists(my, class_path):
        '''determines if a class exists on the server

        @params
        class_path - fully qualified python class path

        @return
        boolean: true if the class exists and can be seen
        '''
        return my.server.class_exists(my.ticket, class_path)

    def execute_python_script(my, script_path, kwargs={}):
        '''API Function: execute_python_script(script_path, kwargs)
        Execute a python script defined in Script Editor

        @param:
        script_path - script path in Script Editor, e.g.
            test/eval_sobj

        @keyparam:
        kwargs - keyword arguments for this script

        @return:
        dictionary - returned data structure
        '''
        return my.server.execute_python_script(my.ticket, script_path, kwargs)

    def execute_cmd(my, class_name, args={}, values={}):
        '''API Function: execute_cmd(class_name, args={}, values={})
        Execute a command

        @param:
        class_name - the fully qualified class name of the widget

        @keyparam:
        args - keyword arguments required to create a specific widget
        values - form values that are passed in from the interface

        @return:
        string - description of command
        '''
        return my.server.execute_cmd(my.ticket, class_name, args, values)

    def execute_transaction(my, transaction_xml, file_mode=None):
        '''Run a tactic transaction as defined by the instructions in the
        given transaction xml.  The format of the xml is identical to the
        format of how transactions are stored internally

        @params
        ticket - authentication ticket
        transaction_xml - transaction instructions

        @return
        None

        @usage
        transaction_xml = """<?xml version='1.0' encoding='UTF-8'?>
        <transaction>
          <sobject search_type="project/asset?project=gbs"
                search_code="shot01" action="update">
            <column name="description" from="" to="Big Money Shot"/>
          </sobject>
        </transaction>
        """

        server.execute_transaction(transaction_xml)
        '''
        return my.server.execute_transaction(my.ticket, transaction_xml, file_mode)

    #
    # Widget Config methods
    #
    def set_config_definition(my, search_type, element_name, config_xml="", login=None):
        '''API Function: set_config_definition(search_type, element_name, config_xml="", login=None)
        Set the widget configuration definition for an element

        @param:
        search_type - search type that this config relates to
        element_name - name of the element

        @keyparam:
        config_xml - The configuration xml to be set
        login - A user's login name, if specifically choosing one

        @return:
        True on success, exception message on failure
        '''
        return my.server.set_config_definition(my.ticket, search_type, element_name, config_xml, login)

    def get_config_definition(my, search_type, view, element_name, personal=False):
        '''API Function: get_config_definition(search_type, view, element_name, personal=False)
        Get the widget configuration definition for an element

        @param:
        search_type - search type that this config relates to
        view - view to look for the element
        element_name - name of the element

        @keyparam:
        personal - True if it is a personal definition

        @return:
        string - xml of the configuration
        '''
        return my.server.get_config_definition(my.ticket, search_type, view, element_name, personal)

    def update_config(my, search_type, view, element_names):
        '''API Function: update_config(search_type, view, element_names)
        Update the widget configuration, such as ordering, for a view

        @param:
        search_type - search type that this config relates to
        view - view to look for the element
        element_names - element names in a list

        @return:
        string - updated config xml snippet
        '''
        return my.server.update_config(my.ticket, search_type, view, element_names)

    def add_config_element(my, search_type, view, name, class_name=None, display_options={}, action_class_name=None, action_options={}, element_attrs={}, login=None, unique=True, auto_unique_name=False, auto_unique_view=False):
        '''API Function: add_config_element(search_type, view, name, class_name=None, display_options={}, action_class_name=None, action_options={}, element_attrs={}, login=None, unique=True, auto_unique_name=False, auto_unique_view=False)
        This method adds an element into a config.  It is used by various
        UI components to add a new widget element to a particular view.
        @param:
        search_type - the search type that this config belongs to
        view - the specific view of the search type
        name - the name of the element

        @keyparam:
        class_name - the fully qualified class of the display
        action_class_name - the fully qualified class of the action
        display_options - keyword options in a dictionary to construct the specific display
        action_options - keyword options in a dictionary to construct the specific action
        element_attrs - element attributes in a dictionary
        login - login name if it is for a specific user
        unique - add a unique element if True; update the element if False
        auto_unique_name - auto generate a unique element and display view name
        auto_unique_view - auto generate a unique display view name

        @return:
        boolean - True

        @example:
        This will add a new element to the "character" view for a 3D asset
        [code]
        search_type = 'prod/asset'
        view = 'characters'
        class_name = 'tactic.ui.common.SimpleElementWdg'
        server.add_config_element(search_type, view, class_name)
        [/code]

        This will add a new element named "user" to the "definition" view.  It contains detailed display and action nodes
        [code]
        data_dict = {}  # some data here
        search_type = 'prod/asset'
        server.add_config_element(search_type, 'definition', 'user', class_name=data_dict['class_name'], display_options=data_dict['display_options'], element_attrs=data_dict['element_attrs'], unique=True, action_class_name=data_dict['action_class_name'], action_options=data_dict['action_options'])
        [/code]
        '''
        return my.server.add_config_element(my.ticket, search_type, view, name, class_name, display_options, action_class_name, action_options, element_attrs, login, unique, auto_unique_name, auto_unique_view)

    def _setup(my, protocol="xmlrpc"):
        # if this is being run in the tactic server, have the option
        # to use TACTIC code directly
        if protocol == 'local':
            # import some server libraries
            from pyasm.biz import Project
            from pyasm.common import Environment
            from pyasm.prod.service import ApiXMLRPC
            from pyasm.web import WebContainer

            # set the ticket
            security = Environment.get_security()
            if not security:
                raise TacticApiException("Security not initialized.  This may be because you are running the client API in 'local' mode without first initializing Batch")

            # set the project
            project_code = Project.get_project_code()
            my.set_project(project_code)

            # set the ticket
            ticket = security.get_ticket_key()
            my.set_ticket(ticket)

            # set the protocol to local for the api class
            # note ticket has to be set first
            my.server = ApiXMLRPC()
            my.server.set_protocol(protocol)

            # if server name has already been set, use that one
            if my.server_name:
                my.has_server = True
                return

            web = WebContainer.get_web()
            if web:
                my.server_name = web.get_http_host()
                if my.server_name:
                    my.has_server = True
            else:
                # else guess that it is localhost
                my.server_name = "localhost"
                my.has_server = True
            return

        elif protocol == 'xmlrpc':
            # get the env variables
            env_user = os.environ.get('TACTIC_USER')
            env_password = os.environ.get('TACTIC_PASSWORD')
            env_server = os.environ.get('TACTIC_SERVER')
            env_ticket = os.environ.get('TACTIC_TICKET')
            env_project = os.environ.get('TACTIC_PROJECT')

            # if the server, the user or ticket, and the project are all set in
            # the environment, then it is not necessary to look at the
            # resource file
            if not (env_server and (env_user or env_ticket) and env_project):
                # need to scope by user
                # this is dealt with in get_resource_path already
                #if not my.login:
                #    my.login = getpass.getuser()
                file_path = my.get_resource_path()
                if not os.path.exists(file_path):
                    msg = "[%s] does not exist yet.  There is not enough information to authenticate the server. Either set the appropriate environment variables or run get_ticket.py" % file_path
                    raise TacticApiException(msg)

                # try to open the resource file
                file = open(file_path)
                lines = file.readlines()
                file.close()

                rc_server = None
                rc_ticket = None
                rc_project = None
                rc_login = None

                for line in lines:
                    line = line.strip()
                    if line.startswith("#"):
                        continue
                    # split only on the first '=' so that values themselves
                    # may contain '=' characters
                    name, value = line.split("=", 1)
                    if name == "server":
                        #my.set_server(value)
                        rc_server = value
                    elif name == "ticket":
                        #my.set_ticket(value)
                        rc_ticket = value
                    elif name == "project":
                        #my.set_project(value)
                        rc_project = value
                    elif name == "login":
                        #my.login = value
                        rc_login = value

                # these have to be issued in the correct order
                if rc_server:
                    my.set_server(rc_server)
                if rc_project:
                    my.set_project(rc_project)
                if rc_ticket:
                    # get the project
                    project = my.get_project()
                    # set a default if one does not exist
                    if not project:
                        my.set_project("admin")
                    my.set_ticket(rc_ticket)
                if rc_login:
                    my.login = rc_login

            # override with any environment variables that are set
            if env_server:
                my.set_server(env_server)
            if env_project:
                my.set_project(env_project)
            if env_user:
                # try to get a ticket with a set password
                ticket = my.get_ticket(env_user, env_password)
                my.set_ticket(ticket)
            if env_ticket:
                my.set_ticket(env_ticket)

            #my.server.set_protocol(protocol)

    #
    # Doc methods
    #
    def get_doc_link(my, alias):
        return my.server.get_doc_link(my.ticket, alias)

    #
    # API/Server Version functions
    #
    def get_release_version(my):
        # DEPRECATED
        print "WARNING: Deprecated function 'get_release_version'"
        return my.server.get_release_version(my.ticket)

    def get_server_version(my):
        '''API Function: get_server_version()

        @return:
        string - server version'''
        return my.server.get_server_version(my.ticket)

    def get_server_api_version(my):
        '''API Function: get_server_api_version()

        @return:
        string - server API version'''
        version = my.server.get_server_api_version(my.ticket)
        return version

    def get_client_version(my):
        '''API Function: get_client_version()

        @return:
        string - Version of TACTIC that this client came from'''

        # may use pkg_resources in 2.6
        if '.zip' in __file__:
            import zipfile
            parts = __file__.split('.zip')
            zip_name = '%s.zip' % parts[0]
            if zipfile.is_zipfile(zip_name):
                z = zipfile.ZipFile(zip_name)
                version = z.read('pyasm/application/common/interpreter/tactic_client_lib/VERSION')
                version = version.strip()
                z.close()
        else:
            dir = os.path.dirname(__file__)
            f = open('%s/VERSION' % dir, 'r')
            version = f.readline().strip()
            f.close()
        return version

    def get_client_api_version(my):
        '''API Function: get_client_api_version()

        @return:
        string - client api version'''

        # may use pkg_resources in 2.6
        if '.zip' in __file__:
            import zipfile
            parts = __file__.split('.zip')
            zip_name = '%s.zip' % parts[0]
            if zipfile.is_zipfile(zip_name):
                z = zipfile.ZipFile(zip_name)
                version = z.read('pyasm/application/common/interpreter/tactic_client_lib/VERSION_API')
                version = version.strip()
                z.close()
        else:
            dir = os.path.dirname(__file__)
            f = open('%s/VERSION_API' % dir, 'r')
            version = f.readline().strip()
            f.close()
        return version

    server = None
    def get(cls, protocol='', setup=True):
        '''get function which treats the server stub as a singleton'''
        try:
            from pyasm.common import Container
            server = Container.get("TacticServerStub")
            if not server:
                from pyasm.common import Environment
                app_server = Environment.get_app_server()
                if protocol:
                    server = TacticServerStub(protocol=protocol, setup=setup)
                elif app_server in ["batch", "xmlrpc"]:
                    server = TacticServerStub(protocol='local', setup=setup)
                else:
                    server = TacticServerStub(setup=setup)
                Container.put("TacticServerStub", server)
            return server
        except ImportError, e:
            if not cls.server:
                cls.server = TacticServerStub(protocol='xmlrpc', setup=setup)
            return cls.server
    get = classmethod(get)

    def set(cls, server=None):
        try:
            from pyasm.common import Container
            Container.put("TacticServerStub", server)
        except ImportError:
            cls.server = server
    set = classmethod(set)

#
# Objects
#
class Command(object):
    def get_description(my):
        return "No description"

    def execute_cmd(my):
        my.server = TacticServerStub()
        my.server.start(my.get_description())
        try:
            my.execute()
        except Exception, e:
            my.server.abort()
            raise
        else:
            my.server.finish()

    def execute(my):
        # to be implemented by subclasses
        raise NotImplementedError("Command subclasses must implement execute()")

class Search(object):
    pass

class SObject(dict):
    def get_search_key(my):
        return my['__search_key__']
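# ----------------------------------------------------------------------------
# Minimal usage sketch of the piecewise-checkin API documented above.  It
# assumes a reachable TACTIC server and valid credentials (environment
# variables or a resource file, see _setup()); the project code, search key
# and file path below are hypothetical placeholders.
if __name__ == '__main__':
    server = TacticServerStub.get()          # singleton stub, auth from env/resource file
    server.set_project('sample3d')           # hypothetical project code
    search_key = 'prod/asset?project=sample3d&code=chr001'   # hypothetical sobject

    # create an empty snapshot, then add a file to it piecewise
    snapshot = server.create_snapshot(search_key, context='model')
    server.add_file(snapshot.get('code'), './my_model.ma', file_type='maya', mode='upload')

    # resolve where the checked-in file landed from the client's point of view
    print server.get_path_from_snapshot(snapshot.get('code'), file_type='maya')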
epl-1.0
8,299,705,416,211,653,000
36.439018
433
0.579768
false
JaronArmiger/tenshi
angel-player/src/chrome/content/angelic/test.py
11
4815
#!/usr/bin/env python
# Licensed to Pioneers in Engineering under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Pioneers in Engineering licenses
# this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License

from __future__ import print_function

import os
import subprocess as subp
import shutil
import argparse
import re
import traceback
import sys


def is_subpath_of_set(path, pathset):
    part_path = ''
    for part in path.split(os.path.sep):
        part_path = os.path.join(part_path, part)
        if part_path in pathset:
            return True
    return False


def setup():
    os.chdir('tests')
    try:
        os.mkdir('tmp')
    except OSError:
        # os.mkdir raises OSError when the directory already exists.  One
        # reason we would get here is that the tmp directory was not cleaned
        # up.  Delete the directory and try again.
        shutil.rmtree('tmp')
        os.mkdir('tmp')
    os.chdir('tmp')


def cleanup():
    os.chdir(os.pardir)
    shutil.rmtree('tmp')
    os.chdir(os.pardir)


def get_test(path):
    return os.path.join(os.pardir, os.pardir, path)


def get_test_base(root, path):
    return os.path.join(root, os.path.dirname(path))

EXT_TO_CMD = {'py': ['python'],
              'js': ['node', '--harmony'],
              'sh': ['bash']}


def get_tests():
    '''
    Get all files under the tests directory, skipping directories that
    have test files with the same name.
    '''
    tests = set()
    dirs_to_skip = set(['tests/tmp'])
    for dirpath, dirnames, filenames in os.walk('tests', topdown=True):
        if not is_subpath_of_set(dirpath, dirs_to_skip):
            for filename in filenames:
                if filename[0] == '.':
                    continue
                fullname = os.path.join(dirpath, filename)
                tests.add(fullname)
                base, ext = os.path.splitext(fullname)
                dirs_to_skip.add(base)
    return tests


def run_test(name, root, failed_tests, stdout_logs):
    _, ext = os.path.splitext(name)
    cmd = EXT_TO_CMD[ext[1:]]
    args = cmd + [get_test(name), root, get_test_base(root, name)]
    p = subp.Popen(args, stdout=subp.PIPE, stderr=subp.STDOUT)
    stdout_logs[name] = p.communicate()
    ret = p.returncode
    if ret == 0:
        print('.', end='')
    else:
        failed_tests.append(name)
        print('x', end='')
    # Flush out the . or x we just printed, instead of waiting for a
    # newline to flush them.
    sys.stdout.flush()


def main():
    parser = argparse.ArgumentParser(description='Run angelic tests.')
    parser.add_argument('--no-cleanup', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--matching', action='store', default='')
    args = parser.parse_args()
    pattern = None
    if args.matching:
        pattern = re.compile(args.matching)
    tests = get_tests()
    failed_tests = []
    stdout_logs = dict()
    root = os.getcwd()
    tests_run = 0
    setup()
    try:
        for test in sorted(tests):
            if not pattern or pattern.search(test):
                tests_run += 1
                try:
                    run_test(test, root, failed_tests, stdout_logs)
                except KeyboardInterrupt:
                    print('Encountered exception while running test {}:'
                          .format(test))
                    traceback.print_exc()
                if args.verbose:
                    print(stdout_logs[test][0].decode())
    except Exception:
        print('Encountered exception while running tests:')
        traceback.print_exc()
    finally:
        if not args.no_cleanup:
            cleanup()
    if not failed_tests:
        print()
        print('OK (Ran {0} tests)'.format(tests_run))
    else:
        print()
        for failure in failed_tests:
            print('FAILED:', failure)
            print(' BEGIN TEST OUTPUT '.center(80, '*'))
            print(stdout_logs[failure][0].decode(), end='')
            print(' END TEST OUTPUT '.center(80, '*'))
        print()
        print('TEST FAILED ({0}/{1} tests failed)'
              .format(len(failed_tests), tests_run))

if __name__ == '__main__':
    main()
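# ----------------------------------------------------------------------------
# Example invocation (illustrative; assumes the script is run from the
# directory that contains the tests/ tree, since get_tests() walks 'tests'
# relative to the current working directory):
#
#   python test.py --matching 'lexer' --verbose --no-cleanup
#
# --matching filters test paths with a regex, --verbose echoes each test's
# captured stdout, and --no-cleanup keeps tests/tmp around for debugging.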
apache-2.0
2,831,676,987,181,664,000
28.539877
76
0.593977
false
binhqnguyen/lena-local
src/visualizer/visualizer/higcontainer.py
189
3560
import gtk
import gobject

try:
    from gazpacho.widgets.base.base import SimpleContainerAdaptor
except ImportError:
    pass

#root_library = 'hig'

class HIGContainer(gtk.Bin):
    __gtype_name__ = 'HIGContainer'
    __gproperties__ = {
        'title': (str, 'Group Title', 'the group title', '',
                  gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
    }

    def __init__(self, title=None):
        self.__title_text = None
        gtk.widget_push_composite_child()
        self.__title = gobject.new(gtk.Label, visible=True, xalign=0, yalign=0.5)
        self.__indent = gobject.new(gtk.Label, visible=True, label='    ')
        gtk.widget_pop_composite_child()
        gtk.Bin.__init__(self)
        self.__title.set_parent(self)
        self.__indent.set_parent(self)
        if title is not None:
            self.props.title = title

    def do_size_request(self, requisition):
        title_req = gtk.gdk.Rectangle(0, 0, *self.__title.size_request())
        indent_req = gtk.gdk.Rectangle(0, 0, *self.__indent.size_request())
        if self.child is None:
            child_req = gtk.gdk.Rectangle()
        else:
            child_req = gtk.gdk.Rectangle(0, 0, *self.child.size_request())
        requisition.height = (title_req.height + 6 +
                              max(child_req.height, indent_req.height))
        requisition.width = max(title_req.width, indent_req.width + child_req.width)

    def do_size_allocate(self, allocation):
        self.allocation = allocation
        ## title
        title_req = gtk.gdk.Rectangle(0, 0, *self.__title.get_child_requisition())
        title_alloc = gtk.gdk.Rectangle()
        title_alloc.x = allocation.x
        title_alloc.y = allocation.y
        title_alloc.width = min(title_req.width, allocation.width)
        title_alloc.height = min(title_req.height, allocation.height)
        self.__title.size_allocate(title_alloc)

        ## child
        if self.child is None:
            return
        indent_req = gtk.gdk.Rectangle(0, 0, *self.__indent.get_child_requisition())
        child_req = gtk.gdk.Rectangle(0, 0, *self.child.get_child_requisition())
        child_alloc = gtk.gdk.Rectangle()
        child_alloc.x = allocation.x + indent_req.width
        child_alloc.y = allocation.y + title_alloc.height + 6
        child_alloc.width = allocation.width - indent_req.width
        child_alloc.height = allocation.height - 6 - title_alloc.height
        self.child.size_allocate(child_alloc)

    def do_forall(self, internal, callback, data):
        if internal:
            callback(self.__title, data)
            callback(self.__indent, data)
        if self.child is not None:
            callback(self.child, data)

    def do_set_property(self, pspec, value):
        if pspec.name == 'title':
            self.__title.set_markup('<span weight="bold">%s</span>' %
                                    gobject.markup_escape_text(value))
            self.__title_text = value
        else:
            raise AttributeError, 'unknown property %s' % pspec.name

    def do_get_property(self, pspec):
        if pspec.name == 'title':
            return self.__title_text
        else:
            raise AttributeError, 'unknown property %s' % pspec.name


if __name__ == '__main__':
    frame = gtk.Frame()
    group = gobject.new(HIGContainer, title="Hello")
    frame.add(group)
    check = gtk.CheckButton("foobar")
    group.add(check)
    w = gtk.Window()
    w.add(frame)
    w.show_all()
    w.connect("destroy", lambda w: gtk.main_quit())
    gtk.main()
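# ----------------------------------------------------------------------------
# Usage note (illustrative): 'title' is a regular GObject property, so it can
# also be changed after construction and the label re-renders in bold Pango
# markup, e.g.
#
#   group.props.title = 'Updated section'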
gpl-2.0
-3,056,126,745,749,747,700
35.701031
84
0.595225
false
dednal/chromium.src
net/data/ssl/scripts/crlsetutil.py
75
5856
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
This utility takes a JSON input that describes a CRLSet and produces a
CRLSet from it.

The input is taken on stdin and is a dict with the following keys:
  - BlockedBySPKI: An array of strings, where each string is a filename
    containing a PEM certificate, from which an SPKI will be extracted.
  - BlockedByHash: A dict of string to an array of ints, where the string is
    a filename containing a PEM format certificate, and the ints are the
    serial numbers.  The listed serial numbers will be blocked when issued by
    the given certificate.

For example:

{
  "BlockedBySPKI": ["/tmp/blocked-certificate"],
  "BlockedByHash": {
    "/tmp/intermediate-certificate": [1, 2, 3]
  }
}
"""

import hashlib
import json
import optparse
import struct
import sys


def _pem_cert_to_binary(pem_filename):
  """Decodes the first PEM-encoded certificate in a given file into binary

  Args:
    pem_filename: A filename that contains a PEM-encoded certificate. It may
        contain additional data (keys, textual representation) which will be
        ignored

  Returns:
    A byte array containing the decoded certificate data
  """
  base64 = ""
  started = False

  with open(pem_filename, 'r') as pem_file:
    for line in pem_file:
      if not started:
        if line.startswith('-----BEGIN CERTIFICATE'):
          started = True
      else:
        if line.startswith('-----END CERTIFICATE'):
          break
        base64 += line[:-1].strip()

  return base64.decode('base64')


def _parse_asn1_element(der_bytes):
  """Parses a DER-encoded tag/Length/Value into its component parts

  Args:
    der_bytes: A DER-encoded ASN.1 data type

  Returns:
    A tuple of the ASN.1 tag value, the length of the ASN.1 header that was
    read, the sequence of bytes for the value, and then any data from
    der_bytes that was not part of the tag/Length/Value.
""" tag = ord(der_bytes[0]) length = ord(der_bytes[1]) header_length = 2 if length & 0x80: num_length_bytes = length & 0x7f length = 0 for i in xrange(2, 2 + num_length_bytes): length <<= 8 length += ord(der_bytes[i]) header_length = 2 + num_length_bytes contents = der_bytes[:header_length + length] rest = der_bytes[header_length + length:] return (tag, header_length, contents, rest) class ASN1Iterator(object): """Iterator that parses and iterates through a ASN.1 DER structure""" def __init__(self, contents): self._tag = 0 self._header_length = 0 self._rest = None self._contents = contents self.step_into() def step_into(self): """Begins processing the inner contents of the next ASN.1 element""" (self._tag, self._header_length, self._contents, self._rest) = ( _parse_asn1_element(self._contents[self._header_length:])) def step_over(self): """Skips/ignores the next ASN.1 element""" (self._tag, self._header_length, self._contents, self._rest) = ( _parse_asn1_element(self._rest)) def tag(self): """Returns the ASN.1 tag of the current element""" return self._tag def contents(self): """Returns the raw data of the current element""" return self._contents def _der_cert_to_spki(der_bytes): """Returns the subjectPublicKeyInfo of a DER-encoded certificate Args: der_bytes: A DER-encoded certificate (RFC 5280) Returns: A byte array containing the subjectPublicKeyInfo """ iterator = ASN1Iterator(der_bytes) iterator.step_into() # enter certificate structure iterator.step_into() # enter TBSCertificate iterator.step_over() # over version iterator.step_over() # over serial iterator.step_over() # over signature algorithm iterator.step_over() # over issuer name iterator.step_over() # over validity iterator.step_over() # over subject name return iterator.contents() def pem_cert_file_to_spki_hash(pem_filename): """Gets the SHA-256 hash of the subjectPublicKeyInfo of a cert in a file Args: pem_filename: A file containing a PEM-encoded certificate. Returns: The SHA-256 hash of the first certificate in the file, as a byte sequence """ return hashlib.sha256( _der_cert_to_spki(_pem_cert_to_binary(pem_filename))).digest() def main(): parser = optparse.OptionParser(description=sys.modules[__name__].__doc__) parser.add_option('-o', '--output', help='Specifies the output file. The default is stdout.') options, _ = parser.parse_args() outfile = sys.stdout if options.output and options.output != '-': outfile = open(options.output, 'wb') config = json.load(sys.stdin) blocked_spkis = [ pem_cert_file_to_spki_hash(pem_file).encode('base64').strip() for pem_file in config.get('BlockedBySPKI', [])] parents = { pem_cert_file_to_spki_hash(pem_file): serials for pem_file, serials in config.get('BlockedByHash', {}).iteritems() } header_json = { 'Version': 0, 'ContentType': 'CRLSet', 'Sequence': 0, 'DeltaFrom': 0, 'NumParents': len(parents), 'BlockedSPKIs': blocked_spkis, } header = json.dumps(header_json) outfile.write(struct.pack('<H', len(header))) outfile.write(header) for spki, serials in sorted(parents.iteritems()): outfile.write(spki) outfile.write(struct.pack('<I', len(serials))) for serial in serials: raw_serial = [] if not serial: raw_serial = ['\x00'] else: while serial: raw_serial.insert(0, chr(serial & 0xff)) serial >>= 8 outfile.write(struct.pack('<B', len(raw_serial))) outfile.write(''.join(raw_serial)) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
5,961,189,099,905,720,000
28.28
79
0.663593
false
apagac/cfme_tests
cfme/scripting/appliance.py
2
6850
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helper commands for appliances.

Provides the `upgrade`, `migrate`, `reboot` and `setup-webmks` commands, plus
read-only property commands generated at import time from IPAppliance.
"""
from functools import partial

import click
from cached_property import cached_property

from cfme.utils.conf import cfme_data
from cfme.utils.conf import env


def get_appliance(appliance_ip):
    """Checks an appliance is not None and if so, loads the appropriate things"""
    from cfme.utils.appliance import IPAppliance, load_appliances_from_config, stack
    if not appliance_ip:
        app = load_appliances_from_config(env)[0]
    else:
        app = IPAppliance(hostname=appliance_ip)
    stack.push(app)  # ensure safety from bad code, phase out later
    return app


@click.group(help='Helper commands for appliances')
def main():
    """Main appliance group"""
    pass


@main.command('upgrade', help='Upgrades an appliance to latest Z-stream')
@click.argument('appliance-ip', default=None, required=False)
@click.option('--cfme-only', is_flag=True, help='Upgrade cfme packages only')
@click.option('--update-to', default='5.9.z', help='Supported versions 5.9.z,'
              ' 5.10.z (.z means latest and default is 5.9.z)')
# leaving 59z support for upgrades
def upgrade_appliance(appliance_ip, cfme_only, update_to):
    """Upgrades an appliance"""
    supported_version_repo_map = {
        '5.9.z': 'update_url_59', '5.10.z': 'update_url_510',
    }
    assert update_to in supported_version_repo_map, "{} is not a supported version".format(
        update_to
    )
    update_url = supported_version_repo_map[update_to]
    if appliance_ip:
        print('Connecting to {}'.format(appliance_ip))
    else:
        print('Fetching appliance from env.local.yaml')
    app = get_appliance(appliance_ip)
    assert app.version > '5.7', "{} is not supported, must be 5.7 or higher".format(app.version)
    print('Extending appliance partitions')
    app.db.extend_partition()
    urls = cfme_data['basic_info'][update_url]
    print('Adding update repo to appliance')
    app.ssh_client.run_command(
        "curl {} -o /etc/yum.repos.d/update.repo".format(urls)
    )
    cfme = '-y'
    if cfme_only:
        cfme = 'cfme -y'
    print('Stopping EVM')
    app.evmserverd.stop()
    print('Running yum update')
    result = app.ssh_client.run_command('yum update {}'.format(cfme), timeout=3600)
    assert result.success, "update failed {}".format(result.output)
    print('Running database migration')
    app.db.migrate()
    app.db.automate_reset()
    print('Restarting postgres service')
    app.db_service.restart()
    print('Starting EVM')
    app.evmserverd.start()
    print('Waiting for webui')
    app.wait_for_web_ui()
    print('Appliance upgrade completed')


@main.command('migrate', help='Restores/migrates database from file or downloaded')
@click.argument('appliance-ip', default=None, required=True)
@click.option('--db-url', default=None, help='Download a backup file')
@click.option('--keys-url', default=None, help='URL for matching db v2key and GUID if available')
@click.option('--backup', default=None, help='Location of local backup file, including file name')
def backup_migrate(appliance_ip, db_url, keys_url, backup):
    """Restores and migrates database backup on an appliance"""
    print('Connecting to {}'.format(appliance_ip))
    app = get_appliance(appliance_ip)
    if db_url:
        print('Downloading database backup')
        result = app.ssh_client.run_command(
            'curl -o "/evm_db.backup" "{}"'.format(db_url), timeout=30)
        assert result.success, "Failed to download database: {}".format(result.output)
        backup = '/evm_db.backup'
    # otherwise the local path passed via --backup is used as-is
    print('Stopping EVM')
    app.evmserverd.stop()
    print('Dropping/Creating database')
    app.db.drop()
    app.db.create()
    print('Restoring database from backup')
    result = app.ssh_client.run_command(
        'pg_restore -v --dbname=vmdb_production {}'.format(backup), timeout=600)
    assert result.success, "Failed to restore new database: {}".format(result.output)
    print('Running database migration')
    app.db.migrate()
    app.db.automate_reset()
    if keys_url:
        result = app.ssh_client.run_command(
            'curl -o "/var/www/miq/vmdb/certs/v2_key" "{}v2_key"'.format(keys_url), timeout=15)
        assert result.success, "Failed to download v2_key: {}".format(result.output)
        result = app.ssh_client.run_command(
            'curl -o "/var/www/miq/vmdb/GUID" "{}GUID"'.format(keys_url), timeout=15)
        assert result.success, "Failed to download GUID: {}".format(result.output)
    else:
        app.db.fix_auth_key()
        app.db.fix_auth_dbyml()
    print('Restarting postgres service')
    app.db_service.restart()
    print('Starting EVM')
    app.evmserverd.start()
    print('Waiting for webui')
    app.wait_for_web_ui()
    print('Appliance database restore and migration completed')


@main.command('reboot', help='Reboots the appliance')
@click.argument('appliance_ip', default=None, required=False)
@click.option('--wait-for-ui', is_flag=True, default=True)
def reboot_appliance(appliance_ip, wait_for_ui):
    """Reboots an appliance"""
    app = get_appliance(appliance_ip)
    app.reboot(wait_for_ui)


@main.command('setup-webmks', help='Sets up VMware WebMKS on an appliance by downloading '
              'and extracting the SDK to the required location')
@click.argument('appliance_ip', default=None, required=False)
def config_webmks(appliance_ip):
    appliance = get_appliance(appliance_ip)
    server_settings = appliance.server.settings
    server_settings.update_vmware_console({'console_type': 'VMware WebMKS'})
    roles = server_settings.server_roles_db
    if 'websocket' in roles and not roles['websocket']:
        server_settings.enable_server_roles('websocket')


# Useful Properties
methods_to_install = [
    'is_db_enabled',
    'managed_provider_names',
    'miqqe_version',
    'os_version',
    'swap',
    'miqqe_patch_applied']


def fn(method, *args, **kwargs):
    """Helper to access the right properties"""
    from cfme.utils.appliance import IPAppliance
    appliance_ip = kwargs.get('appliance_ip', None)
    app = get_appliance(appliance_ip)
    descriptor = getattr(IPAppliance, method)
    if isinstance(descriptor, (cached_property, property)):
        out = getattr(app, method)
    else:
        out = getattr(app, method)(*args, **kwargs)
    if out is not None:
        print(out)


for method in methods_to_install:
    command = click.Command(
        method.replace('_', '-'),
        short_help='Returns the {} property'.format(method),
        callback=partial(fn, method),
        params=[
            click.Argument(['appliance_ip'], default=None, required=False)])
    main.add_command(command)

if __name__ == "__main__":
    main()
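# Hedged usage sketch (the appliance IP and backup URL are hypothetical; the
# command names and options mirror the click definitions above):
#
#   python appliance.py upgrade 10.0.0.1 --update-to 5.10.z
#   python appliance.py migrate 10.0.0.1 --db-url http://example.com/evm_db.backup
#   python appliance.py reboot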
gpl-2.0
-6,823,417,460,662,708,000
36.027027
99
0.668613
false
glemaitre/UnbalancedDataset
imblearn/ensemble/tests/test_classifier.py
2
17981
"""Test the module ensemble classifiers.""" # Authors: Guillaume Lemaitre <[email protected]> # Christos Aridas # License: MIT import numpy as np from sklearn.datasets import load_iris, make_hastie_10_2 from sklearn.model_selection import (GridSearchCV, ParameterGrid, train_test_split) from sklearn.dummy import DummyClassifier from sklearn.linear_model import Perceptron, LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.feature_selection import SelectKBest from sklearn.utils.testing import (assert_array_equal, assert_array_almost_equal, assert_raises, assert_warns, assert_warns_message) from imblearn.datasets import make_imbalance from imblearn.ensemble import BalancedBaggingClassifier from imblearn.pipeline import make_pipeline from imblearn.under_sampling import RandomUnderSampler iris = load_iris() def test_balanced_bagging_classifier(): # Check classification for various parameter settings. X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [1, 2, 4], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyClassifier(), Perceptron(), DecisionTreeClassifier(), KNeighborsClassifier(), SVC()]: for params in grid: BalancedBaggingClassifier( base_estimator=base_estimator, random_state=0, **params).fit(X_train, y_train).predict(X_test) def test_bootstrap_samples(): # Test that bootstrapping samples generate non-perfect base estimators. X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) base_estimator = DecisionTreeClassifier().fit(X_train, y_train) # without bootstrap, all trees are perfect on the training set # disable the resampling by passing an empty dictionary. ensemble = BalancedBaggingClassifier( base_estimator=DecisionTreeClassifier(), max_samples=1.0, bootstrap=False, n_estimators=10, ratio={}, random_state=0).fit(X_train, y_train) assert (ensemble.score(X_train, y_train) == base_estimator.score(X_train, y_train)) # with bootstrap, trees are no longer perfect on the training set ensemble = BalancedBaggingClassifier( base_estimator=DecisionTreeClassifier(), max_samples=1.0, bootstrap=True, random_state=0).fit(X_train, y_train) assert (ensemble.score(X_train, y_train) < base_estimator.score(X_train, y_train)) def test_bootstrap_features(): # Test that bootstrapping features may generate duplicate features. X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) ensemble = BalancedBaggingClassifier( base_estimator=DecisionTreeClassifier(), max_features=1.0, bootstrap_features=False, random_state=0).fit(X_train, y_train) for features in ensemble.estimators_features_: assert np.unique(features).shape[0] == X.shape[1] ensemble = BalancedBaggingClassifier( base_estimator=DecisionTreeClassifier(), max_features=1.0, bootstrap_features=True, random_state=0).fit(X_train, y_train) unique_features = [np.unique(features).shape[0] for features in ensemble.estimators_features_] assert np.median(unique_features) < X.shape[1] def test_probability(): # Predict probabilities. 
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) with np.errstate(divide="ignore", invalid="ignore"): # Normal case ensemble = BalancedBaggingClassifier( base_estimator=DecisionTreeClassifier(), random_state=0).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) # Degenerate case, where some classes are missing ensemble = BalancedBaggingClassifier( base_estimator=LogisticRegression(), random_state=0, max_samples=5).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) def test_oob_score_classification(): # Check that oob prediction is a good estimation of the generalization # error. X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for base_estimator in [DecisionTreeClassifier(), SVC()]: clf = BalancedBaggingClassifier( base_estimator=base_estimator, n_estimators=100, bootstrap=True, oob_score=True, random_state=0).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert abs(test_score - clf.oob_score_) < 0.1 # Test with few estimators assert_warns(UserWarning, BalancedBaggingClassifier( base_estimator=base_estimator, n_estimators=1, bootstrap=True, oob_score=True, random_state=0).fit, X_train, y_train) def test_single_estimator(): # Check singleton ensembles. X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf1 = BalancedBaggingClassifier( base_estimator=KNeighborsClassifier(), n_estimators=1, bootstrap=False, bootstrap_features=False, random_state=0).fit(X_train, y_train) clf2 = make_pipeline(RandomUnderSampler( random_state=clf1.estimators_[0].steps[0][1].random_state), KNeighborsClassifier()).fit(X_train, y_train) assert_array_equal(clf1.predict(X_test), clf2.predict(X_test)) def test_error(): # Test that it gives proper exception on deficient input. 
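    # Descriptive note: each assert_raises below feeds one invalid constructor
    # argument (n_estimators, max_samples or max_features) and expects a
    # ValueError at fit time.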
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}) base = DecisionTreeClassifier() # Test n_estimators assert_raises(ValueError, BalancedBaggingClassifier(base, n_estimators=1.5).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, n_estimators=-1).fit, X, y) # Test max_samples assert_raises(ValueError, BalancedBaggingClassifier(base, max_samples=-1).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, max_samples=0.0).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, max_samples=2.0).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, max_samples=1000).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, max_samples="foobar").fit, X, y) # Test max_features assert_raises(ValueError, BalancedBaggingClassifier(base, max_features=-1).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, max_features=0.0).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, max_features=2.0).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, max_features=5).fit, X, y) assert_raises(ValueError, BalancedBaggingClassifier(base, max_features="foobar").fit, X, y) # Test support of decision_function assert not (hasattr(BalancedBaggingClassifier(base).fit(X, y), 'decision_function')) def test_gridsearch(): # Check that bagging ensembles can be grid-searched. # Transform iris into a binary classification task X, y = iris.data, iris.target.copy() y[y == 2] = 1 # Grid search with scoring based on decision_function parameters = {'n_estimators': (1, 2), 'base_estimator__C': (1, 2)} GridSearchCV(BalancedBaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y) def test_base_estimator(): # Check base_estimator and its default values. X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) ensemble = BalancedBaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert isinstance(ensemble.base_estimator_.steps[-1][1], DecisionTreeClassifier) ensemble = BalancedBaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) assert isinstance(ensemble.base_estimator_.steps[-1][1], DecisionTreeClassifier) ensemble = BalancedBaggingClassifier(Perceptron(), n_jobs=3, random_state=0).fit(X_train, y_train) assert isinstance(ensemble.base_estimator_.steps[-1][1], Perceptron) def test_bagging_with_pipeline(): X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50}, random_state=0) estimator = BalancedBaggingClassifier( make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2) estimator.fit(X, y).predict(X) def test_warm_start(random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. 
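    # Descriptive note: the ensemble is grown from 5 to 10 estimators via
    # warm_start and then compared against a single cold fit with
    # n_estimators=10.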
X, y = make_hastie_10_2(n_samples=20, random_state=1) clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = BalancedBaggingClassifier(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert len(clf_ws) == n_estimators clf_no_ws = BalancedBaggingClassifier(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert (set([pipe.steps[-1][1].random_state for pipe in clf_ws]) == set([pipe.steps[-1][1].random_state for pipe in clf_no_ws])) def test_warm_start_smaller_n_estimators(): # Test if warm start'ed second fit with smaller n_estimators raises error. X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_equal_n_estimators(): # Test that nothing happens when fitting without increasing n_estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True, random_state=83) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # modify X to nonsense values, this should not change anything X_train += 1. assert_warns_message(UserWarning, "Warm-start fitting without increasing n_estimators" " does not", clf.fit, X_train, y_train) assert_array_equal(y_pred, clf.predict(X_test)) def test_warm_start_equivalence(): # warm started classifier with 5+5 estimators should be equivalent to # one classifier with 10 estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf_ws = BalancedBaggingClassifier(n_estimators=5, warm_start=True, random_state=3141) clf_ws.fit(X_train, y_train) clf_ws.set_params(n_estimators=10) clf_ws.fit(X_train, y_train) y1 = clf_ws.predict(X_test) clf = BalancedBaggingClassifier(n_estimators=10, warm_start=False, random_state=3141) clf.fit(X_train, y_train) y2 = clf.predict(X_test) assert_array_almost_equal(y1, y2) def test_warm_start_with_oob_score_fails(): # Check using oob_score and warm_start simultaneously fails X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True, oob_score=True) assert_raises(ValueError, clf.fit, X, y) def test_oob_score_removed_on_warm_start(): X, y = make_hastie_10_2(n_samples=2000, random_state=1) clf = BalancedBaggingClassifier(n_estimators=50, oob_score=True) clf.fit(X, y) clf.set_params(warm_start=True, oob_score=False, n_estimators=100) clf.fit(X, y) assert_raises(AttributeError, getattr, clf, "oob_score_") def test_oob_score_consistency(): # Make sure OOB scores are identical when random_state, estimator, and # training data are fixed and fitting is done twice X, y = make_hastie_10_2(n_samples=200, random_state=1) bagging = BalancedBaggingClassifier(KNeighborsClassifier(), max_samples=0.5, max_features=0.5, oob_score=True, random_state=1) assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_ # FIXME: uncomment when #9723 is merged in scikit-learn # def test_estimators_samples(): # # Check that format of estimators_samples_ is correct and that results # # generated at fit time can be identically reproduced at a later time # # using data saved in object attributes. 
# X, y = make_hastie_10_2(n_samples=200, random_state=1) # # remap the y outside of the BalancedBaggingclassifier # # _, y = np.unique(y, return_inverse=True) # bagging = BalancedBaggingClassifier(LogisticRegression(), # max_samples=0.5, # max_features=0.5, random_state=1, # bootstrap=False) # bagging.fit(X, y) # # Get relevant attributes # estimators_samples = bagging.estimators_samples_ # estimators_features = bagging.estimators_features_ # estimators = bagging.estimators_ # # Test for correct formatting # assert len(estimators_samples) == len(estimators) # assert len(estimators_samples[0]) == len(X) # assert estimators_samples[0].dtype.kind == 'b' # # Re-fit single estimator to test for consistent sampling # estimator_index = 0 # estimator_samples = estimators_samples[estimator_index] # estimator_features = estimators_features[estimator_index] # estimator = estimators[estimator_index] # X_train = (X[estimator_samples])[:, estimator_features] # y_train = y[estimator_samples] # orig_coefs = estimator.steps[-1][1].coef_ # estimator.fit(X_train, y_train) # new_coefs = estimator.steps[-1][1].coef_ # assert_array_almost_equal(orig_coefs, new_coefs) def test_max_samples_consistency(): # Make sure validated max_samples and original max_samples are identical # when valid integer max_samples supplied by user max_samples = 100 X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1) bagging = BalancedBaggingClassifier(KNeighborsClassifier(), max_samples=max_samples, max_features=0.5, random_state=1) bagging.fit(X, y) assert bagging._max_samples == max_samples
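# A minimal usage sketch of the estimator exercised above (illustrative only,
# not a test; it mirrors the fit/predict pattern used throughout this module):
#
#   from imblearn.ensemble import BalancedBaggingClassifier
#   clf = BalancedBaggingClassifier(n_estimators=10, random_state=0)
#   clf.fit(X_train, y_train)
#   y_pred = clf.predict(X_test)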
mit
2,519,711,170,101,688,300
38.693157
78
0.58395
false
iddqd1/django-cms
cms/models/titlemodels.py
50
5426
# -*- coding: utf-8 -*- from datetime import timedelta from django.db import models from django.utils import timezone from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from cms.constants import PUBLISHER_STATE_DIRTY from cms.models.managers import TitleManager from cms.models.pagemodel import Page from cms.utils.helpers import reversion_register @python_2_unicode_compatible class Title(models.Model): language = models.CharField(_("language"), max_length=15, db_index=True) title = models.CharField(_("title"), max_length=255) page_title = models.CharField(_("title"), max_length=255, blank=True, null=True, help_text=_("overwrite the title (html title tag)")) menu_title = models.CharField(_("title"), max_length=255, blank=True, null=True, help_text=_("overwrite the title in the menu")) meta_description = models.TextField(_("description"), max_length=155, blank=True, null=True, help_text=_("The text displayed in search engines.")) slug = models.SlugField(_("slug"), max_length=255, db_index=True, unique=False) path = models.CharField(_("Path"), max_length=255, db_index=True) has_url_overwrite = models.BooleanField(_("has url overwrite"), default=False, db_index=True, editable=False) redirect = models.CharField(_("redirect"), max_length=2048, blank=True, null=True) page = models.ForeignKey(Page, verbose_name=_("page"), related_name="title_set") creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now) # Publisher fields published = models.BooleanField(_("is published"), blank=True, default=False) publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True) # This is misnamed - the one-to-one relation is populated on both ends publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False) publisher_state = models.SmallIntegerField(default=0, editable=False, db_index=True) objects = TitleManager() class Meta: unique_together = (('language', 'page'),) app_label = 'cms' def __str__(self): return u"%s (%s, %s)" % (self.title, self.slug, self.language) def update_path(self): # Build path from parent page's path and slug slug = u'%s' % self.slug if not self.has_url_overwrite: self.path = u'%s' % slug if self.page.parent_id: parent_page = self.page.parent_id parent_title = Title.objects.get_title(parent_page, language=self.language, language_fallback=True) if parent_title: self.path = u'%s/%s' % (parent_title.path, slug) @property def overwrite_url(self): """Return overwritten url, or None """ if self.has_url_overwrite: return self.path return None def is_dirty(self): return self.publisher_state == PUBLISHER_STATE_DIRTY def save_base(self, *args, **kwargs): """Overridden save_base. If an instance is draft, and was changed, mark it as dirty. Dirty flag is used for changed nodes identification when publish method takes place. After current changes are published, state is set back to PUBLISHER_STATE_DEFAULT (in publish method). """ keep_state = getattr(self, '_publisher_keep_state', None) # Published pages should always have a publication date # if the page is published we set the publish date if not set yet. 
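        # (the date is backdated by a few seconds, presumably so the page is
        # treated as already published by any "publication_date <= now" check)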
if self.page.publication_date is None and self.published:
            self.page.publication_date = timezone.now() - timedelta(seconds=5)
        if self.publisher_is_draft and not keep_state and self.is_new_dirty():
            self.publisher_state = PUBLISHER_STATE_DIRTY
        if keep_state:
            delattr(self, '_publisher_keep_state')
        ret = super(Title, self).save_base(*args, **kwargs)
        return ret

    def is_new_dirty(self):
        if self.pk:
            fields = [
                'title', 'page_title', 'menu_title', 'meta_description',
                'slug', 'has_url_overwrite', 'redirect'
            ]
            try:
                old_title = Title.objects.get(pk=self.pk)
            except Title.DoesNotExist:
                return True
            for field in fields:
                old_val = getattr(old_title, field)
                new_val = getattr(self, field)
                if not old_val == new_val:
                    return True
            return False
        return True


class EmptyTitle(object):
    """Empty title object, can be returned from Page.get_title_obj() if the
    required title object doesn't exist.
    """

    def __init__(self, language):
        self.language = language

    title = ""
    slug = ""
    path = ""
    meta_description = ""
    redirect = ""
    has_url_overwrite = False
    application_urls = ""
    menu_title = ""
    page_title = ""
    published = False

    @property
    def overwrite_url(self):
        return None


def _reversion():
    exclude_fields = ['publisher_is_draft', 'publisher_public', 'publisher_state']

    reversion_register(
        Title,
        exclude_fields=exclude_fields
    )


_reversion()
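# Illustrative example of Title.update_path() above (the values are
# hypothetical): for a child title with slug "contact" whose parent title
# resolves to path "about", the computed path becomes "about/contact",
# unless has_url_overwrite is set, in which case the path is left alone.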
bsd-3-clause
-3,959,758,635,465,392,000
36.427586
115
0.623111
false
savfod/ptset
ptset.py
1
14571
#!/usr/bin/python3 DESCRIPTION = ''' Program for ptset drawing. Some examples of usage: python ptset.py -h python ptset.py --curve 9 --draw_points A,0.199,True B,0.412,True python ptset.py --curve 8 --draw_points A,-0.36,True X1,-0.26 B,0,True X2,0.26 C,0.36,True python ptset.py --curve 7 --tangent_curve python ptset.py --curve 4 --points_count 50 ''' from vec import Vec from line import Line from curve import Curve from drawer import Drawer import argparse import math import random import sys RADIUS = 2 RAD_ADD = 3 COUNT = None # getting from params START_X = -1 FINISH_X = 1 FILL1 = "green" FILL2 = "blue" FILL3 = "red" drawer = None # useful for debugging DRAW_POINTS = [] # getting from params POINTS_MULTIPLIER = 1000 def init_tk_drawer(): global drawer drawer = Drawer() return drawer.tk, drawer def vertical_line(x): return Line(Vec(x, 0), Vec(0, 1)) class Interface: def __init__(self, drawer, function, tangent_function=None): self.drawer = drawer self.function = function self.tangent_function = tangent_function self.prev_points = {"tang_point":None, "tang_pair_point":None} self.current_index = 0 self.to_remove = [] points = [] for i in range(COUNT): x = START_X + (FINISH_X-START_X)*i/float(COUNT-1) y = function(x) points.append(Vec(x,y)) self.curve = Curve(points, closed=False) self.drawer.draw_curve(self.curve, fill=FILL1) semiplane = [] HOR_COUNT = COUNT * 2 def i_to_x(i): return START_X + (FINISH_X-START_X)*i/float(HOR_COUNT-1) def j_to_x(j): return START_X + (FINISH_X-START_X)*j/float(VER_COUNT-1) VER_COUNT = COUNT * 2 for j in range(VER_COUNT): semiplane.append([]) px = j_to_x(j) px_line = vertical_line(px) for i in range(HOR_COUNT): tx = i_to_x(i) ty = function(tx) T = Vec(tx, ty) dx = 0.001 der = (function(tx + dx) - function(tx - dx))/(2 * dx) tangent = Line(T, Vec(1, der)) t_value = px_line.intersect(tangent).y t_value_2 = ty + der * (px - tx) # print(t_value, t_value_2) semiplane_value = self.function(px) < t_value semiplane[-1].append(semiplane_value) # self.drawer.draw_circle(Vec(px,tx), r=1, img_index=1) #draw edges def draw_edge(i1, i2, j1, j2): def to_vec(i, j): return Vec(i_to_x(i), j_to_x(j)) self.drawer.draw_line(to_vec(i1, j1), to_vec(i2, j2), fill=FILL2, img_index=2, width=2) self.drawer.draw_line(to_vec(i1, j1), to_vec(i2, j2), fill=FILL2, img_index=3, width=2) for i in range(VER_COUNT - 1): for j in range(HOR_COUNT - 1): four_value = ( semiplane[i][j], semiplane[i+1][j], semiplane[i][j+1], semiplane[i+1][j+1] ) #horizontal_edges if four_value == (True, True, False, False): draw_edge(i, i+1, j, j) elif four_value == (False, False, True, True): draw_edge(i, i+1, j+1, j+1) #vertical_edges elif four_value == (True, False, True, False): draw_edge(i, i, j, j+1) elif four_value == (False, True, False, True): draw_edge(i+1, i+1, j, j+1) #diagonal_edge else: d1 = four_value[0], four_value[3] d2 = four_value[1], four_value[2] if d1 == (True, True) and False in d2: draw_edge(i, i+1, j, j+1) elif d2 == (True, True) and False in d1: draw_edge(i, i+1, j+1, j) DIAG_COUNT = COUNT // 5 def diag_x(i): return START_X + (FINISH_X-START_X)*i/float(DIAG_COUNT-1) for i in range(DIAG_COUNT): x1 = diag_x(i) x2 = diag_x(i+1) self.drawer.draw_line(Vec(x1, x1), Vec(x2, x2), width=3, fill=FILL1, img_index=2) self.drawer.draw_line(Vec(x1, x1), Vec(x2, x2), width=3, fill=FILL1, img_index=3) self.points = [] self.is_drawing = True self.selected_point = None self.tangent_points = self.calc_tangent_points(function) # for x in [-0.65, -0.45, -0.25, -0.05]: # x -= 0.02 # self.drawer.draw_line(Vec(x, 
0.2), Vec(x, 0.4), img_index=2, fill=FILL3, width=1)

    def calc_tangent_points(self, function):
        DIFF = 0.1
        max_skip = (FINISH_X - START_X)*3 / float(POINTS_MULTIPLIER*COUNT)
        average_skip = (FINISH_X - START_X) / float(POINTS_MULTIPLIER*COUNT)
        min_skip = (FINISH_X - START_X) / float(5*POINTS_MULTIPLIER*COUNT)

        points = [START_X]
        while points[-1] < FINISH_X:
            x = points[-1]
            der2 = (function(x - DIFF) + function(x + DIFF) - 2*function(x)) / DIFF**2
            skip = 100 * average_skip / (abs(der2)**2 + 0.00001)
            # if min_skip < skip < max_skip:
            #     print ("Success") #DEBUG. TO CALC GOOD COEFFICIENT
            # else:
            #     if min_skip < skip:
            #         print("Small")
            #     else:
            #         print("Big")

            skip = min(skip, max_skip)
            skip = max(min_skip, skip)
            points.append(x + skip)
        return points

    def draw_point(self, x, label, with_tangent=False):
        l = vertical_line(x)
        points = self.curve.find_intersections(l)
        p = points[0]
        tangents = self.curve.find_intersections_tangents(l)
        t = tangents[0]

        self.drawer.draw_circle(p, fill=FILL2, label=label)
        if with_tangent:
            self.drawer.draw_line(t.start - t.vec*(10/abs(t.vec)), t.start + t.vec*(10/abs(t.vec)), dash=[8,4])
            self.drawer.draw_line(Vec(p.x, p.x) - Vec(10,0), Vec(p.x, p.x) + Vec(10,0), img_index=2, dash=[8,4])
        self.drawer.draw_circle(Vec(p.x, p.x), fill=FILL2, img_index=2, label=label)

    def draw_pic(self):
        self.is_drawing = True
        self.drawer.tk.after(10, self.draw_pic_iteration)

        def parse_str(s):
            try:
                parts = s.strip().split(",")
                if len(parts) == 2:
                    parts = parts + [""]  # bool("") == False
                return parts[0].strip(), float(parts[1]), bool(parts[2])
            except:
                raise ValueError('Unexpected point params. Expected string in format label,x_coordinate[,draw_tangent]. E.g. "A,0" or "B,-0.5,True"')

        if DRAW_POINTS:
            for s in DRAW_POINTS:
                label, x, with_tangent = parse_str(s)
                self.draw_point(x, label, with_tangent)

    def image2(self, vec):
        return Vec(vec.x + 2, vec.y)

    def draw_pic_iteration(self):
        self.drawer.remove_tmp()

        if self.current_index + 1 < len(self.tangent_points):
            self.current_index += 1
        else:
            self.current_index = 0
            for k in self.prev_points.keys():
                self.prev_points[k] = None

        i = self.current_index
        skip = self.tangent_points[i+1] - self.tangent_points[i] if i+1 < len(self.tangent_points) else self.tangent_points[i] - self.tangent_points[i-1]
        x = self.tangent_points[i] + random.random()*skip
        # print("iteration, x=", x)

        l = vertical_line(x)
        self.drawer.draw_line(Vec(START_X,x), Vec(FINISH_X,x), tmp_object=True, img_index=2)

        tangents = self.curve.find_intersections_tangents(l)
        points = self.curve.find_intersections(l)
        if len(tangents) == 1:
            self.drawer.draw_line(tangents[0].start - tangents[0].vec*(10/abs(tangents[0].vec)),
                                  tangents[0].start + tangents[0].vec*(10/abs(tangents[0].vec)), tmp_object=True)
            self.drawer.draw_circle(points[0], r=RAD_ADD+RADIUS, fill=FILL1, tmp_object=True)

            points = self.curve.find_intersections(tangents[0])
            for (ind,p) in enumerate(points):
                self.drawer.draw_circle(p, r=RAD_ADD+ind+RADIUS, fill=FILL2, tmp_object=True)
                # self.drawer.draw_circle(Vec(p.x, x), img_index=2)
                # self.drawer.draw_circle(Vec(p.x, x), img_index=3)
                self.drawer.draw_circle(Vec(p.x, x), r=(RAD_ADD+ind)+RADIUS, fill=FILL2, img_index=2, tmp_object=True)
                self.drawer.draw_circle(Vec(p.x, x), r=(RAD_ADD+ind)+RADIUS, fill=FILL2, img_index=3, tmp_object=True)

            if self.tangent_function:
                l2 = vertical_line(self.tangent_function(x))
                tang_p = tangents[0].intersect(l2)
                self.drawer.draw_circle(Vec(tang_p.x, x), r=2*RADIUS, fill=FILL3, img_index=2, tmp_object=True)
                self.drawer.draw_circle(Vec(tang_p.x, x), r=2*RADIUS, fill=FILL3, img_index=3, tmp_object=True)
                #self.drawer.draw_circle(Vec(tang_p.x, x), r=RADIUS//2, fill=FILL3, img_index=2)
                if self.prev_points["tang_pair_point"]:
                    self.drawer.draw_line(self.prev_points["tang_pair_point"], Vec(tang_p.x, x), fill=FILL3, img_index=2)
                self.prev_points["tang_pair_point"] = Vec(tang_p.x, x)

                self.drawer.draw_circle(tang_p, r=2*RADIUS, fill=FILL3, tmp_object=True)
                # self.drawer.draw_circle(tang_p, r=RADIUS//2, fill=FILL3)
                if self.prev_points["tang_point"]:
                    self.drawer.draw_line(self.prev_points["tang_point"], tang_p, fill=FILL3)
                self.prev_points["tang_point"] = Vec(tang_p.x, tang_p.y)
        else:
            #print(x, len(tangents), len(points))
            pass

        self.drawer.draw_circle(Vec(x,x), r=RAD_ADD+RADIUS, fill=FILL1, img_index=2, tmp_object=True)
        self.drawer.draw_circle(Vec(x,x), r=RAD_ADD+RADIUS, fill=FILL1, img_index=3, tmp_object=True)
        # self.drawer.draw_circle(Vec(x,x), fill=FILL1, img_index=2)
        # self.drawer.draw_circle(Vec(x,x), fill=FILL1, img_index=3)

        if self.is_drawing:
            self.drawer.tk.after(10, self.draw_pic_iteration)

        # for v in self.drawer.canvases.values():
        #     v.update_idletasks()

    def start_drawing(self, event):
        self.is_drawing = True
        self.draw_pic()
        # self.add_point(event.x, event.y)

    def stop_drawing(self, event):
        self.is_drawing = False

    def remove_tmp(self):
        self.is_drawing = False
        self.drawer.remove_tmp()

    def zoom(self, event):
        print("Hello windows/macos! Not-tested scaling.")
        self.drawer.scale(1.1 ** event.delta, event.x, event.y)

    def zoom_in(self, event):
        self.drawer.scale(1.1, event.x, event.y)

    def zoom_out(self, event):
        self.drawer.scale(1.1 ** (-1), event.x, event.y)


def parse_args():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=DESCRIPTION,
    )
    # parser.add_argument('--rounds', type=int, default=2, help='how many rounds each pair plays')
    parser.add_argument('--curve', type=int, default="9", help='curve function index')
    parser.add_argument('--points_multiplier', type=int, default="2", help='multiplier for tangent-point sampling density')
    parser.add_argument('--tangent_curve', action="store_true", help='draw tangent curve')
    parser.add_argument('--points_count', type=int, default=180, help='how many points to use (more points is slower)')
    # parser.add_argument('--cyclic', action="store_true", default="False", help='draw tangent curve')
    # parser.add_argument('--draw_points', action="store_true", default=False, help='draw selected points')
    parser.add_argument('--draw_points', nargs="+", help='draw selected points. format: label,x_coordinate[,draw_tangent]')

    parsed_args = parser.parse_args()
    global POINTS_MULTIPLIER
    POINTS_MULTIPLIER = parsed_args.points_multiplier
    global DRAW_POINTS
    DRAW_POINTS = parsed_args.draw_points
    global COUNT
    COUNT = parsed_args.points_count
    return parsed_args


def func(x, ind=9):
    # several types of prepared functions
    x *= 2
    if ind == 1:
        return (x**6 - 5*x**4 + 6*x**2 - 1)/2
    elif ind == 2:
        return (x**6 - 5*x**4 + 6*x**2 - 1)/2/(1 + (2*x)**8)
    elif ind == 3:
        return (128*x**8 - 256*x**6 + 160*x**4 - 32*x**2 + 1)
    elif ind == 4:
        return (128*x**8 - 256*x**6 + 160*x**4 - 32*x**2 + 1)/(1 + 128*x**12)
    elif ind == 5:
        return (x**6 - 5*x**4 + 6*x**2 - 1)/2
    elif ind == 6:
        x = 1.3*x
        return (15*x**5 - 29*x**3 + 7*x)/(3 + 30*x**10) + 0.01
    elif ind == 7:
        return (x**3 - x) / (10*x**4 + 1)
    elif ind == 8:
        return (x) / (10*x**6 + 1) + 0.01
    elif ind == 9:
        # special curve with isolated closed curves in ptset
        x *= 10
        x += 2
        x1 = x + 8
        x2 = x - 8
        x3 = x2 + 3.5
        res = 1/(0.01*x1**6 + 0.03*x1**2 + 0.8) \
            - 1/(0.01*x2**6 - 0.01*(x3)**2 + 0.8) \
            - 0.04
        return res / 2
    elif ind == 10:
        x = 2*x
        return (x)/(0.1*x**6 + 0.8) - x/(10*x**2 + 1) + 0.01
    else:
        raise ValueError("no function with such index")


def main():
    args = parse_args()
    tk, drawer = init_tk_drawer()

    def function(x):
        return func(x, args.curve)

    tang_func = (lambda x: x+2/(100*x**2 + 4)) if args.tangent_curve else None
    interface = Interface(drawer, function, tang_func)
    # interface.is_drawing = args.cyclic

    tk.bind("<Button-1>", interface.start_drawing)
    # tk.bind("<ButtonRelease-1>", interface.stop_drawing)
    # tk.bind("<Motion>", interface.draw)
    tk.bind("<ButtonRelease-2>", interface.stop_drawing)
    tk.bind("<ButtonRelease-3>", interface.stop_drawing)

    tk.bind("<MouseWheel>", interface.zoom)
    tk.bind("<Button-4>", interface.zoom_in)
    tk.bind("<Button-5>", interface.zoom_out)
    # tk.focus_set()  #comment this line for image without (with pale) edge
    tk.bind("<Escape>", lambda x: interface.remove_tmp())

    tk.after(100, lambda: interface.start_drawing(None))
    tk.mainloop()


if __name__ == "__main__":
    main()
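# Point specs accepted by --draw_points are parsed by parse_str() inside
# Interface.draw_pic() above:
#   "A,0.199,True"  -> label "A", x = 0.199, tangent drawn
#   "B,-0.5"        -> label "B", x = -0.5, no tangent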
mit
-2,547,403,635,658,296,000
33.858852
176
0.547732
false
nwchandler/ansible
lib/ansible/cli/__init__.py
15
33623
# (c) 2012-2014, Michael DeHaan <[email protected]> # (c) 2016, Toshio Kuratomi <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import getpass import operator import optparse import os import subprocess import re import sys import time import yaml from abc import ABCMeta, abstractmethod import ansible from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.inventory.manager import InventoryManager from ansible.module_utils.six import with_metaclass, string_types from ansible.module_utils._text import to_bytes, to_text from ansible.parsing.dataloader import DataLoader from ansible.release import __version__ from ansible.utils.path import unfrackpath from ansible.utils.vars import load_extra_vars, load_options_vars from ansible.vars.manager import VariableManager try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class SortedOptParser(optparse.OptionParser): '''Optparser which sorts the options by opt before outputting --help''' def format_help(self, formatter=None, epilog=None): self.option_list.sort(key=operator.methodcaller('get_opt_string')) return optparse.OptionParser.format_help(self, formatter=None) # Note: Inherit from SortedOptParser so that we get our format_help method class InvalidOptsParser(SortedOptParser): '''Ignore invalid options. Meant for the special case where we need to take care of help and version but may not know the full range of options yet. (See it in use in set_action) ''' def __init__(self, parser): # Since this is special purposed to just handle help and version, we # take a pre-existing option parser here and set our options from # that. This allows us to give accurate help based on the given # option parser. 
SortedOptParser.__init__(self, usage=parser.usage,
                                 option_list=parser.option_list,
                                 option_class=parser.option_class,
                                 conflict_handler=parser.conflict_handler,
                                 description=parser.description,
                                 formatter=parser.formatter,
                                 add_help_option=False,
                                 prog=parser.prog,
                                 epilog=parser.epilog)
        self.version = parser.version

    def _process_long_opt(self, rargs, values):
        try:
            optparse.OptionParser._process_long_opt(self, rargs, values)
        except optparse.BadOptionError:
            pass

    def _process_short_opts(self, rargs, values):
        try:
            optparse.OptionParser._process_short_opts(self, rargs, values)
        except optparse.BadOptionError:
            pass


class CLI(with_metaclass(ABCMeta, object)):
    ''' code behind bin/ansible* programs '''

    VALID_ACTIONS = []

    _ITALIC = re.compile(r"I\(([^)]+)\)")
    _BOLD = re.compile(r"B\(([^)]+)\)")
    _MODULE = re.compile(r"M\(([^)]+)\)")
    _URL = re.compile(r"U\(([^)]+)\)")
    _CONST = re.compile(r"C\(([^)]+)\)")

    PAGER = 'less'

    # -F (quit-if-one-screen) -R (allow raw ansi control chars)
    # -S (chop long lines) -X (disable termcap init and de-init)
    LESS_OPTS = 'FRSX'

    def __init__(self, args, callback=None):
        """
        Base init method for all command line programs
        """

        self.args = args
        self.options = None
        self.parser = None
        self.action = None
        self.callback = callback

    def set_action(self):
        """
        Get the action the user wants to execute from the sys argv list.
        """
        for i in range(0, len(self.args)):
            arg = self.args[i]
            if arg in self.VALID_ACTIONS:
                self.action = arg
                del self.args[i]
                break

        if not self.action:
            # if we're asked for help or version, we don't need an action.
            # have to use a special purpose Option Parser to figure that out as
            # the standard OptionParser throws an error for unknown options and
            # without knowing action, we only know of a subset of the options
            # that could be legal for this command
            tmp_parser = InvalidOptsParser(self.parser)
            tmp_options, tmp_args = tmp_parser.parse_args(self.args)
            # only raise if neither --help nor --version was requested
            if not ((hasattr(tmp_options, 'help') and tmp_options.help) or
                    (hasattr(tmp_options, 'version') and tmp_options.version)):
                raise AnsibleOptionsError("Missing required action")

    def execute(self):
        """
        Actually runs a child defined method using the execute_<action> pattern
        """
        fn = getattr(self, "execute_%s" % self.action)
        fn()

    @abstractmethod
    def run(self):
        """Run the ansible command

        Subclasses must implement this method.  It does the actual work of
        running an Ansible command.
""" display.vv(self.parser.get_version()) if C.CONFIG_FILE: display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE)) else: display.v(u"No config file found; using defaults") @staticmethod def ask_vault_passwords(): ''' prompt for vault password and/or password change ''' vault_pass = None try: vault_pass = getpass.getpass(prompt="Vault password: ") except EOFError: pass # enforce no newline chars at the end of passwords if vault_pass: vault_pass = to_bytes(vault_pass, errors='surrogate_or_strict', nonstring='simplerepr').strip() return vault_pass @staticmethod def ask_new_vault_passwords(): new_vault_pass = None try: new_vault_pass = getpass.getpass(prompt="New Vault password: ") new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") if new_vault_pass != new_vault_pass2: raise AnsibleError("Passwords do not match") except EOFError: pass if new_vault_pass: new_vault_pass = to_bytes(new_vault_pass, errors='surrogate_or_strict', nonstring='simplerepr').strip() return new_vault_pass def ask_passwords(self): ''' prompt for connection and become passwords if needed ''' op = self.options sshpass = None becomepass = None become_prompt = '' try: if op.ask_pass: sshpass = getpass.getpass(prompt="SSH password: ") become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper() if sshpass: sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') else: become_prompt = "%s password: " % op.become_method.upper() if op.become_ask_pass: becomepass = getpass.getpass(prompt=become_prompt) if op.ask_pass and becomepass == '': becomepass = sshpass if becomepass: becomepass = to_bytes(becomepass) except EOFError: pass return (sshpass, becomepass) def normalize_become_options(self): ''' this keeps backwards compatibility with sudo/su self.options ''' self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER def _dep(which): display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.6') if self.options.become: pass elif self.options.sudo: self.options.become = True self.options.become_method = 'sudo' _dep('sudo') elif self.options.su: self.options.become = True self.options.become_method = 'su' _dep('su') # other deprecations: if self.options.ask_sudo_pass or self.options.sudo_user: _dep('sudo') if self.options.ask_su_pass or self.options.su_user: _dep('su') def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False): ''' check for conflicting options ''' op = self.options if vault_opts: # Check for vault related conflicts if (op.ask_vault_pass and op.vault_password_file): self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") if runas_opts: # Check for privilege escalation conflicts if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or (op.su or op.su_user) and (op.become or op.become_user) or (op.sudo or op.sudo_user) and (op.become or op.become_user)): self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') " "and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other") if fork_opts: if op.forks < 1: self.parser.error("The number of processes (--forks) must be >= 1") @staticmethod def 
unfrack_paths(option, opt, value, parser): if isinstance(value, string_types): setattr(parser.values, option.dest, [unfrackpath(x) for x in value.split(os.pathsep)]) elif isinstance(value, list): setattr(parser.values, option.dest, [unfrackpath(x) for x in value]) else: pass # FIXME: should we raise options error? @staticmethod def unfrack_path(option, opt, value, parser): setattr(parser.values, option.dest, unfrackpath(value)) @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False, async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False, desc=None): ''' create an options parser for most ansible scripts ''' # base opts parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog) parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count", help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") if inventory_opts: parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append", help="specify inventory host path (default=[%s]) or comma separated host list. " "--inventory-file is deprecated" % C.DEFAULT_HOST_LIST) parser.add_option('--list-hosts', dest='listhosts', action='store_true', help='outputs a list of matching hosts; does not execute anything else') parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', help='further limit selected hosts to an additional pattern') if module_opts: parser.add_option('-M', '--module-path', dest='module_path', default=None, help="prepend path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, action="callback", callback=CLI.unfrack_path, type='str') if runtask_opts: parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[]) if fork_opts: parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) if vault_opts: parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file', help="vault password file", action="callback", callback=CLI.unfrack_path, type='string') parser.add_option('--new-vault-password-file', dest='new_vault_password_file', help="new vault password file for rekey", action="callback", callback=CLI.unfrack_path, type='string') parser.add_option('--output', default=None, dest='output_file', help='output file name for encrypt or decrypt; use - for stdout', action="callback", callback=CLI.unfrack_path, type='string') if subset_opts: parser.add_option('-t', '--tags', dest='tags', default=[], action='append', help="only run plays and tasks tagged with these values") parser.add_option('--skip-tags', dest='skip_tags', default=[], action='append', help="only run plays and tasks whose tags do not match these values") if output_opts: parser.add_option('-o', '--one-line', dest='one_line', action='store_true', help='condense output') parser.add_option('-t', '--tree', dest='tree', default=None, help='log output to this directory') if connect_opts: connect_group = optparse.OptionGroup(parser, "Connection 
Options", "control as whom and how to connect to hosts") connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true', help='ask for connection password') connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string') connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args', help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)") connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args', help="specify extra arguments to pass to sftp only (e.g. -f, -l)") connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args', help="specify extra arguments to pass to scp only (e.g. -l)") connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args', help="specify extra arguments to pass to ssh only (e.g. -R)") parser.add_option_group(connect_group) runas_group = None rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts") if runas_opts: runas_group = rg # priv user defaults to root later on to enable detecting when this option was given here runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None, help='desired sudo user (default=root) (deprecated, use become)') runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true', help='run operations with su (deprecated, use become)') runas_group.add_option('-R', '--su-user', default=None, help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER) # consolidated privilege escalation (become) runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become', help="run operations with become (does not imply password prompting)") runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS, help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) runas_group.add_option('--become-user', default=None, dest='become_user', type='string', help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) if runas_opts or runas_prompt_opts: if not runas_group: runas_group = rg runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') runas_group.add_option('-K', '--ask-become-pass', 
default=False, dest='become_ask_pass', action='store_true', help='ask for privilege escalation password') if runas_group: parser.add_option_group(runas_group) if async_opts: parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval', help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL) parser.add_option('-B', '--background', dest='seconds', type='int', default=0, help='run asynchronously, failing after X seconds (default=N/A)') if check_opts: parser.add_option("-C", "--check", default=False, dest='check', action='store_true', help="don't make any changes; instead, try to predict some of the changes that may occur") parser.add_option('--syntax-check', dest='syntax', action='store_true', help="perform a syntax check on the playbook, but do not execute it") parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true', help="when changing (small) files and templates, show the differences in those files; works great with --check") if meta_opts: parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true', help="run handlers even if a task fails") parser.add_option('--flush-cache', dest='flush_cache', action='store_true', help="clear the fact cache") return parser @abstractmethod def parse(self): """Parse the command line args This method parses the command line arguments. It uses the parser stored in the self.parser attribute and saves the args and options in self.args and self.options respectively. Subclasses need to implement this method. They will usually create a base_parser, add their own options to the base_parser, and then call this method to do the actual parsing. An implementation will look something like this:: def parse(self): parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True) parser.add_option('--my-option', dest='my_option', action='store') self.parser = parser super(MyCLI, self).parse() # If some additional transformations are needed for the # arguments and options, do it here. """ self.options, self.args = self.parser.parse_args(self.args[1:]) # process tags if hasattr(self.options, 'tags') and not self.options.tags: # optparse defaults does not do what's expected self.options.tags = ['all'] if hasattr(self.options, 'tags') and self.options.tags: if not C.MERGE_MULTIPLE_CLI_TAGS: if len(self.options.tags) > 1: display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. ' 'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False) self.options.tags = [self.options.tags[-1]] tags = set() for tag_set in self.options.tags: for tag in tag_set.split(u','): tags.add(tag.strip()) self.options.tags = list(tags) # process skip_tags if hasattr(self.options, 'skip_tags') and self.options.skip_tags: if not C.MERGE_MULTIPLE_CLI_TAGS: if len(self.options.skip_tags) > 1: display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. ' 'In 2.4, values will be merged instead. 
Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
                                      version=2.5, removed=False)
                self.options.skip_tags = [self.options.skip_tags[-1]]

            skip_tags = set()
            for tag_set in self.options.skip_tags:
                for tag in tag_set.split(u','):
                    skip_tags.add(tag.strip())
            self.options.skip_tags = list(skip_tags)

        # process inventory options
        if hasattr(self.options, 'inventory'):

            if self.options.inventory:

                # should always be list
                if isinstance(self.options.inventory, string_types):
                    self.options.inventory = [self.options.inventory]

                # Ensure full paths when needed
                self.options.inventory = [unfrackpath(opt) if ',' not in opt else opt for opt in self.options.inventory]

            else:
                # set default if it exists
                if os.path.exists(C.DEFAULT_HOST_LIST):
                    self.options.inventory = [C.DEFAULT_HOST_LIST]

    @staticmethod
    def version(prog):
        ''' return ansible version '''
        result = "{0} {1}".format(prog, __version__)
        gitinfo = CLI._gitinfo()
        if gitinfo:
            result = result + " {0}".format(gitinfo)
        result += "\n  config file = %s" % C.CONFIG_FILE
        if C.DEFAULT_MODULE_PATH is None:
            cpath = "Default w/o overrides"
        else:
            cpath = C.DEFAULT_MODULE_PATH
        result = result + "\n  configured module search path = %s" % cpath
        result = result + "\n  ansible python module location = %s" % ':'.join(ansible.__path__)
        result = result + "\n  executable location = %s" % sys.argv[0]
        result = result + "\n  python version = %s" % ''.join(sys.version.splitlines())
        return result

    @staticmethod
    def version_info(gitinfo=False):
        ''' return full ansible version info '''
        if gitinfo:
            # expensive call, use with care
            ansible_version_string = CLI.version('')
        else:
            ansible_version_string = __version__
        ansible_version = ansible_version_string.split()[0]
        ansible_versions = ansible_version.split('.')
        for counter in range(len(ansible_versions)):
            if ansible_versions[counter] == "":
                ansible_versions[counter] = 0
            try:
                ansible_versions[counter] = int(ansible_versions[counter])
            except:
                pass
        if len(ansible_versions) < 3:
            for counter in range(len(ansible_versions), 3):
                ansible_versions.append(0)
        return {'string': ansible_version_string.strip(),
                'full': ansible_version,
                'major': ansible_versions[0],
                'minor': ansible_versions[1],
                'revision': ansible_versions[2]}

    @staticmethod
    def _git_repo_info(repo_path):
        ''' returns a string containing git branch, commit id and commit date '''
        result = None
        if os.path.exists(repo_path):
            # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
            if os.path.isfile(repo_path):
                try:
                    gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                    # The .git file may contain an absolute path.
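                    # (illustrative) a submodule .git file usually contains a
                    # single line such as: gitdir: ../.git/modules/<name>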
if os.path.isabs(gitdir): repo_path = gitdir else: repo_path = os.path.join(repo_path[:-4], gitdir) except (IOError, AttributeError): return '' f = open(os.path.join(repo_path, "HEAD")) line = f.readline().rstrip("\n") if line.startswith("ref:"): branch_path = os.path.join(repo_path, line[5:]) else: branch_path = None f.close() if branch_path and os.path.exists(branch_path): branch = '/'.join(line.split('/')[2:]) f = open(branch_path) commit = f.readline()[:10] f.close() else: # detached HEAD commit = line[:10] branch = 'detached HEAD' branch_path = os.path.join(repo_path, "HEAD") date = time.localtime(os.stat(branch_path).st_mtime) if time.daylight == 0: offset = time.timezone else: offset = time.altzone result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36)) else: result = '' return result @staticmethod def _gitinfo(): basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') repo_path = os.path.join(basedir, '.git') result = CLI._git_repo_info(repo_path) submodules = os.path.join(basedir, '.gitmodules') if not os.path.exists(submodules): return result f = open(submodules) for line in f: tokens = line.strip().split(' ') if tokens[0] == 'path': submodule_path = tokens[2] submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git')) if not submodule_info: submodule_info = ' not found - use git submodule update --init ' + submodule_path result += "\n {0}: {1}".format(submodule_path, submodule_info) f.close() return result def pager(self, text): ''' find reasonable way to display text ''' # this is a much simpler form of what is in pydoc.py if not sys.stdout.isatty(): display.display(text, screen_only=True) elif 'PAGER' in os.environ: if sys.platform == 'win32': display.display(text, screen_only=True) else: self.pager_pipe(text, os.environ['PAGER']) else: p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() if p.returncode == 0: self.pager_pipe(text, 'less') else: display.display(text, screen_only=True) @staticmethod def pager_pipe(text, cmd): ''' pipe text through a pager ''' if 'LESS' not in os.environ: os.environ['LESS'] = CLI.LESS_OPTS try: cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) cmd.communicate(input=to_bytes(text)) except IOError: pass except KeyboardInterrupt: pass @classmethod def tty_ify(cls, text): t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] t = cls._URL.sub(r"\1", t) # U(word) => word t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' return t @staticmethod def read_vault_password_file(vault_password_file, loader): """ Read a vault password from a file or if executable, execute the script and retrieve password from STDOUT """ this_path = os.path.realpath(os.path.expanduser(vault_password_file)) if not os.path.exists(this_path): raise AnsibleError("The vault password file %s was not found" % this_path) if loader.is_executable(this_path): try: # STDERR not captured to make it easier for users to prompt for input in their scripts p = subprocess.Popen(this_path, stdout=subprocess.PIPE) except OSError as e: raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, " "remove the executable bit from the file." 
% (this_path, e))
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleError("Vault password script %s returned non-zero (%s)" % (this_path, p.returncode))
            vault_pass = stdout.strip(b'\r\n')
        else:
            try:
                f = open(this_path, "rb")
                vault_pass = f.read().strip()
                f.close()
            except (OSError, IOError) as e:
                raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))

        return vault_pass

    @staticmethod
    def _play_prereqs(options):

        # all needs loader
        loader = DataLoader()

        # vault
        b_vault_pass = None
        if options.vault_password_file:
            # read vault_pass from a file
            b_vault_pass = CLI.read_vault_password_file(options.vault_password_file, loader=loader)
        elif options.ask_vault_pass:
            b_vault_pass = CLI.ask_vault_passwords()

        if b_vault_pass is not None:
            loader.set_vault_password(b_vault_pass)

        # create the inventory, and filter it based on the subset specified (if any)
        inventory = InventoryManager(loader=loader, sources=options.inventory)

        # create the variable manager, which will be shared throughout
        # the code, ensuring a consistent view of global variables
        variable_manager = VariableManager(loader=loader, inventory=inventory)

        # load vars from cli options
        variable_manager.extra_vars = load_extra_vars(loader=loader, options=options)
        variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False))

        return loader, inventory, variable_manager
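
# --- Added illustration (not part of the upstream file) ----------------------
# A minimal sketch of how the tty_ify() substitutions above behave. The sample
# markup string and the expected result are assumptions based on the inline
# comments in tty_ify(), not output captured from a real run.
def _tty_ify_example():
    sample = "Use M(copy) with B(force=yes); see U(https://docs.ansible.com)"
    # Expected, per the substitution comments in tty_ify():
    #   "Use [copy] with *force=yes*; see https://docs.ansible.com"
    return CLI.tty_ify(sample)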
gpl-3.0
8,905,803,994,542,137,000
46.223315
160
0.574785
false
kaustubh-kabra/modified-xen
tools/python/logging/logging-0.4.9.2/test/log_test12.py
42
1951
#!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests HTTPHandler.

Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.handlers

def main():
    host = "localhost:%d" % logging.handlers.DEFAULT_HTTP_LOGGING_PORT
    gh = logging.handlers.HTTPHandler(host, '/log', 'GET')
    ph = logging.handlers.HTTPHandler(host, '/log', 'POST')
    logger = logging.getLogger("log_test12")
    logger.propagate = 0
    logger.addHandler(gh)
    logger.addHandler(ph)
    logging.getLogger("").setLevel(logging.DEBUG)
    logger.info("Jackdaws love my big %s of %s", "sphinx", "quartz")
    logger.debug("Pack my %s with twelve dozen %s", "box", "liquor jugs")
    gh.close()
    ph.close()
    logger.removeHandler(gh)
    logger.removeHandler(ph)

if __name__ == "__main__":
    main()
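
# --- Added sketch (not part of the original test) -----------------------------
# The harness above assumes something is listening on
# DEFAULT_HTTP_LOGGING_PORT. A minimal receiver that would accept the GET and
# POST requests sent by HTTPHandler might look like this; the handler class
# and request count below are assumptions for illustration, not part of the
# logging package.
import BaseHTTPServer

class _LogReceiver(BaseHTTPServer.BaseHTTPRequestHandler):
    """Acknowledge each record; field parsing is omitted for brevity."""
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
    def do_POST(self):
        # drain the urlencoded record fields before acknowledging
        self.rfile.read(int(self.headers.get('Content-Length') or 0))
        self.send_response(200)
        self.end_headers()

def _serve_records():
    port = logging.handlers.DEFAULT_HTTP_LOGGING_PORT
    httpd = BaseHTTPServer.HTTPServer(('localhost', port), _LogReceiver)
    for _ in range(4):  # main() emits two records through two handlers each
        httpd.handle_request()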
gpl-2.0
-3,919,620,158,034,893,300
40.510638
78
0.737058
false
markpudd/logistic_regression
logisticReg.py
1
1904
# Helper functions for logistic regression.
# To fit a model, minimise the logRegCost function, using the logRegGrad
# method to provide the derivative.
#
import numpy as np


def featureNormalize(data):
    # Zero-mean, unit-variance scaling of each feature column
    mu = data.mean(0)
    data_norm = data - mu
    sigma = np.std(data_norm, axis=0, ddof=1)
    return data_norm / sigma


def addFirstOnes(data):
    # Prepend a column of ones (the bias/intercept term)
    return np.concatenate((np.ones((np.size(data, 0), 1)), data), 1)


def sigmoid(z):
    return 1 / (1 + np.exp(-z))


def logRegGrad(theta, data_x, data_y, lamb):
    # Gradient of the regularised cost; theta[0] (the bias) is not regularised
    m = float(np.size(data_y))
    theta = np.array([theta]).T
    temp = np.array(theta)
    temp[0] = 0
    h = sigmoid(data_x.dot(theta))
    grad = 1 / m * ((h - data_y).T.dot(data_x)).T
    grad = grad + ((lamb / m) * temp)
    return grad.T[0]


def logRegCost(theta, data_x, data_y, lamb):
    # Regularised logistic-regression cost (negative log-likelihood)
    m = float(np.size(data_y))
    theta = np.array([theta]).T
    h = sigmoid(data_x.dot(theta))
    J = 1 / m * ((-data_y.T.dot(np.log(h)) - (1 - data_y.T).dot(np.log(1 - h))))
    temp = np.array(theta)
    temp[0] = 0  # because we don't add anything for j = 0
    J = J + (lamb / (2 * m)) * sum(np.power(temp, 2))
    return J[0, 0]


def predict(theta, data_x):
    # Threshold the sigmoid output at 0.5
    theta = np.array([theta]).T
    p = sigmoid(data_x.dot(theta))
    for i in range(0, np.size(data_x, 0)):
        if p[i] >= 0.5:
            p[i] = 1
        else:
            p[i] = 0
    return p


def testError(theta, data_x, data_y):
    # Fraction of misclassified examples
    m = float(np.size(data_y))
    errors = 0
    p = predict(theta, data_x)
    for i in range(0, np.size(data_x, 0)):
        if p[i, 0] == 1 and data_y[0, i] == 0:
            errors = errors + 1
        elif p[i, 0] == 0 and data_y[0, i] == 1:
            errors = errors + 1
    return 1 / m * errors
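
# --- Added usage sketch (not in the original file) ----------------------------
# The header says logRegCost must be minimised with logRegGrad supplying the
# derivative; one way to do that is scipy.optimize.fmin_bfgs. The data shapes
# are assumptions inferred from how the helpers index their arguments:
# data_x is m-by-n, and the labels are an m-by-1 column of 0/1 values.
def _fit_example(data_x, data_y_col, lamb=1.0):
    import scipy.optimize as opt
    x = addFirstOnes(featureNormalize(data_x))  # normalise, then add bias column
    theta0 = np.zeros(np.size(x, 1))
    theta = opt.fmin_bfgs(logRegCost, theta0, fprime=logRegGrad,
                          args=(x, data_y_col, lamb))
    # testError indexes the labels as a 1-by-m row, hence the transpose
    return theta, testError(theta, x, data_y_col.T)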
mit
8,888,893,005,371,620,000
21.666667
103
0.56145
false
tinloaf/home-assistant
homeassistant/components/media_player/cast.py
3
24944
""" Provide functionality to interact with Cast devices on the network. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.cast/ """ import asyncio import logging import threading from typing import Optional, Tuple import attr import voluptuous as vol from homeassistant.components.cast import DOMAIN as CAST_DOMAIN from homeassistant.components.media_player import ( MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, EVENT_HOMEASSISTANT_STOP, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING) from homeassistant.core import callback from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, dispatcher_send) from homeassistant.helpers.typing import ConfigType, HomeAssistantType import homeassistant.util.dt as dt_util DEPENDENCIES = ('cast',) _LOGGER = logging.getLogger(__name__) CONF_IGNORE_CEC = 'ignore_cec' CAST_SPLASH = 'https://home-assistant.io/images/cast/splash.png' DEFAULT_PORT = 8009 SUPPORT_CAST = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \ SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_PLAY # Stores a threading.Lock that is held by the internal pychromecast discovery. INTERNAL_DISCOVERY_RUNNING_KEY = 'cast_discovery_running' # Stores all ChromecastInfo we encountered through discovery or config as a set # If we find a chromecast with a new host, the old one will be removed again. KNOWN_CHROMECAST_INFO_KEY = 'cast_known_chromecasts' # Stores UUIDs of cast devices that were added as entities. Doesn't store # None UUIDs. ADDED_CAST_DEVICES_KEY = 'cast_added_cast_devices' # Dispatcher signal fired with a ChromecastInfo every time we discover a new # Chromecast or receive it through configuration SIGNAL_CAST_DISCOVERED = 'cast_discovered' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_HOST): cv.string, vol.Optional(CONF_IGNORE_CEC, default=[]): vol.All(cv.ensure_list, [cv.string]), }) @attr.s(slots=True, frozen=True) class ChromecastInfo: """Class to hold all data about a chromecast for creating connections. This also has the same attributes as the mDNS fields by zeroconf. """ host = attr.ib(type=str) port = attr.ib(type=int) uuid = attr.ib(type=Optional[str], converter=attr.converters.optional(str), default=None) # always convert UUID to string if not None manufacturer = attr.ib(type=str, default='') model_name = attr.ib(type=str, default='') friendly_name = attr.ib(type=Optional[str], default=None) @property def is_audio_group(self) -> bool: """Return if this is an audio group.""" return self.port != DEFAULT_PORT @property def is_information_complete(self) -> bool: """Return if all information is filled out.""" return all(attr.astuple(self)) @property def host_port(self) -> Tuple[str, int]: """Return the host+port tuple.""" return self.host, self.port def _fill_out_missing_chromecast_info(info: ChromecastInfo) -> ChromecastInfo: """Fill out missing attributes of ChromecastInfo using blocking HTTP.""" if info.is_information_complete or info.is_audio_group: # We have all information, no need to check HTTP API. 
Or this is an
        # audio group, so checking via HTTP won't give us any new information.
        return info

    # Fill out missing information via HTTP dial.
    from pychromecast import dial
    http_device_status = dial.get_device_status(info.host)
    if http_device_status is None:
        # HTTP dial didn't give us any new information.
        return info

    return ChromecastInfo(
        host=info.host, port=info.port,
        uuid=(info.uuid or http_device_status.uuid),
        friendly_name=(info.friendly_name or http_device_status.friendly_name),
        manufacturer=(info.manufacturer or http_device_status.manufacturer),
        model_name=(info.model_name or http_device_status.model_name)
    )


def _discover_chromecast(hass: HomeAssistantType, info: ChromecastInfo):
    if info in hass.data[KNOWN_CHROMECAST_INFO_KEY]:
        _LOGGER.debug("Discovered previous chromecast %s", info)
        return

    # Either discovered completely new chromecast or a "moved" one.
    info = _fill_out_missing_chromecast_info(info)
    _LOGGER.debug("Discovered chromecast %s", info)

    if info.uuid is not None:
        # Remove previous cast infos with same uuid from known chromecasts.
        same_uuid = set(x for x in hass.data[KNOWN_CHROMECAST_INFO_KEY]
                        if info.uuid == x.uuid)
        hass.data[KNOWN_CHROMECAST_INFO_KEY] -= same_uuid

    hass.data[KNOWN_CHROMECAST_INFO_KEY].add(info)
    dispatcher_send(hass, SIGNAL_CAST_DISCOVERED, info)


def _setup_internal_discovery(hass: HomeAssistantType) -> None:
    """Set up the pychromecast internal discovery."""
    if INTERNAL_DISCOVERY_RUNNING_KEY not in hass.data:
        hass.data[INTERNAL_DISCOVERY_RUNNING_KEY] = threading.Lock()

    if not hass.data[INTERNAL_DISCOVERY_RUNNING_KEY].acquire(blocking=False):
        # Internal discovery is already running
        return

    import pychromecast

    def internal_callback(name):
        """Handle zeroconf discovery of a new chromecast."""
        mdns = listener.services[name]
        _discover_chromecast(hass, ChromecastInfo(
            host=mdns[0],
            port=mdns[1],
            uuid=mdns[2],
            model_name=mdns[3],
            friendly_name=mdns[4],
        ))

    _LOGGER.debug("Starting internal pychromecast discovery.")
    listener, browser = pychromecast.start_discovery(internal_callback)

    def stop_discovery(event):
        """Stop discovery of new chromecasts."""
        _LOGGER.debug("Stopping internal pychromecast discovery.")
        pychromecast.stop_discovery(browser)
        hass.data[INTERNAL_DISCOVERY_RUNNING_KEY].release()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_discovery)


@callback
def _async_create_cast_device(hass: HomeAssistantType, info: ChromecastInfo):
    """Create a CastDevice Entity from the chromecast object.

    Returns None if the cast device has already been added.
    """
    if info.uuid is None:
        # Found a cast without UUID, we don't store it because we won't be able
        # to update it anyway.
        return CastDevice(info)

    # Found a cast with UUID
    added_casts = hass.data[ADDED_CAST_DEVICES_KEY]
    if info.uuid in added_casts:
        # Already added this one, the entity will take care of moved hosts
        # itself
        return None
    # -> New cast device
    added_casts.add(info.uuid)
    return CastDevice(info)


async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
                               async_add_entities, discovery_info=None):
    """Set up the Cast platform.

    Deprecated.
    """
    _LOGGER.warning(
        'Setting configuration for Cast via platform is deprecated. 
' 'Configure via Cast component instead.') await _async_setup_platform( hass, config, async_add_entities, discovery_info) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Cast from a config entry.""" config = hass.data[CAST_DOMAIN].get('media_player', {}) if not isinstance(config, list): config = [config] # no pending task done, _ = await asyncio.wait([ _async_setup_platform(hass, cfg, async_add_entities, None) for cfg in config]) if any([task.exception() for task in done]): raise PlatformNotReady async def _async_setup_platform(hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info): """Set up the cast platform.""" import pychromecast # Import CEC IGNORE attributes pychromecast.IGNORE_CEC += config.get(CONF_IGNORE_CEC, []) hass.data.setdefault(ADDED_CAST_DEVICES_KEY, set()) hass.data.setdefault(KNOWN_CHROMECAST_INFO_KEY, set()) info = None if discovery_info is not None: info = ChromecastInfo(host=discovery_info['host'], port=discovery_info['port']) elif CONF_HOST in config: info = ChromecastInfo(host=config[CONF_HOST], port=DEFAULT_PORT) @callback def async_cast_discovered(discover: ChromecastInfo) -> None: """Handle discovery of a new chromecast.""" if info is not None and info.host_port != discover.host_port: # Not our requested cast device. return cast_device = _async_create_cast_device(hass, discover) if cast_device is not None: async_add_entities([cast_device]) remove_handler = async_dispatcher_connect( hass, SIGNAL_CAST_DISCOVERED, async_cast_discovered) # Re-play the callback for all past chromecasts, store the objects in # a list to avoid concurrent modification resulting in exception. for chromecast in list(hass.data[KNOWN_CHROMECAST_INFO_KEY]): async_cast_discovered(chromecast) if info is None or info.is_audio_group: # If we were a) explicitly told to enable discovery or # b) have an audio group cast device, we need internal discovery. hass.async_add_job(_setup_internal_discovery, hass) else: info = await hass.async_add_job(_fill_out_missing_chromecast_info, info) if info.friendly_name is None: _LOGGER.debug("Cannot retrieve detail information for chromecast" " %s, the device may not be online", info) remove_handler() raise PlatformNotReady hass.async_add_job(_discover_chromecast, hass, info) class CastStatusListener: """Helper class to handle pychromecast status callbacks. Necessary because a CastDevice entity can create a new socket client and therefore callbacks from multiple chromecast connections can potentially arrive. This class allows invalidating past chromecast objects. """ def __init__(self, cast_device, chromecast): """Initialize the status listener.""" self._cast_device = cast_device self._valid = True chromecast.register_status_listener(self) chromecast.socket_client.media_controller.register_status_listener( self) chromecast.register_connection_listener(self) def new_cast_status(self, cast_status): """Handle reception of a new CastStatus.""" if self._valid: self._cast_device.new_cast_status(cast_status) def new_media_status(self, media_status): """Handle reception of a new MediaStatus.""" if self._valid: self._cast_device.new_media_status(media_status) def new_connection_status(self, connection_status): """Handle reception of a new ConnectionStatus.""" if self._valid: self._cast_device.new_connection_status(connection_status) def invalidate(self): """Invalidate this status listener. All following callbacks won't be forwarded. 
""" self._valid = False class CastDevice(MediaPlayerDevice): """Representation of a Cast device on the network. This class is the holder of the pychromecast.Chromecast object and its socket client. It therefore handles all reconnects and audio group changing "elected leader" itself. """ def __init__(self, cast_info): """Initialize the cast device.""" self._cast_info = cast_info # type: ChromecastInfo self._chromecast = None # type: Optional[pychromecast.Chromecast] self.cast_status = None self.media_status = None self.media_status_received = None self._available = False # type: bool self._status_listener = None # type: Optional[CastStatusListener] async def async_added_to_hass(self): """Create chromecast object when added to hass.""" @callback def async_cast_discovered(discover: ChromecastInfo): """Handle discovery of new Chromecast.""" if self._cast_info.uuid is None: # We can't handle empty UUIDs return if self._cast_info.uuid != discover.uuid: # Discovered is not our device. return _LOGGER.debug("Discovered chromecast with same UUID: %s", discover) self.hass.async_create_task(self.async_set_cast_info(discover)) async def async_stop(event): """Disconnect socket on Home Assistant stop.""" await self._async_disconnect() async_dispatcher_connect(self.hass, SIGNAL_CAST_DISCOVERED, async_cast_discovered) self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop) self.hass.async_create_task(self.async_set_cast_info(self._cast_info)) async def async_will_remove_from_hass(self) -> None: """Disconnect Chromecast object when removed.""" await self._async_disconnect() if self._cast_info.uuid is not None: # Remove the entity from the added casts so that it can dynamically # be re-added again. self.hass.data[ADDED_CAST_DEVICES_KEY].remove(self._cast_info.uuid) async def async_set_cast_info(self, cast_info): """Set the cast information and set up the chromecast object.""" import pychromecast old_cast_info = self._cast_info self._cast_info = cast_info if self._chromecast is not None: if old_cast_info.host_port == cast_info.host_port: _LOGGER.debug("No connection related update: %s", cast_info.host_port) return await self._async_disconnect() # pylint: disable=protected-access _LOGGER.debug("Connecting to cast device %s", cast_info) chromecast = await self.hass.async_add_job( pychromecast._get_chromecast_from_host, ( cast_info.host, cast_info.port, cast_info.uuid, cast_info.model_name, cast_info.friendly_name )) self._chromecast = chromecast self._status_listener = CastStatusListener(self, chromecast) # Initialise connection status as connected because we can only # register the connection listener *after* the initial connection # attempt. If the initial connection failed, we would never reach # this code anyway. self._available = True self.cast_status = chromecast.status self.media_status = chromecast.media_controller.status _LOGGER.debug("Connection successful!") self.async_schedule_update_ha_state() async def _async_disconnect(self): """Disconnect Chromecast object if it is set.""" if self._chromecast is None: # Can't disconnect if not connected. 
return

        _LOGGER.debug("Disconnecting from chromecast socket.")
        self._available = False
        self.async_schedule_update_ha_state()

        await self.hass.async_add_job(self._chromecast.disconnect)

        self._invalidate()
        self.async_schedule_update_ha_state()

    def _invalidate(self):
        """Invalidate some attributes."""
        self._chromecast = None
        self.cast_status = None
        self.media_status = None
        self.media_status_received = None
        if self._status_listener is not None:
            self._status_listener.invalidate()
            self._status_listener = None

    # ========== Callbacks ==========
    def new_cast_status(self, cast_status):
        """Handle updates of the cast status."""
        self.cast_status = cast_status
        self.schedule_update_ha_state()

    def new_media_status(self, media_status):
        """Handle updates of the media status."""
        self.media_status = media_status
        self.media_status_received = dt_util.utcnow()
        self.schedule_update_ha_state()

    def new_connection_status(self, connection_status):
        """Handle updates of connection status."""
        from pychromecast.socket_client import CONNECTION_STATUS_CONNECTED, \
            CONNECTION_STATUS_DISCONNECTED

        _LOGGER.debug("Received cast device connection status: %s",
                      connection_status.status)
        if connection_status.status == CONNECTION_STATUS_DISCONNECTED:
            self._available = False
            self._invalidate()
            self.schedule_update_ha_state()
            return

        new_available = connection_status.status == CONNECTION_STATUS_CONNECTED
        if new_available != self._available:
            # Connection status callbacks happen often when disconnected.
            # Only update state when availability changed to put less pressure
            # on state machine.
            _LOGGER.debug("Cast device availability changed: %s",
                          connection_status.status)
            self._available = new_available
            self.schedule_update_ha_state()

    # ========== Service Calls ==========
    def turn_on(self):
        """Turn on the cast device."""
        import pychromecast

        if not self._chromecast.is_idle:
            # Already turned on
            return

        if self._chromecast.app_id is not None:
            # Quit the previous app before starting splash screen
            self._chromecast.quit_app()

        # The only way we can turn the Chromecast on is by launching an app
        self._chromecast.play_media(CAST_SPLASH,
                                    pychromecast.STREAM_TYPE_BUFFERED)

    def turn_off(self):
        """Turn off the cast device."""
        self._chromecast.quit_app()

    def mute_volume(self, mute):
        """Mute the volume."""
        self._chromecast.set_volume_muted(mute)

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._chromecast.set_volume(volume)

    def media_play(self):
        """Send play command."""
        self._chromecast.media_controller.play()

    def media_pause(self):
        """Send pause command."""
        self._chromecast.media_controller.pause()

    def media_stop(self):
        """Send stop command."""
        self._chromecast.media_controller.stop()

    def media_previous_track(self):
        """Send previous track command."""
        self._chromecast.media_controller.rewind()

    def media_next_track(self):
        """Send next track command."""
        self._chromecast.media_controller.skip()

    def media_seek(self, position):
        """Seek the media to a specific location."""
        self._chromecast.media_controller.seek(position)

    def play_media(self, media_type, media_id, **kwargs):
        """Play media from a URL."""
        self._chromecast.media_controller.play_media(media_id, media_type)

    # ========== Properties ==========
    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the device."""
        return self._cast_info.friendly_name

    @property
    def device_info(self):
        """Return information about the device."""
        cast_info = self._cast_info

        if cast_info.model_name == "Google Cast Group":
            return None

        return
{ 'name': cast_info.friendly_name, 'identifiers': { (CAST_DOMAIN, cast_info.uuid.replace('-', '')) }, 'model': cast_info.model_name, 'manufacturer': cast_info.manufacturer, } @property def state(self): """Return the state of the player.""" if self.media_status is None: return None if self.media_status.player_is_playing: return STATE_PLAYING if self.media_status.player_is_paused: return STATE_PAUSED if self.media_status.player_is_idle: return STATE_IDLE if self._chromecast is not None and self._chromecast.is_idle: return STATE_OFF return None @property def available(self): """Return True if the cast device is connected.""" return self._available @property def volume_level(self): """Volume level of the media player (0..1).""" return self.cast_status.volume_level if self.cast_status else None @property def is_volume_muted(self): """Boolean if volume is currently muted.""" return self.cast_status.volume_muted if self.cast_status else None @property def media_content_id(self): """Content ID of current playing media.""" return self.media_status.content_id if self.media_status else None @property def media_content_type(self): """Content type of current playing media.""" if self.media_status is None: return None if self.media_status.media_is_tvshow: return MEDIA_TYPE_TVSHOW if self.media_status.media_is_movie: return MEDIA_TYPE_MOVIE if self.media_status.media_is_musictrack: return MEDIA_TYPE_MUSIC return None @property def media_duration(self): """Duration of current playing media in seconds.""" return self.media_status.duration if self.media_status else None @property def media_image_url(self): """Image url of current playing media.""" if self.media_status is None: return None images = self.media_status.images return images[0].url if images and images[0].url else None @property def media_title(self): """Title of current playing media.""" return self.media_status.title if self.media_status else None @property def media_artist(self): """Artist of current playing media (Music track only).""" return self.media_status.artist if self.media_status else None @property def media_album(self): """Album of current playing media (Music track only).""" return self.media_status.album_name if self.media_status else None @property def media_album_artist(self): """Album artist of current playing media (Music track only).""" return self.media_status.album_artist if self.media_status else None @property def media_track(self): """Track number of current playing media (Music track only).""" return self.media_status.track if self.media_status else None @property def media_series_title(self): """Return the title of the series of current playing media.""" return self.media_status.series_title if self.media_status else None @property def media_season(self): """Season of current playing media (TV Show only).""" return self.media_status.season if self.media_status else None @property def media_episode(self): """Episode of current playing media (TV Show only).""" return self.media_status.episode if self.media_status else None @property def app_id(self): """Return the ID of the current running app.""" return self._chromecast.app_id if self._chromecast else None @property def app_name(self): """Name of the current running app.""" return self._chromecast.app_display_name if self._chromecast else None @property def supported_features(self): """Flag media player features that are supported.""" return SUPPORT_CAST @property def media_position(self): """Position of current playing media in seconds.""" if self.media_status is None 
or \ not (self.media_status.player_is_playing or self.media_status.player_is_paused or self.media_status.player_is_idle): return None return self.media_status.current_time @property def media_position_updated_at(self): """When was the position of the current playing media valid. Returns value from homeassistant.util.dt.utcnow(). """ return self.media_status_received @property def unique_id(self) -> Optional[str]: """Return a unique ID.""" return self._cast_info.uuid
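
# --- Added illustration (not part of the integration) -------------------------
# A small sketch of the ChromecastInfo semantics defined above: audio groups
# are recognised purely by their port differing from DEFAULT_PORT, and
# is_information_complete requires every attribute to be filled in. The host
# and port values are made-up examples.
def _chromecast_info_example():
    group = ChromecastInfo(host='192.168.1.50', port=42424)
    assert group.is_audio_group                # port != DEFAULT_PORT (8009)
    assert not group.is_information_complete   # uuid and names still missing
    return group.host_port                     # ('192.168.1.50', 42424)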
apache-2.0
3,968,205,885,144,293,400
35.628488
79
0.643281
false
Juniper/nova
nova/virt/powervm/host.py
2
2812
# Copyright 2014, 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math from oslo_log import log as logging from oslo_serialization import jsonutils from nova import conf as cfg from nova.objects import fields LOG = logging.getLogger(__name__) CONF = cfg.CONF # Power VM hypervisor info # Normally, the hypervisor version is a string in the form of '8.0.0' and # converted to an int with nova.virt.utils.convert_version_to_int() however # there isn't currently a mechanism to retrieve the exact version. # Complicating this is the fact that nova conductor only allows live migration # from the source host to the destination if the source is equal to or less # than the destination version. PowerVM live migration limitations are # checked by the PowerVM capabilities flags and not specific version levels. # For that reason, we'll just publish the major level. IBM_POWERVM_HYPERVISOR_VERSION = 8 # The types of LPARS that are supported. POWERVM_SUPPORTED_INSTANCES = [ (fields.Architecture.PPC64, fields.HVType.PHYP, fields.VMMode.HVM), (fields.Architecture.PPC64LE, fields.HVType.PHYP, fields.VMMode.HVM)] def build_host_resource_from_ms(ms_w): """Build the host resource dict from a ManagedSystem PowerVM wrapper. :param ms_w: The pypowervm System wrapper describing the managed system. """ data = {} # Calculate the vcpus proc_units = ms_w.proc_units_configurable pu_used = float(proc_units) - float(ms_w.proc_units_avail) data['vcpus'] = int(math.ceil(float(proc_units))) data['vcpus_used'] = int(math.ceil(pu_used)) data['memory_mb'] = ms_w.memory_configurable data['memory_mb_used'] = (ms_w.memory_configurable - ms_w.memory_free) data["hypervisor_type"] = fields.HVType.PHYP data["hypervisor_version"] = IBM_POWERVM_HYPERVISOR_VERSION data["hypervisor_hostname"] = CONF.host data["cpu_info"] = jsonutils.dumps({'vendor': 'ibm', 'arch': 'ppc64'}) data["numa_topology"] = None data["supported_instances"] = POWERVM_SUPPORTED_INSTANCES stats = {'proc_units': '%.2f' % float(proc_units), 'proc_units_used': '%.2f' % pu_used, 'memory_region_size': ms_w.memory_region_size} data["stats"] = stats return data
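
# --- Added illustration (not part of the driver) ------------------------------
# build_host_resource_from_ms() only reads a handful of attributes from the
# pypowervm System wrapper, so a simple stand-in object (an assumption for
# illustration, not a real pypowervm wrapper) is enough to show the rounding:
# vcpus is ceil(configurable proc units) and vcpus_used is
# ceil(configurable - available). Assumes CONF.host resolves to a default.
def _host_resource_example():
    class _StubSystem(object):
        proc_units_configurable = '16.0'
        proc_units_avail = '11.5'
        memory_configurable = 65536
        memory_free = 32768
        memory_region_size = 256
    data = build_host_resource_from_ms(_StubSystem())
    return data['vcpus'], data['vcpus_used']  # (16, 5): ceil(4.5) rounds up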
apache-2.0
805,997,186,879,891,300
40.352941
78
0.706259
false
raildo/nova
nova/tests/unit/test_cinder.py
14
6997
# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinderclient.v1 import client as cinder_client_v1 from cinderclient.v2 import client as cinder_client_v2 from requests_mock.contrib import fixture from testtools import matchers from nova import context from nova import exception from nova import test from nova.volume import cinder _image_metadata = { 'kernel_id': 'fake', 'ramdisk_id': 'fake' } class BaseCinderTestCase(object): def setUp(self): super(BaseCinderTestCase, self).setUp() cinder.reset_globals() self.requests = self.useFixture(fixture.Fixture()) self.api = cinder.API() self.context = context.RequestContext('username', 'project_id', auth_token='token', service_catalog=self.CATALOG) def flags(self, *args, **kwargs): super(BaseCinderTestCase, self).flags(*args, **kwargs) cinder.reset_globals() def create_client(self): return cinder.cinderclient(self.context) def test_context_with_catalog(self): self.assertEqual(self.URL, self.create_client().client.get_endpoint()) def test_cinder_http_retries(self): retries = 42 self.flags(http_retries=retries, group='cinder') self.assertEqual(retries, self.create_client().client.connect_retries) def test_cinder_api_insecure(self): # The True/False negation is awkward, but better for the client # to pass us insecure=True and we check verify_cert == False self.flags(insecure=True, group='cinder') self.assertFalse(self.create_client().client.session.verify) def test_cinder_http_timeout(self): timeout = 123 self.flags(timeout=timeout, group='cinder') self.assertEqual(timeout, self.create_client().client.session.timeout) def test_cinder_api_cacert_file(self): cacert = "/etc/ssl/certs/ca-certificates.crt" self.flags(cafile=cacert, group='cinder') self.assertEqual(cacert, self.create_client().client.session.verify) class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase): """Test case for cinder volume v1 api.""" URL = "http://localhost:8776/v1/project_id" CATALOG = [{ "type": "volumev2", "name": "cinderv2", "endpoints": [{"publicURL": URL}] }] def create_client(self): c = super(CinderTestCase, self).create_client() self.assertIsInstance(c, cinder_client_v1.Client) return c def stub_volume(self, **kwargs): volume = { 'display_name': None, 'display_description': None, "attachments": [], "availability_zone": "cinder", "created_at": "2012-09-10T00:00:00.000000", "id": '00000000-0000-0000-0000-000000000000', "metadata": {}, "size": 1, "snapshot_id": None, "status": "available", "volume_type": "None", "bootable": "true" } volume.update(kwargs) return volume def test_cinder_endpoint_template(self): endpoint = 'http://other_host:8776/v1/%(project_id)s' self.flags(endpoint_template=endpoint, group='cinder') self.assertEqual('http://other_host:8776/v1/project_id', self.create_client().client.endpoint_override) def test_get_non_existing_volume(self): self.requests.get(self.URL + '/volumes/nonexisting', status_code=404) self.assertRaises(exception.VolumeNotFound, self.api.get, self.context, 'nonexisting') def 
test_volume_with_image_metadata(self): v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata) m = self.requests.get(self.URL + '/volumes/5678', json={'volume': v}) volume = self.api.get(self.context, '5678') self.assertThat(m.last_request.path, matchers.EndsWith('/volumes/5678')) self.assertIn('volume_image_metadata', volume) self.assertEqual(_image_metadata, volume['volume_image_metadata']) class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase): """Test case for cinder volume v2 api.""" URL = "http://localhost:8776/v2/project_id" CATALOG = [{ "type": "volumev2", "name": "cinder", "endpoints": [{"publicURL": URL}] }] def setUp(self): super(CinderV2TestCase, self).setUp() cinder.CONF.set_override('catalog_info', 'volumev2:cinder:publicURL', group='cinder') self.addCleanup(cinder.CONF.reset) def create_client(self): c = super(CinderV2TestCase, self).create_client() self.assertIsInstance(c, cinder_client_v2.Client) return c def stub_volume(self, **kwargs): volume = { 'name': None, 'description': None, "attachments": [], "availability_zone": "cinderv2", "created_at": "2013-08-10T00:00:00.000000", "id": '00000000-0000-0000-0000-000000000000', "metadata": {}, "size": 1, "snapshot_id": None, "status": "available", "volume_type": "None", "bootable": "true" } volume.update(kwargs) return volume def test_cinder_endpoint_template(self): endpoint = 'http://other_host:8776/v2/%(project_id)s' self.flags(endpoint_template=endpoint, group='cinder') self.assertEqual('http://other_host:8776/v2/project_id', self.create_client().client.endpoint_override) def test_get_non_existing_volume(self): self.requests.get(self.URL + '/volumes/nonexisting', status_code=404) self.assertRaises(exception.VolumeNotFound, self.api.get, self.context, 'nonexisting') def test_volume_with_image_metadata(self): v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata) self.requests.get(self.URL + '/volumes/5678', json={'volume': v}) volume = self.api.get(self.context, '5678') self.assertIn('volume_image_metadata', volume) self.assertEqual(_image_metadata, volume['volume_image_metadata'])
apache-2.0
7,415,518,022,476,962,000
35.253886
79
0.60726
false
whitehorse-io/encarnia
pyenv/lib/python2.7/site-packages/twisted/python/_shellcomp.py
15
24327
# -*- test-case-name: twisted.python.test.test_shellcomp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
No public APIs are provided by this module. Internal use only.

This module implements dynamic tab-completion for any command that uses
twisted.python.usage. Currently, only zsh is supported. Bash support may
be added in the future.

Maintainer: Eric P. Mangold - twisted AT teratorn DOT org

In order for zsh completion to take place the shell must be able to find an
appropriate "stub" file ("completion function") that invokes this code and
displays the results to the user.

The stub used for Twisted commands is in the file C{twisted-completion.zsh},
which is also included in the official Zsh distribution at
C{Completion/Unix/Command/_twisted}. Use this file as a basis for completion
functions for your own commands. You should only need to change the first
line to something like C{#compdef mycommand}.

The main public documentation exists in the L{twisted.python.usage.Options}
docstring, the L{twisted.python.usage.Completions} docstring, and the
Options howto.
"""
import itertools, getopt, inspect

from twisted.python import reflect, util, usage


def shellComplete(config, cmdName, words, shellCompFile):
    """
    Perform shell completion.

    A completion function (shell script) is generated for the requested
    shell and written to C{shellCompFile}, typically C{stdout}. The result
    is then eval'd by the shell to produce the desired completions.

    @type config: L{twisted.python.usage.Options}
    @param config: The L{twisted.python.usage.Options} instance to generate
        completions for.

    @type cmdName: C{str}
    @param cmdName: The name of the command we're generating completions for.
        In the case of zsh, this is used to print an appropriate
        "#compdef $CMD" line at the top of the output. This is not necessary
        for the functionality of the system, but it helps in debugging,
        since the output we produce is properly formed and may be saved in a
        file and used as a stand-alone completion function.

    @type words: C{list} of C{str}
    @param words: The raw command-line words passed to use by the shell
        stub function. argv[0] has already been stripped off.

    @type shellCompFile: C{file}
    @param shellCompFile: The file to write completion data to.
    """
    # shellName is provided for forward-compatibility. It is not used,
    # since we currently only support zsh.
    shellName, position = words[-1].split(":")
    position = int(position)
    # zsh gives the completion position ($CURRENT) as a 1-based index,
    # and argv[0] has already been stripped off, so we subtract 2 to
    # get the real 0-based index.
    position -= 2

    cWord = words[position]

    # since the user may hit TAB at any time, we may have been called with an
    # incomplete command-line that would generate getopt errors if parsed
    # verbatim. However, we must do *some* parsing in order to determine if
    # there is a specific subcommand that we need to provide completion for.
    # So, to make the command-line more sane we work backwards from the
    # current completion position and strip off all words until we find one
    # that "looks" like a subcommand. It may in fact be the argument to a
    # normal command-line option, but that won't matter for our purposes.
    while position >= 1:
        if words[position - 1].startswith("-"):
            position -= 1
        else:
            break
    words = words[:position]

    subCommands = getattr(config, 'subCommands', None)
    if subCommands:
        # OK, this command supports sub-commands, so let's see if we have been
        # given one.

        # If the command-line arguments are not valid then we won't be able to
        # sanely detect the sub-command, so just generate completions as if no
        # sub-command was found.
        args = None
        try:
            opts, args = getopt.getopt(words, config.shortOpt, config.longOpt)
        except getopt.error:
            pass

        if args:
            # yes, we have a subcommand. Try to find it.
            for (cmd, short, parser, doc) in config.subCommands:
                if args[0] == cmd or args[0] == short:
                    subOptions = parser()
                    subOptions.parent = config

                    gen = ZshSubcommandBuilder(subOptions, config, cmdName,
                                               shellCompFile)
                    gen.write()
                    return

        # sub-command not given, or did not match any known sub-command names
        genSubs = True
        if cWord.startswith("-"):
            # optimization: if the current word being completed starts
            # with a hyphen then it can't be a sub-command, so skip
            # the expensive generation of the sub-command list
            genSubs = False
        gen = ZshBuilder(config, cmdName, shellCompFile)
        gen.write(genSubs=genSubs)
    else:
        gen = ZshBuilder(config, cmdName, shellCompFile)
        gen.write()


class SubcommandAction(usage.Completer):
    def _shellCode(self, optName, shellType):
        if shellType == usage._ZSH:
            return '*::subcmd:->subcmd'
        raise NotImplementedError("Unknown shellType %r" % (shellType,))


class ZshBuilder(object):
    """
    Constructs zsh code that will complete options for a given usage.Options
    instance, possibly including a list of subcommand names.

    Completions for options to subcommands won't be generated because this
    class will never be used if the user is completing options for a specific
    subcommand. (See L{ZshSubcommandBuilder} below)

    @type options: L{twisted.python.usage.Options}
    @ivar options: The L{twisted.python.usage.Options} instance defined for
        this command.

    @type cmdName: C{str}
    @ivar cmdName: The name of the command we're generating completions for.

    @type file: C{file}
    @ivar file: The C{file} to write the completion function to.
    """
    def __init__(self, options, cmdName, file):
        self.options = options
        self.cmdName = cmdName
        self.file = file

    def write(self, genSubs=True):
        """
        Generate the completion function and write it to the output file
        @return: L{None}

        @type genSubs: C{bool}
        @param genSubs: Flag indicating whether or not completions for the
            list of subcommand should be generated. Only has an effect
            if the C{subCommands} attribute has been defined on the
            L{twisted.python.usage.Options} instance.
        """
        if genSubs and getattr(self.options, 'subCommands', None) is not None:
            gen = ZshArgumentsGenerator(self.options, self.cmdName, self.file)
            gen.extraActions.insert(0, SubcommandAction())
            gen.write()
            self.file.write(b'local _zsh_subcmds_array\n_zsh_subcmds_array=(\n')
            for (cmd, short, parser, desc) in self.options.subCommands:
                self.file.write(b'\"' + cmd.encode('utf-8') + b':' +
                                desc.encode('utf-8') + b'\"\n')
            self.file.write(b")\n\n")
            self.file.write(b'_describe "sub-command" _zsh_subcmds_array\n')
        else:
            gen = ZshArgumentsGenerator(self.options, self.cmdName, self.file)
            gen.write()


class ZshSubcommandBuilder(ZshBuilder):
    """
    Constructs zsh code that will complete options for a given usage.Options
    instance, and also for a single sub-command. This will only be used in
    the case where the user is completing options for a specific subcommand.

    @type subOptions: L{twisted.python.usage.Options}
    @ivar subOptions: The L{twisted.python.usage.Options} instance defined
        for the sub command. 
""" def __init__(self, subOptions, *args): self.subOptions = subOptions ZshBuilder.__init__(self, *args) def write(self): """ Generate the completion function and write it to the output file @return: L{None} """ gen = ZshArgumentsGenerator(self.options, self.cmdName, self.file) gen.extraActions.insert(0, SubcommandAction()) gen.write() gen = ZshArgumentsGenerator(self.subOptions, self.cmdName, self.file) gen.write() class ZshArgumentsGenerator(object): """ Generate a call to the zsh _arguments completion function based on data in a usage.Options instance @type options: L{twisted.python.usage.Options} @ivar options: The L{twisted.python.usage.Options} instance to generate for @type cmdName: C{str} @ivar cmdName: The name of the command we're generating completions for. @type file: C{file} @ivar file: The C{file} to write the completion function to The following non-constructor variables are populated by this class with data gathered from the C{Options} instance passed in, and its base classes. @type descriptions: C{dict} @ivar descriptions: A dict mapping long option names to alternate descriptions. When this variable is defined, the descriptions contained here will override those descriptions provided in the optFlags and optParameters variables. @type multiUse: C{list} @ivar multiUse: An iterable containing those long option names which may appear on the command line more than once. By default, options will only be completed one time. @type mutuallyExclusive: C{list} of C{tuple} @ivar mutuallyExclusive: A sequence of sequences, with each sub-sequence containing those long option names that are mutually exclusive. That is, those options that cannot appear on the command line together. @type optActions: C{dict} @ivar optActions: A dict mapping long option names to shell "actions". These actions define what may be completed as the argument to the given option, and should be given as instances of L{twisted.python.usage.Completer}. Callables may instead be given for the values in this dict. The callable should accept no arguments, and return a C{Completer} instance used as the action. @type extraActions: C{list} of C{twisted.python.usage.Completer} @ivar extraActions: Extra arguments are those arguments typically appearing at the end of the command-line, which are not associated with any particular named option. That is, the arguments that are given to the parseArgs() method of your usage.Options subclass. """ def __init__(self, options, cmdName, file): self.options = options self.cmdName = cmdName self.file = file self.descriptions = {} self.multiUse = set() self.mutuallyExclusive = [] self.optActions = {} self.extraActions = [] for cls in reversed(inspect.getmro(options.__class__)): data = getattr(cls, 'compData', None) if data: self.descriptions.update(data.descriptions) self.optActions.update(data.optActions) self.multiUse.update(data.multiUse) self.mutuallyExclusive.extend(data.mutuallyExclusive) # I don't see any sane way to aggregate extraActions, so just # take the one at the top of the MRO (nearest the `options' # instance). 
if data.extraActions: self.extraActions = data.extraActions aCL = reflect.accumulateClassList optFlags = [] optParams = [] aCL(options.__class__, 'optFlags', optFlags) aCL(options.__class__, 'optParameters', optParams) for i, optList in enumerate(optFlags): if len(optList) != 3: optFlags[i] = util.padTo(3, optList) for i, optList in enumerate(optParams): if len(optList) != 5: optParams[i] = util.padTo(5, optList) self.optFlags = optFlags self.optParams = optParams paramNameToDefinition = {} for optList in optParams: paramNameToDefinition[optList[0]] = optList[1:] self.paramNameToDefinition = paramNameToDefinition flagNameToDefinition = {} for optList in optFlags: flagNameToDefinition[optList[0]] = optList[1:] self.flagNameToDefinition = flagNameToDefinition allOptionsNameToDefinition = {} allOptionsNameToDefinition.update(paramNameToDefinition) allOptionsNameToDefinition.update(flagNameToDefinition) self.allOptionsNameToDefinition = allOptionsNameToDefinition self.addAdditionalOptions() # makes sure none of the Completions metadata references # option names that don't exist. (great for catching typos) self.verifyZshNames() self.excludes = self.makeExcludesDict() def write(self): """ Write the zsh completion code to the file given to __init__ @return: L{None} """ self.writeHeader() self.writeExtras() self.writeOptions() self.writeFooter() def writeHeader(self): """ This is the start of the code that calls _arguments @return: L{None} """ self.file.write(b'#compdef ' + self.cmdName.encode('utf-8') + b'\n\n' b'_arguments -s -A "-*" \\\n') def writeOptions(self): """ Write out zsh code for each option in this command @return: L{None} """ optNames = list(self.allOptionsNameToDefinition.keys()) optNames.sort() for longname in optNames: self.writeOpt(longname) def writeExtras(self): """ Write out completion information for extra arguments appearing on the command-line. These are extra positional arguments not associated with a named option. That is, the stuff that gets passed to Options.parseArgs(). @return: L{None} @raises: ValueError: if C{Completer} with C{repeat=True} is found and is not the last item in the C{extraActions} list. """ for i, action in enumerate(self.extraActions): # a repeatable action must be the last action in the list if action._repeat and i != len(self.extraActions) - 1: raise ValueError("Completer with repeat=True must be " "last item in Options.extraActions") self.file.write( escape(action._shellCode('', usage._ZSH)).encode('utf-8')) self.file.write(b' \\\n') def writeFooter(self): """ Write the last bit of code that finishes the call to _arguments @return: L{None} """ self.file.write(b'&& return 0\n') def verifyZshNames(self): """ Ensure that none of the option names given in the metadata are typoed @return: L{None} @raise ValueError: Raised if unknown option names have been found. """ def err(name): raise ValueError("Unknown option name \"%s\" found while\n" "examining Completions instances on %s" % ( name, self.options)) for name in itertools.chain(self.descriptions, self.optActions, self.multiUse): if name not in self.allOptionsNameToDefinition: err(name) for seq in self.mutuallyExclusive: for name in seq: if name not in self.allOptionsNameToDefinition: err(name) def excludeStr(self, longname, buildShort=False): """ Generate an "exclusion string" for the given option @type longname: C{str} @param longname: The long option name (e.g. 
"verbose" instead of "v") @type buildShort: C{bool} @param buildShort: May be True to indicate we're building an excludes string for the short option that corresponds to the given long opt. @return: The generated C{str} """ if longname in self.excludes: exclusions = self.excludes[longname].copy() else: exclusions = set() # if longname isn't a multiUse option (can't appear on the cmd line more # than once), then we have to exclude the short option if we're # building for the long option, and vice versa. if longname not in self.multiUse: if buildShort is False: short = self.getShortOption(longname) if short is not None: exclusions.add(short) else: exclusions.add(longname) if not exclusions: return '' strings = [] for optName in exclusions: if len(optName) == 1: # short option strings.append("-" + optName) else: strings.append("--" + optName) strings.sort() # need deterministic order for reliable unit-tests return "(%s)" % " ".join(strings) def makeExcludesDict(self): """ @return: A C{dict} that maps each option name appearing in self.mutuallyExclusive to a list of those option names that is it mutually exclusive with (can't appear on the cmd line with). """ #create a mapping of long option name -> single character name longToShort = {} for optList in itertools.chain(self.optParams, self.optFlags): if optList[1] != None: longToShort[optList[0]] = optList[1] excludes = {} for lst in self.mutuallyExclusive: for i, longname in enumerate(lst): tmp = set(lst[:i] + lst[i+1:]) for name in tmp.copy(): if name in longToShort: tmp.add(longToShort[name]) if longname in excludes: excludes[longname] = excludes[longname].union(tmp) else: excludes[longname] = tmp return excludes def writeOpt(self, longname): """ Write out the zsh code for the given argument. This is just part of the one big call to _arguments @type longname: C{str} @param longname: The long option name (e.g. "verbose" instead of "v") @return: L{None} """ if longname in self.flagNameToDefinition: # It's a flag option. Not one that takes a parameter. 
longField = "--%s" % longname else: longField = "--%s=" % longname short = self.getShortOption(longname) if short != None: shortField = "-" + short else: shortField = '' descr = self.getDescription(longname) descriptionField = descr.replace("[", "\[") descriptionField = descriptionField.replace("]", "\]") descriptionField = '[%s]' % descriptionField actionField = self.getAction(longname) if longname in self.multiUse: multiField = '*' else: multiField = '' longExclusionsField = self.excludeStr(longname) if short: #we have to write an extra line for the short option if we have one shortExclusionsField = self.excludeStr(longname, buildShort=True) self.file.write(escape('%s%s%s%s%s' % (shortExclusionsField, multiField, shortField, descriptionField, actionField)).encode('utf-8')) self.file.write(b' \\\n') self.file.write(escape('%s%s%s%s%s' % (longExclusionsField, multiField, longField, descriptionField, actionField)).encode('utf-8')) self.file.write(b' \\\n') def getAction(self, longname): """ Return a zsh "action" string for the given argument @return: C{str} """ if longname in self.optActions: if callable(self.optActions[longname]): action = self.optActions[longname]() else: action = self.optActions[longname] return action._shellCode(longname, usage._ZSH) if longname in self.paramNameToDefinition: return ':%s:_files' % (longname,) return '' def getDescription(self, longname): """ Return the description to be used for this argument @return: C{str} """ #check if we have an alternate descr for this arg, and if so use it if longname in self.descriptions: return self.descriptions[longname] #otherwise we have to get it from the optFlags or optParams try: descr = self.flagNameToDefinition[longname][1] except KeyError: try: descr = self.paramNameToDefinition[longname][2] except KeyError: descr = None if descr is not None: return descr # let's try to get it from the opt_foo method doc string if there is one longMangled = longname.replace('-', '_') # this is what t.p.usage does obj = getattr(self.options, 'opt_%s' % longMangled, None) if obj is not None: descr = descrFromDoc(obj) if descr is not None: return descr return longname # we really ought to have a good description to use def getShortOption(self, longname): """ Return the short option letter or None @return: C{str} or L{None} """ optList = self.allOptionsNameToDefinition[longname] return optList[0] or None def addAdditionalOptions(self): """ Add additional options to the optFlags and optParams lists. These will be defined by 'opt_foo' methods of the Options subclass @return: L{None} """ methodsDict = {} reflect.accumulateMethods(self.options, methodsDict, 'opt_') methodToShort = {} for name in methodsDict.copy(): if len(name) == 1: methodToShort[methodsDict[name]] = name del methodsDict[name] for methodName, methodObj in methodsDict.items(): longname = methodName.replace('_', '-') # t.p.usage does this # if this option is already defined by the optFlags or # optParameters then we don't want to override that data if longname in self.allOptionsNameToDefinition: continue descr = self.getDescription(longname) short = None if methodObj in methodToShort: short = methodToShort[methodObj] reqArgs = methodObj.__func__.__code__.co_argcount if reqArgs == 2: self.optParams.append([longname, short, None, descr]) self.paramNameToDefinition[longname] = [short, None, descr] self.allOptionsNameToDefinition[longname] = [short, None, descr] else: # reqArgs must equal 1. self.options would have failed # to instantiate if it had opt_ methods with bad signatures. 
self.optFlags.append([longname, short, descr]) self.flagNameToDefinition[longname] = [short, descr] self.allOptionsNameToDefinition[longname] = [short, None, descr] def descrFromDoc(obj): """ Generate an appropriate description from docstring of the given object """ if obj.__doc__ is None or obj.__doc__.isspace(): return None lines = [x.strip() for x in obj.__doc__.split("\n") if x and not x.isspace()] return " ".join(lines) def escape(x): """ Shell escape the given string Implementation borrowed from now-deprecated commands.mkarg() in the stdlib """ if '\'' not in x: return '\'' + x + '\'' s = '"' for c in x: if c in '\\$"`': s = s + '\\' s = s + c s = s + '"' return s
mit
7,625,184,498,486,815,000
35.472264
88
0.612324
false
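A minimal standalone sketch of the shell-quoting rule implemented by the escape() helper in the row above; the function body is copied from the row for illustration, and the asserts are added:

def escape(x):
    # Prefer single quotes when the string contains none of its own.
    if '\'' not in x:
        return '\'' + x + '\''
    # Otherwise double-quote and backslash-escape shell metacharacters.
    s = '"'
    for c in x:
        if c in '\\$"`':
            s = s + '\\'
        s = s + c
    s = s + '"'
    return s

assert escape("plain") == "'plain'"
assert escape("it's") == '"it\'s"'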
MadCat34/Sick-Beard
sickbeard/clients/requests/status_codes.py
252
3043
# -*- coding: utf-8 -*- from .structures import LookupDict _codes = { # Informational. 100: ('continue',), 101: ('switching_protocols',), 102: ('processing',), 103: ('checkpoint',), 122: ('uri_too_long', 'request_uri_too_long'), 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), 201: ('created',), 202: ('accepted',), 203: ('non_authoritative_info', 'non_authoritative_information'), 204: ('no_content',), 205: ('reset_content', 'reset'), 206: ('partial_content', 'partial'), 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), 208: ('im_used',), # Redirection. 300: ('multiple_choices',), 301: ('moved_permanently', 'moved', '\\o-'), 302: ('found',), 303: ('see_other', 'other'), 304: ('not_modified',), 305: ('use_proxy',), 306: ('switch_proxy',), 307: ('temporary_redirect', 'temporary_moved', 'temporary'), 308: ('resume_incomplete', 'resume'), # Client Error. 400: ('bad_request', 'bad'), 401: ('unauthorized',), 402: ('payment_required', 'payment'), 403: ('forbidden',), 404: ('not_found', '-o-'), 405: ('method_not_allowed', 'not_allowed'), 406: ('not_acceptable',), 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), 408: ('request_timeout', 'timeout'), 409: ('conflict',), 410: ('gone',), 411: ('length_required',), 412: ('precondition_failed', 'precondition'), 413: ('request_entity_too_large',), 414: ('request_uri_too_large',), 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), 417: ('expectation_failed',), 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), 422: ('unprocessable_entity', 'unprocessable'), 423: ('locked',), 424: ('failed_dependency', 'dependency'), 425: ('unordered_collection', 'unordered'), 426: ('upgrade_required', 'upgrade'), 428: ('precondition_required', 'precondition'), 429: ('too_many_requests', 'too_many'), 431: ('header_fields_too_large', 'fields_too_large'), 444: ('no_response', 'none'), 449: ('retry_with', 'retry'), 450: ('blocked_by_windows_parental_controls', 'parental_controls'), 499: ('client_closed_request',), # Server Error. 500: ('internal_server_error', 'server_error', '/o\\', '✗'), 501: ('not_implemented',), 502: ('bad_gateway',), 503: ('service_unavailable', 'unavailable'), 504: ('gateway_timeout',), 505: ('http_version_not_supported', 'http_version'), 506: ('variant_also_negotiates',), 507: ('insufficient_storage',), 509: ('bandwidth_limit_exceeded', 'bandwidth'), 510: ('not_extended',), } codes = LookupDict(name='status_codes') for (code, titles) in list(_codes.items()): for title in titles: setattr(codes, title, code) if not title.startswith('\\'): setattr(codes, title.upper(), code)
gpl-3.0
-6,465,533,652,602,509,000
34.337209
89
0.58177
false
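A minimal sketch of how the codes lookup built in the row above behaves once the setattr loop has run; LookupDict here is a simplified stand-in for requests.structures.LookupDict, assumed only for illustration:

class LookupDict(dict):
    # Attribute access falls back to None for unknown names,
    # mirroring the permissive lookup style of the real class.
    def __getattr__(self, name):
        return self.__dict__.get(name)

codes = LookupDict()
for code, titles in {200: ('ok', 'okay'), 404: ('not_found',)}.items():
    for title in titles:
        setattr(codes, title, code)
        setattr(codes, title.upper(), code)  # e.g. codes.OK

assert codes.ok == 200 and codes.NOT_FOUND == 404
assert codes.no_such_alias is None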
richardgroves/namebench
nb_third_party/jinja2/environment.py
199
43213
# -*- coding: utf-8 -*- """ jinja2.environment ~~~~~~~~~~~~~~~~~~ Provides a class that holds runtime and parsing time options. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import os import sys from jinja2 import nodes from jinja2.defaults import * from jinja2.lexer import get_lexer, TokenStream from jinja2.parser import Parser from jinja2.optimizer import optimize from jinja2.compiler import generate from jinja2.runtime import Undefined, new_context from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \ TemplatesNotFound from jinja2.utils import import_string, LRUCache, Markup, missing, \ concat, consume, internalcode, _encode_filename # for direct template usage we have up to ten living environments _spontaneous_environments = LRUCache(10) # the function to create jinja traceback objects. This is dynamically # imported on the first exception in the exception handler. _make_traceback = None def get_spontaneous_environment(*args): """Return a new spontaneous environment. A spontaneous environment is an unnamed and inaccessible (in theory) environment that is used for templates generated from a string and not from the file system. """ try: env = _spontaneous_environments.get(args) except TypeError: return Environment(*args) if env is not None: return env _spontaneous_environments[args] = env = Environment(*args) env.shared = True return env def create_cache(size): """Return the cache class for the given size.""" if size == 0: return None if size < 0: return {} return LRUCache(size) def copy_cache(cache): """Create an empty copy of the given cache.""" if cache is None: return None elif type(cache) is dict: return {} return LRUCache(cache.capacity) def load_extensions(environment, extensions): """Load the extensions from the list and bind it to the environment. Returns a dict of instantiated extensions. """ result = {} for extension in extensions: if isinstance(extension, basestring): extension = import_string(extension) result[extension.identifier] = extension(environment) return result def _environment_sanity_check(environment): """Perform a sanity check on the environment.""" assert issubclass(environment.undefined, Undefined), 'undefined must ' \ 'be a subclass of undefined because filters depend on it.' assert environment.block_start_string != \ environment.variable_start_string != \ environment.comment_start_string, 'block, variable and comment ' \ 'start strings must be different' assert environment.newline_sequence in ('\r', '\r\n', '\n'), \ 'newline_sequence set to unknown line ending string.' return environment class Environment(object): r"""The core component of Jinja is the `Environment`. It contains important shared variables like configuration, filters, tests, globals and others. Instances of this class may be modified if they are not shared and if no template was loaded so far. Modifications on environments after the first template was loaded will lead to surprising effects and undefined behavior. Here are the possible initialization parameters: `block_start_string` The string marking the beginning of a block. Defaults to ``'{%'``. `block_end_string` The string marking the end of a block. Defaults to ``'%}'``. `variable_start_string` The string marking the beginning of a print statement. Defaults to ``'{{'``. `variable_end_string` The string marking the end of a print statement. Defaults to ``'}}'``. `comment_start_string` The string marking the beginning of a comment. Defaults to ``'{#'``.
`comment_end_string` The string marking the end of a comment. Defaults to ``'#}'``. `line_statement_prefix` If given and a string, this will be used as prefix for line based statements. See also :ref:`line-statements`. `line_comment_prefix` If given and a string, this will be used as prefix for line based comments. See also :ref:`line-statements`. .. versionadded:: 2.2 `trim_blocks` If this is set to ``True`` the first newline after a block is removed (block, not variable tag!). Defaults to `False`. `newline_sequence` The sequence that starts a newline. Must be one of ``'\r'``, ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a useful default for Linux and OS X systems as well as web applications. `extensions` List of Jinja extensions to use. This can either be import paths as strings or extension classes. For more information have a look at :ref:`the extensions documentation <jinja-extensions>`. `optimized` should the optimizer be enabled? Default is `True`. `undefined` :class:`Undefined` or a subclass of it that is used to represent undefined values in the template. `finalize` A callable that can be used to process the result of a variable expression before it is output. For example one can convert `None` implicitly into an empty string here. `autoescape` If set to true the XML/HTML autoescaping feature is enabled by default. For more details about auto escaping see :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also be a callable that is passed the template name and has to return `True` or `False` depending on whether autoescape should be enabled by default. .. versionchanged:: 2.4 `autoescape` can now be a function `loader` The template loader for this environment. `cache_size` The size of the cache. Per default this is ``50`` which means that if more than 50 templates are loaded the loader will clean out the least recently used template. If the cache size is set to ``0`` templates are recompiled all the time; if the cache size is ``-1`` the cache will not be cleaned. `auto_reload` Some loaders load templates from locations where the template sources may change (i.e. file system or database). If `auto_reload` is set to `True` (default) every time a template is requested the loader checks if the source changed and if yes, it will reload the template. For higher performance it's possible to disable that. `bytecode_cache` If set to a bytecode cache object, this object will provide a cache for the internal Jinja bytecode so that templates don't have to be parsed if they were not changed. See :ref:`bytecode-cache` for more information. """ #: if this environment is sandboxed. Modifying this variable won't make #: the environment sandboxed though. For a real sandboxed environment #: have a look at jinja2.sandbox sandboxed = False #: True if the environment is just an overlay overlayed = False #: the environment this environment is linked to if it is an overlay linked_to = None #: shared environments have this set to `True`. A shared environment #: must not be modified shared = False #: these are currently EXPERIMENTAL undocumented features.
exception_handler = None exception_formatter = None def __init__(self, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False, loader=None, cache_size=50, auto_reload=True, bytecode_cache=None): # !!Important notice!! # The constructor accepts quite a few arguments that should be # passed by keyword rather than position. However it's important to # not change the order of arguments because it's used at least # internally in those cases: # - spontaneous environments (i18n extension and Template) # - unittests # If parameter changes are required only add parameters at the end # and don't change the order (or the defaults!) of the arguments # existing already. # lexer / parser information self.block_start_string = block_start_string self.block_end_string = block_end_string self.variable_start_string = variable_start_string self.variable_end_string = variable_end_string self.comment_start_string = comment_start_string self.comment_end_string = comment_end_string self.line_statement_prefix = line_statement_prefix self.line_comment_prefix = line_comment_prefix self.trim_blocks = trim_blocks self.newline_sequence = newline_sequence # runtime information self.undefined = undefined self.optimized = optimized self.finalize = finalize self.autoescape = autoescape # defaults self.filters = DEFAULT_FILTERS.copy() self.tests = DEFAULT_TESTS.copy() self.globals = DEFAULT_NAMESPACE.copy() # set the loader provided self.loader = loader self.bytecode_cache = None self.cache = create_cache(cache_size) self.bytecode_cache = bytecode_cache self.auto_reload = auto_reload # load extensions self.extensions = load_extensions(self, extensions) _environment_sanity_check(self) def extend(self, **attributes): """Add the items to the instance of the environment if they do not exist yet. This is used by :ref:`extensions <writing-extensions>` to register callbacks and configuration values without breaking inheritance. """ for key, value in attributes.iteritems(): if not hasattr(self, key): setattr(self, key, value) def overlay(self, block_start_string=missing, block_end_string=missing, variable_start_string=missing, variable_end_string=missing, comment_start_string=missing, comment_end_string=missing, line_statement_prefix=missing, line_comment_prefix=missing, trim_blocks=missing, extensions=missing, optimized=missing, undefined=missing, finalize=missing, autoescape=missing, loader=missing, cache_size=missing, auto_reload=missing, bytecode_cache=missing): """Create a new overlay environment that shares all the data with the current environment except for the cache and the overridden attributes. Extensions cannot be removed for an overlayed environment. An overlayed environment automatically gets all the extensions of the environment it is linked to plus optional extra extensions. Creating overlays should happen after the initial environment was set up completely. Not all attributes are truly linked, some are just copied over so modifications on the original environment may not shine through.
""" args = dict(locals()) del args['self'], args['cache_size'], args['extensions'] rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.overlayed = True rv.linked_to = self for key, value in args.iteritems(): if value is not missing: setattr(rv, key, value) if cache_size is not missing: rv.cache = create_cache(cache_size) else: rv.cache = copy_cache(self.cache) rv.extensions = {} for key, value in self.extensions.iteritems(): rv.extensions[key] = value.bind(rv) if extensions is not missing: rv.extensions.update(load_extensions(extensions)) return _environment_sanity_check(rv) lexer = property(get_lexer, doc="The lexer for this environment.") def iter_extensions(self): """Iterates over the extensions by priority.""" return iter(sorted(self.extensions.values(), key=lambda x: x.priority)) def getitem(self, obj, argument): """Get an item or attribute of an object but prefer the item.""" try: return obj[argument] except (TypeError, LookupError): if isinstance(argument, basestring): try: attr = str(argument) except: pass else: try: return getattr(obj, attr) except AttributeError: pass return self.undefined(obj=obj, name=argument) def getattr(self, obj, attribute): """Get an item or attribute of an object but prefer the attribute. Unlike :meth:`getitem` the attribute *must* be a bytestring. """ try: return getattr(obj, attribute) except AttributeError: pass try: return obj[attribute] except (TypeError, LookupError, AttributeError): return self.undefined(obj=obj, name=attribute) @internalcode def parse(self, source, name=None, filename=None): """Parse the sourcecode and return the abstract syntax tree. This tree of nodes is used by the compiler to convert the template into executable source- or bytecode. This is useful for debugging or to extract information from templates. If you are :ref:`developing Jinja2 extensions <writing-extensions>` this gives you a good overview of the node tree generated. """ try: return self._parse(source, name, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source) def _parse(self, source, name, filename): """Internal parsing function used by `parse` and `compile`.""" return Parser(self, source, name, _encode_filename(filename)).parse() def lex(self, source, name=None, filename=None): """Lex the given sourcecode and return a generator that yields tokens as tuples in the form ``(lineno, token_type, value)``. This can be useful for :ref:`extension development <writing-extensions>` and debugging templates. This does not perform preprocessing. If you want the preprocessing of the extensions to be applied you have to filter source through the :meth:`preprocess` method. """ source = unicode(source) try: return self.lexer.tokeniter(source, name, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source) def preprocess(self, source, name=None, filename=None): """Preprocesses the source with all extensions. This is automatically called for all parsing and compiling methods but *not* for :meth:`lex` because there you usually only want the actual source tokenized. """ return reduce(lambda s, e: e.preprocess(s, name, filename), self.iter_extensions(), unicode(source)) def _tokenize(self, source, name, filename=None, state=None): """Called by the parser to do the preprocessing and filtering for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. 
""" source = self.preprocess(source, name, filename) stream = self.lexer.tokenize(source, name, filename, state) for ext in self.iter_extensions(): stream = ext.filter_stream(stream) if not isinstance(stream, TokenStream): stream = TokenStream(stream, name, filename) return stream @internalcode def compile(self, source, name=None, filename=None, raw=False, defer_init=False): """Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. the `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is use internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable to be set. .. versionadded:: 2.4 `defer_init` parameter added. """ source_hint = None try: if isinstance(source, basestring): source_hint = source source = self._parse(source, name, filename) if self.optimized: source = optimize(source, self) source = generate(source, self, name, filename, defer_init=defer_init) if raw: return source if filename is None: filename = '<template>' else: filename = _encode_filename(filename) return compile(source, filename, 'exec') except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source) def compile_expression(self, source, undefined_to_none=True): """A handy helper method that returns a callable that accepts keyword arguments that appear as variables in the expression. If called it returns the result of the expression. This is useful if applications want to use the same rules as Jinja in template "configuration files" or similar situations. Example usage: >>> env = Environment() >>> expr = env.compile_expression('foo == 42') >>> expr(foo=23) False >>> expr(foo=42) True Per default the return value is converted to `None` if the expression returns an undefined value. This can be changed by setting `undefined_to_none` to `False`. >>> env.compile_expression('var')() is None True >>> env.compile_expression('var', undefined_to_none=False)() Undefined .. versionadded:: 2.1 """ parser = Parser(self, source, state='variable') exc_info = None try: expr = parser.parse_expression() if not parser.stream.eos: raise TemplateSyntaxError('chunk after expression', parser.stream.current.lineno, None, None) expr.set_environment(self) except TemplateSyntaxError: exc_info = sys.exc_info() if exc_info is not None: self.handle_exception(exc_info, source_hint=source) body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)] template = self.from_string(nodes.Template(body, lineno=1)) return TemplateExpression(template, undefined_to_none) def compile_templates(self, target, extensions=None, filter_func=None, zip='deflated', log_function=None, ignore_errors=True, py_compile=False): """Compiles all the templates the loader can find, compiles them and stores them in `target`. If `zip` is `None`, instead of in a zipfile, the templates will be will be stored in a directory. By default a deflate zip algorithm is used, to switch to the stored algorithm, `zip` can be set to ``'stored'``. 
`extensions` and `filter_func` are passed to :meth:`list_templates`. Each template returned will be compiled to the target folder or zipfile. By default template compilation errors are ignored. In case a log function is provided, errors are logged. If you want template syntax errors to abort the compilation you can set `ignore_errors` to `False` and you will get an exception on syntax errors. If `py_compile` is set to `True` .pyc files will be written to the target instead of standard .py files. .. versionadded:: 2.4 """ from jinja2.loaders import ModuleLoader if log_function is None: log_function = lambda x: None if py_compile: import imp, struct, marshal py_header = imp.get_magic() + \ u'\xff\xff\xff\xff'.encode('iso-8859-15') def write_file(filename, data, mode): if zip: info = ZipInfo(filename) info.external_attr = 0755 << 16L zip_file.writestr(info, data) else: f = open(os.path.join(target, filename), mode) try: f.write(data) finally: f.close() if zip is not None: from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]) log_function('Compiling into Zip archive "%s"' % target) else: if not os.path.isdir(target): os.makedirs(target) log_function('Compiling into folder "%s"' % target) try: for name in self.list_templates(extensions, filter_func): source, filename, _ = self.loader.get_source(self, name) try: code = self.compile(source, name, filename, True, True) except TemplateSyntaxError, e: if not ignore_errors: raise log_function('Could not compile "%s": %s' % (name, e)) continue filename = ModuleLoader.get_module_filename(name) if py_compile: c = compile(code, _encode_filename(filename), 'exec') write_file(filename + 'c', py_header + marshal.dumps(c), 'wb') log_function('Byte-compiled "%s" as %s' % (name, filename + 'c')) else: write_file(filename, code, 'w') log_function('Compiled "%s" as %s' % (name, filename)) finally: if zip: zip_file.close() log_function('Finished compiling templates') def list_templates(self, extensions=None, filter_func=None): """Returns a list of templates for this environment. This requires that the loader supports the loader's :meth:`~BaseLoader.list_templates` method. If there are other files in the template folder besides the actual templates, the returned list can be filtered. There are two ways: either `extensions` is set to a list of file extensions for templates, or a `filter_func` can be provided which is a callable that is passed a template name and should return `True` if it should end up in the result list. If the loader does not support that, a :exc:`TypeError` is raised. """ x = self.loader.list_templates() if extensions is not None: if filter_func is not None: raise TypeError('either extensions or filter_func ' 'can be passed, but not both') filter_func = lambda x: '.' in x and \ x.rsplit('.', 1)[1] in extensions if filter_func is not None: x = filter(filter_func, x) return x def handle_exception(self, exc_info=None, rendered=False, source_hint=None): """Exception handling helper. This is used internally to either raise rewritten exceptions or return a rendered traceback for the template. """ global _make_traceback if exc_info is None: exc_info = sys.exc_info() # the debugging module is imported when it's used for the first time. # we're doing a lot of stuff there and for applications that do not # get any exceptions in template rendering there is no need to load # all of that. 
if _make_traceback is None: from jinja2.debug import make_traceback as _make_traceback traceback = _make_traceback(exc_info, source_hint) if rendered and self.exception_formatter is not None: return self.exception_formatter(traceback) if self.exception_handler is not None: self.exception_handler(traceback) exc_type, exc_value, tb = traceback.standard_exc_info raise exc_type, exc_value, tb def join_path(self, template, parent): """Join a template with the parent. By default all the lookups are relative to the loader root so this method returns the `template` parameter unchanged, but if the paths should be relative to the parent template, this function can be used to calculate the real template name. Subclasses may override this method and implement template path joining here. """ return template @internalcode def _load_template(self, name, globals): if self.loader is None: raise TypeError('no loader for this environment specified') if self.cache is not None: template = self.cache.get(name) if template is not None and (not self.auto_reload or \ template.is_up_to_date): return template template = self.loader.load(self, name, globals) if self.cache is not None: self.cache[name] = template return template @internalcode def get_template(self, name, parent=None, globals=None): """Load a template from the loader. If a loader is configured this method asks the loader for the template and returns a :class:`Template`. If the `parent` parameter is not `None`, :meth:`join_path` is called to get the real template name before loading. The `globals` parameter can be used to provide template wide globals. These variables are available in the context at render time. If the template does not exist a :exc:`TemplateNotFound` exception is raised. .. versionchanged:: 2.4 If `name` is a :class:`Template` object it is returned from the function unchanged. """ if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) return self._load_template(name, self.make_globals(globals)) @internalcode def select_template(self, names, parent=None, globals=None): """Works like :meth:`get_template` but tries a number of templates before it fails. If it cannot find any of the templates, it will raise a :exc:`TemplatesNotFound` exception. .. versionadded:: 2.3 .. versionchanged:: 2.4 If `names` contains a :class:`Template` object it is returned from the function unchanged. """ if not names: raise TemplatesNotFound(message=u'Tried to select from an empty list ' u'of templates.') globals = self.make_globals(globals) for name in names: if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) try: return self._load_template(name, globals) except TemplateNotFound: pass raise TemplatesNotFound(names) @internalcode def get_or_select_template(self, template_name_or_list, parent=None, globals=None): """Does a typecheck and dispatches to :meth:`select_template` if an iterable of template names is given, otherwise to :meth:`get_template`. .. versionadded:: 2.3 """ if isinstance(template_name_or_list, basestring): return self.get_template(template_name_or_list, parent, globals) elif isinstance(template_name_or_list, Template): return template_name_or_list return self.select_template(template_name_or_list, parent, globals) def from_string(self, source, globals=None, template_class=None): """Load a template from a string. This parses the source given and returns a :class:`Template` object.
""" globals = self.make_globals(globals) cls = template_class or self.template_class return cls.from_code(self, self.compile(source), globals, None) def make_globals(self, d): """Return a dict for the globals.""" if not d: return self.globals return dict(self.globals, **d) class Template(object): """The central template object. This class represents a compiled template and is used to evaluate it. Normally the template object is generated from an :class:`Environment` but it also has a constructor that makes it possible to create a template instance directly using the constructor. It takes the same arguments as the environment constructor but it's not possible to specify a loader. Every template object has a few methods and members that are guaranteed to exist. However it's important that a template object should be considered immutable. Modifications on the object are not supported. Template objects created from the constructor rather than an environment do have an `environment` attribute that points to a temporary environment that is probably shared with other templates created with the constructor and compatible settings. >>> template = Template('Hello {{ name }}!') >>> template.render(name='John Doe') u'Hello John Doe!' >>> stream = template.stream(name='John Doe') >>> stream.next() u'Hello John Doe!' >>> stream.next() Traceback (most recent call last): ... StopIteration """ def __new__(cls, source, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False): env = get_spontaneous_environment( block_start_string, block_end_string, variable_start_string, variable_end_string, comment_start_string, comment_end_string, line_statement_prefix, line_comment_prefix, trim_blocks, newline_sequence, frozenset(extensions), optimized, undefined, finalize, autoescape, None, 0, False, None) return env.from_string(source, template_class=cls) @classmethod def from_code(cls, environment, code, globals, uptodate=None): """Creates a template object from compiled code and the globals. This is used by the loaders and environment to create a template object. """ namespace = { 'environment': environment, '__file__': code.co_filename } exec code in namespace rv = cls._from_namespace(environment, namespace, globals) rv._uptodate = uptodate return rv @classmethod def from_module_dict(cls, environment, module_dict, globals): """Creates a template object from a module. This is used by the module loader to create a template object. .. 
versionadded:: 2.4 """ return cls._from_namespace(environment, module_dict, globals) @classmethod def _from_namespace(cls, environment, namespace, globals): t = object.__new__(cls) t.environment = environment t.globals = globals t.name = namespace['name'] t.filename = namespace['__file__'] t.blocks = namespace['blocks'] # render function and module t.root_render_func = namespace['root'] t._module = None # debug and loader helpers t._debug_info = namespace['debug_info'] t._uptodate = None # store the reference namespace['environment'] = environment namespace['__jinja_template__'] = t return t def render(self, *args, **kwargs): """This method accepts the same arguments as the `dict` constructor: A dict, a dict subclass or some keyword arguments. If no arguments are given the context will be empty. These two calls do the same:: template.render(knights='that say nih') template.render({'knights': 'that say nih'}) This will return the rendered template as unicode string. """ vars = dict(*args, **kwargs) try: return concat(self.root_render_func(self.new_context(vars))) except: exc_info = sys.exc_info() return self.environment.handle_exception(exc_info, True) def stream(self, *args, **kwargs): """Works exactly like :meth:`generate` but returns a :class:`TemplateStream`. """ return TemplateStream(self.generate(*args, **kwargs)) def generate(self, *args, **kwargs): """For very large templates it can be useful to not render the whole template at once but evaluate each statement after another and yield piece for piece. This method basically does exactly that and returns a generator that yields one item after another as unicode strings. It accepts the same arguments as :meth:`render`. """ vars = dict(*args, **kwargs) try: for event in self.root_render_func(self.new_context(vars)): yield event except: exc_info = sys.exc_info() else: return yield self.environment.handle_exception(exc_info, True) def new_context(self, vars=None, shared=False, locals=None): """Create a new :class:`Context` for this template. The vars provided will be passed to the template. Per default the globals are added to the context. If shared is set to `True` the data is passed as it to the context without adding the globals. `locals` can be a dict of local variables for internal usage. """ return new_context(self.environment, self.name, self.blocks, vars, shared, self.globals, locals) def make_module(self, vars=None, shared=False, locals=None): """This method works like the :attr:`module` attribute when called without arguments but it will evaluate the template on every call rather than caching it. It's also possible to provide a dict which is then used as context. The arguments are the same as for the :meth:`new_context` method. """ return TemplateModule(self, self.new_context(vars, shared, locals)) @property def module(self): """The template as module. This is used for imports in the template runtime but is also useful if one wants to access exported template variables from the Python layer: >>> t = Template('{% macro foo() %}42{% endmacro %}23') >>> unicode(t.module) u'23' >>> t.module.foo() u'42' """ if self._module is not None: return self._module self._module = rv = self.make_module() return rv def get_corresponding_lineno(self, lineno): """Return the source line number of a line number in the generated bytecode as they are not in sync. 
""" for template_line, code_line in reversed(self.debug_info): if code_line <= lineno: return template_line return 1 @property def is_up_to_date(self): """If this variable is `False` there is a newer version available.""" if self._uptodate is None: return True return self._uptodate() @property def debug_info(self): """The debug info mapping.""" return [tuple(map(int, x.split('='))) for x in self._debug_info.split('&')] def __repr__(self): if self.name is None: name = 'memory:%x' % id(self) else: name = repr(self.name) return '<%s %s>' % (self.__class__.__name__, name) class TemplateModule(object): """Represents an imported template. All the exported names of the template are available as attributes on this object. Additionally converting it into an unicode- or bytestrings renders the contents. """ def __init__(self, template, context): self._body_stream = list(template.root_render_func(context)) self.__dict__.update(context.get_exported()) self.__name__ = template.name def __html__(self): return Markup(concat(self._body_stream)) def __str__(self): return unicode(self).encode('utf-8') # unicode goes after __str__ because we configured 2to3 to rename # __unicode__ to __str__. because the 2to3 tree is not designed to # remove nodes from it, we leave the above __str__ around and let # it override at runtime. def __unicode__(self): return concat(self._body_stream) def __repr__(self): if self.__name__ is None: name = 'memory:%x' % id(self) else: name = repr(self.__name__) return '<%s %s>' % (self.__class__.__name__, name) class TemplateExpression(object): """The :meth:`jinja2.Environment.compile_expression` method returns an instance of this object. It encapsulates the expression-like access to the template with an expression it wraps. """ def __init__(self, template, undefined_to_none): self._template = template self._undefined_to_none = undefined_to_none def __call__(self, *args, **kwargs): context = self._template.new_context(dict(*args, **kwargs)) consume(self._template.root_render_func(context)) rv = context.vars['result'] if self._undefined_to_none and isinstance(rv, Undefined): rv = None return rv class TemplateStream(object): """A template stream works pretty much like an ordinary python generator but it can buffer multiple items to reduce the number of total iterations. Per default the output is unbuffered which means that for every unbuffered instruction in the template one unicode string is yielded. If buffering is enabled with a buffer size of 5, five items are combined into a new unicode string. This is mainly useful if you are streaming big templates to a client via WSGI which flushes after each iteration. """ def __init__(self, gen): self._gen = gen self.disable_buffering() def dump(self, fp, encoding=None, errors='strict'): """Dump the complete stream into a file or file-like object. Per default unicode strings are written, if you want to encode before writing specifiy an `encoding`. Example usage:: Template('Hello {{ name }}!').stream(name='foo').dump('hello.html') """ close = False if isinstance(fp, basestring): fp = file(fp, 'w') close = True try: if encoding is not None: iterable = (x.encode(encoding, errors) for x in self) else: iterable = self if hasattr(fp, 'writelines'): fp.writelines(iterable) else: for item in iterable: fp.write(item) finally: if close: fp.close() def disable_buffering(self): """Disable the output buffering.""" self._next = self._gen.next self.buffered = False def enable_buffering(self, size=5): """Enable buffering. 
Buffer `size` items before yielding them.""" if size <= 1: raise ValueError('buffer size too small') def generator(next): buf = [] c_size = 0 push = buf.append while 1: try: while c_size < size: c = next() push(c) if c: c_size += 1 except StopIteration: if not c_size: return yield concat(buf) del buf[:] c_size = 0 self.buffered = True self._next = generator(self._gen.next).next def __iter__(self): return self def next(self): return self._next() # hook in default template class. if anyone reads this comment: ignore that # it's possible to use custom templates ;-) Environment.template_class = Template
apache-2.0
2,180,516,693,026,373,600
38.463927
82
0.605998
false
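A minimal usage sketch of the Environment API defined in the row above (assumes a Jinja2 installation; the second half mirrors the compile_expression doctest):

from jinja2 import Environment

env = Environment()                           # plain, loader-less environment
tmpl = env.from_string("Hello {{ name }}!")   # parsed and compiled on the fly
assert tmpl.render(name="World") == "Hello World!"

expr = env.compile_expression("foo == 42")    # expression-only evaluation
assert expr(foo=42) is True
assert expr(foo=23) is False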
smartshark/serverSHARK
smartshark/urls.py
1
1884
from django.conf.urls import url from django.core.urlresolvers import reverse_lazy from django.contrib.auth.views import login, logout from smartshark.views import analysis, common, collection, visualizations, remote urlpatterns = [ # Frontend url(r'^login/$', login, name='mysite_login'), url(r'^logout/$', logout, {'next_page': reverse_lazy('index')}, name='mysite_logout'), url(r'^$', common.index, name='index'), url(r'^documentation/$', common.documentation, name='documentation'), url(r'^visualizations/overview/$', visualizations.overview, name='overview'), url(r'^spark/submit/$', analysis.spark_submit, name='spark_submit'), # Backend url(r'^smartshark/project/collection/choose/$', collection.choose_plugins, name='choose_plugins'), url(r'^smartshark/project/collection/start/$', collection.start_collection, name='collection_start'), url(r'^smartshark/project/delete/$', collection.delete_project_data, name='project_delete_data'), url(r'^admin/smartshark/project/plugin_status/(?P<id>[0-9]+)$', common.plugin_status, name='plugin_status'), url(r'^admin/smartshark/project/plugin_execution/(?P<id>[0-9]+)$', common.plugin_execution_status, name='plugin_execution_status'), url(r'^admin/smartshark/project/job/(?P<id>[0-9]+)/(?P<type>[a-z]+)$', common.job_output, name='job_output'), url(r'^smartshark/plugin/install/$', collection.install, name='install'), url(r'^smartshark/plugin/github/install', collection.installgithub, name='view'), # remote additions url(r'^remote/test/$', remote.test_connection, name='remote_test_connection'), url(r'^remote/plugin/$', remote.list_plugins, name='remote_list_plugins'), url(r'^remote/argument/$', remote.list_arguments, name='remote_list_plugin_arguments'), url(r'^remote/collect/$', remote.start_collection, name='remote_start_collection') ]
apache-2.0
-4,563,876,464,949,703,000
59.774194
135
0.705945
false
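A minimal sketch of reversing one of the named patterns in the URLconf above (assumes a configured Django project loading this module; the id value is illustrative):

from django.core.urlresolvers import reverse  # same import style as the URLconf

# Resolves the named pattern 'plugin_status' back to a path,
# e.g. '/admin/smartshark/project/plugin_status/42'
url = reverse('plugin_status', kwargs={'id': 42})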
pawaranand/phr_frappe
frappe/website/statics.py
17
5584
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe, os, time def sync_statics(rebuild=False): s = sync() s.verbose = True # s.start(rebuild) # frappe.db.commit() while True: s.start(rebuild) frappe.db.commit() time.sleep(2) rebuild = False class sync(object): def __init__(self, verbose=False): self.verbose = verbose def start(self, rebuild=False): self.synced = [] self.synced_paths = [] self.updated = 0 if rebuild: frappe.db.sql("delete from `tabWeb Page` where ifnull(template_path, '')!=''") for app in frappe.get_installed_apps(): self.sync_for_app(app) self.cleanup() def sync_for_app(self, app): self.statics_path = frappe.get_app_path(app, "templates", "statics") if os.path.exists(self.statics_path): for basepath, folders, files in os.walk(self.statics_path): self.sync_folder(basepath, folders, files) def sync_folder(self, basepath, folders, files): self.get_index_txt(basepath, files) index_found = self.sync_index_page(basepath, files) if not index_found and basepath!=self.statics_path: # not synced either by generator or by index.html return if self.index: self.sync_using_given_index(basepath, folders, files) else: self.sync_alphabetically(basepath, folders, [filename for filename in files if filename.endswith('html') or filename.endswith('md')]) def get_index_txt(self, basepath, files): self.index = [] if "index.txt" in files: with open(os.path.join(basepath, "index.txt"), "r") as indexfile: self.index = indexfile.read().splitlines() def sync_index_page(self, basepath, files): for extn in ("md", "html"): fname = "index." + extn if fname in files: self.sync_file(fname, os.path.join(basepath, fname), None) return True def sync_using_given_index(self, basepath, folders, files): for i, page_name in enumerate(self.index): if page_name in folders: # for folder, sync inner index first (so that idx is set) for extn in ("md", "html"): path = os.path.join(basepath, page_name, "index." + extn) if os.path.exists(path): self.sync_file("index." 
+ extn, path, i) break # other files if page_name + ".md" in files: self.sync_file(page_name + ".md", os.path.join(basepath, page_name + ".md"), i) elif page_name + ".html" in files: self.sync_file(page_name + ".html", os.path.join(basepath, page_name + ".html"), i) else: if page_name not in folders: print page_name + " not found in " + basepath def sync_alphabetically(self, basepath, folders, files): files.sort() for fname in files: page_name = fname.rsplit(".", 1)[0] if not (page_name=="index" and basepath!=self.statics_path): self.sync_file(fname, os.path.join(basepath, fname), None) def sync_file(self, fname, template_path, priority): route = os.path.relpath(template_path, self.statics_path).rsplit(".", 1)[0] if fname.rsplit(".", 1)[0]=="index" and \ os.path.dirname(template_path) != self.statics_path: route = os.path.dirname(route) parent_web_page = frappe.db.sql("""select name from `tabWeb Page` where page_name=%s and ifnull(parent_website_route, '')=ifnull(%s, '')""", (os.path.basename(os.path.dirname(route)), os.path.dirname(os.path.dirname(route)))) parent_web_page = parent_web_page and parent_web_page[0][0] or "" page_name = os.path.basename(route) published = 1 idx = priority if (parent_web_page, page_name) in self.synced: return title = self.get_title(template_path) if not frappe.db.get_value("Web Page", {"template_path":template_path}): web_page = frappe.new_doc("Web Page") web_page.page_name = page_name web_page.parent_web_page = parent_web_page web_page.template_path = template_path web_page.title = title web_page.published = published web_page.idx = idx web_page.from_website_sync = True web_page.insert() if self.verbose: print "Inserted: " + web_page.name else: web_page = frappe.get_doc("Web Page", {"template_path":template_path}) dirty = False for key in ("parent_web_page", "title", "template_path", "published", "idx"): if web_page.get(key) != locals().get(key): web_page.set(key, locals().get(key)) dirty = True if dirty: web_page.from_website_sync = True web_page.save() if self.verbose: print "Updated: " + web_page.name self.synced.append((parent_web_page, page_name)) def get_title(self, fpath): title = os.path.basename(fpath).rsplit(".", 1)[0] if title =="index": title = os.path.basename(os.path.dirname(fpath)) title = title.replace("-", " ").replace("_", " ").title() with open(fpath, "r") as f: content = unicode(f.read().strip(), "utf-8") if content.startswith("# "): title = content.splitlines()[0][2:] if "<!-- title:" in content: title = content.split("<!-- title:", 1)[1].split("-->", 1)[0].strip() return title def cleanup(self): if self.synced: # delete static web pages that are not in immediate list for static_page in frappe.db.sql("""select name, page_name, parent_web_page from `tabWeb Page` where ifnull(template_path,'')!=''""", as_dict=1): if (static_page.parent_web_page, static_page.page_name) not in self.synced: frappe.delete_doc("Web Page", static_page.name, force=1) else: # delete all static web pages frappe.delete_doc("Web Page", frappe.db.sql_list("""select name from `tabWeb Page` where ifnull(template_path,'')!=''"""), force=1)
mit
-7,153,857,291,017,994,000
31.277457
136
0.6601
false
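A standalone sketch isolating the title-extraction rules implemented by get_title in the row above: a leading '# ' heading supplies the title unless an explicit '<!-- title: ... -->' comment overrides it (the function name and asserts are illustrative):

def extract_title(content, fallback):
    title = fallback
    if content.startswith("# "):
        # A first markdown-style heading wins over the filename-derived title.
        title = content.splitlines()[0][2:]
    if "<!-- title:" in content:
        # An explicit title comment overrides everything else.
        title = content.split("<!-- title:", 1)[1].split("-->", 1)[0].strip()
    return title

assert extract_title("# My Page\nbody", "Fallback") == "My Page"
assert extract_title("<!-- title: Override -->\n# Heading", "Fallback") == "Override"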
kbran420/android-quill
jni/libhpdf-2.3.0RC2/if/python/demo/slide_show_demo.py
32
6231
### ## * << Haru Free PDF Library 2.0.6 >> -- slideshow_demo.c ## * ## * Copyright (c) 1999-2006 Takeshi Kanno <[email protected]> ## * ## * Permission to use, copy, modify, distribute and sell this software ## * and its documentation for any purpose is hereby granted without fee, ## * provided that the above copyright notice appear in all copies and ## * that both that copyright notice and this permission notice appear ## * in supporting documentation. ## * It is provided "as is" without express or implied warranty. ## * ## ## port to python by Li Jun ## http://groups.google.com/group/pythoncia import os, sys from ctypes import * up=2 def setlibpath(up): import sys path=os.path.normpath(os.path.split(os.path.realpath(__file__))[0]+'\..'*up) if path not in sys.path: sys.path.append(path) setlibpath(up) from haru import * from haru.c_func import * from haru.hpdf_errorcode import * import random @HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p) def error_handler (error_no, detail_no, user_data): global pdf printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no], detail_no) HPDF_Free (pdf) sys.exit(1) def print_page (page, caption, font, style, prev, next): r = random.random() g = random.random() b = random.random() rect=HPDF_Rect() HPDF_Page_SetWidth (page, 800) HPDF_Page_SetHeight (page, 600) HPDF_Page_SetRGBFill (page, r, g, b) HPDF_Page_Rectangle (page, 0, 0, 800, 600) HPDF_Page_Fill (page) HPDF_Page_SetRGBFill (page, 1.0 - r, 1.0 - g, 1.0 - b) HPDF_Page_SetFontAndSize (page, font, 30) HPDF_Page_BeginText (page) HPDF_Page_SetTextMatrix (page, 0.8, 0.0, 0.0, 1.0, 0.0, 0.0) HPDF_Page_TextOut (page, 50, 530, caption) HPDF_Page_SetTextMatrix (page, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0) HPDF_Page_SetFontAndSize (page, font, 20) HPDF_Page_TextOut (page, 55, 300, "Type \"Ctrl+L\" in order to return from full screen mode.") HPDF_Page_EndText (page) HPDF_Page_SetSlideShow (page, style, 5.0, 1.0) HPDF_Page_SetFontAndSize (page, font, 20) if (next): HPDF_Page_BeginText (page) HPDF_Page_TextOut (page, 680, 50, "Next=>") HPDF_Page_EndText (page) rect.left = 680 rect.right = 750 rect.top = 70 rect.bottom = 50 dst = HPDF_Page_CreateDestination (next) HPDF_Destination_SetFit(dst) annot = HPDF_Page_CreateLinkAnnot (page, rect, dst) HPDF_LinkAnnot_SetBorderStyle (annot, 0, 0, 0) HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BOX) if (prev): HPDF_Page_BeginText (page) HPDF_Page_TextOut (page, 50, 50, "<=Prev") HPDF_Page_EndText (page) rect.left = 50 rect.right = 110 rect.top = 70 rect.bottom = 50 dst = HPDF_Page_CreateDestination (prev) HPDF_Destination_SetFit(dst) annot = HPDF_Page_CreateLinkAnnot (page, rect, dst) HPDF_LinkAnnot_SetBorderStyle (annot, 0, 0, 0) HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BOX) def main(): global pdf page=[None for i in range(17)] fname=os.path.realpath(sys.argv[0]) fname=fname[:fname.rfind('.')]+'.pdf' pdf = HPDF_New (error_handler, NULL) if (not pdf): printf ("error: cannot create PdfDoc object\n") return 1 # create default-font font = HPDF_GetFont (pdf, "Courier", NULL) # Add 17 pages to the document. 
page[0] = HPDF_AddPage (pdf) page[1] = HPDF_AddPage (pdf) page[2] = HPDF_AddPage (pdf) page[3] = HPDF_AddPage (pdf) page[4] = HPDF_AddPage (pdf) page[5] = HPDF_AddPage (pdf) page[6] = HPDF_AddPage (pdf) page[7] = HPDF_AddPage (pdf) page[8] = HPDF_AddPage (pdf) page[9] = HPDF_AddPage (pdf) page[10] = HPDF_AddPage (pdf) page[11] = HPDF_AddPage (pdf) page[12] = HPDF_AddPage (pdf) page[13] = HPDF_AddPage (pdf) page[14] = HPDF_AddPage (pdf) page[15] = HPDF_AddPage (pdf) page[16] = HPDF_AddPage (pdf) print_page(page[0], "HPDF_TS_WIPE_RIGHT", font, HPDF_TS_WIPE_RIGHT, NULL, page[1]) print_page(page[1], "HPDF_TS_WIPE_UP", font, HPDF_TS_WIPE_UP, page[0], page[2]) print_page(page[2], "HPDF_TS_WIPE_LEFT", font, HPDF_TS_WIPE_LEFT, page[1], page[3]) print_page(page[3], "HPDF_TS_WIPE_DOWN", font, HPDF_TS_WIPE_DOWN, page[2], page[4]) print_page(page[4], "HPDF_TS_BARN_DOORS_HORIZONTAL_OUT", font, HPDF_TS_BARN_DOORS_HORIZONTAL_OUT, page[3], page[5]) print_page(page[5], "HPDF_TS_BARN_DOORS_HORIZONTAL_IN", font, HPDF_TS_BARN_DOORS_HORIZONTAL_IN, page[4], page[6]) print_page(page[6], "HPDF_TS_BARN_DOORS_VERTICAL_OUT", font, HPDF_TS_BARN_DOORS_VERTICAL_OUT, page[5], page[7]) print_page(page[7], "HPDF_TS_BARN_DOORS_VERTICAL_IN", font, HPDF_TS_BARN_DOORS_VERTICAL_IN, page[6], page[8]) print_page(page[8], "HPDF_TS_BOX_OUT", font, HPDF_TS_BOX_OUT, page[7], page[9]) print_page(page[9], "HPDF_TS_BOX_IN", font, HPDF_TS_BOX_IN, page[8], page[10]) print_page(page[10], "HPDF_TS_BLINDS_HORIZONTAL", font, HPDF_TS_BLINDS_HORIZONTAL, page[9], page[11]) print_page(page[11], "HPDF_TS_BLINDS_VERTICAL", font, HPDF_TS_BLINDS_VERTICAL, page[10], page[12]) print_page(page[12], "HPDF_TS_DISSOLVE", font, HPDF_TS_DISSOLVE, page[11], page[13]) print_page(page[13], "HPDF_TS_GLITTER_RIGHT", font, HPDF_TS_GLITTER_RIGHT, page[12], page[14]) print_page(page[14], "HPDF_TS_GLITTER_DOWN", font, HPDF_TS_GLITTER_DOWN, page[13], page[15]) print_page(page[15], "HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT", font, HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT, page[14], page[16]) print_page(page[16], "HPDF_TS_REPLACE", font, HPDF_TS_REPLACE, page[15], NULL) HPDF_SetPageMode (pdf, HPDF_PAGE_MODE_FULL_SCREEN) # save the document to a file HPDF_SaveToFile (pdf, fname) # clean up HPDF_Free (pdf) return 0 main()
gpl-3.0
3,287,125,042,290,296,300
30.805263
80
0.610175
false
thepug/Speeqe
speeqeweb/speeqe/templatetags/splib.py
1
1687
from django.contrib.sites.models import Site from django.template import Library, Node import speeqeweb.xmpp.muc as muc import speeqeweb.settings as settings register = Library() @register.simple_tag def current_domain(): return settings.HTTP_DOMAIN #return all active muc rooms class ActiveRoomsNode(Node): """ return all active muc rooms """ def render(self, context): try: context['rooms'] = muc.listrooms()[:5] except: pass return '' @register.tag(name="show_rooms") def show_rooms(parser,token): return ActiveRoomsNode() class Room: pass class FeaturedRoomsNode(Node): def __init__(self): """do I need this?""" pass def render(self, context): try: featured_rooms = [] for key in settings.FEATURED_ROOMS.keys(): room = Room() room.name = key room.url = settings.FEATURED_ROOMS[key] featured_rooms.append(room) context['featuredrooms'] = featured_rooms except: pass return '' @register.tag(name="show_featured_rooms") def show_featured_rooms(parser,token): return FeaturedRoomsNode() @register.simple_tag def help_email(): return settings.HELP_EMAIL class DnsRoomNamesNode(Node): """ return the setting that says whether the dns trick for room names is in use """ def render(self, context): try: context['dns_room_names'] = settings.DNS_ROOM_NAMES except: pass return '' @register.tag(name="use_dns_room_names") def use_dns_room_names(parser,token): return DnsRoomNamesNode()
agpl-3.0
-5,732,071,145,099,429,000
23.808824
74
0.614108
false
calberti/models
resnet/resnet_main.py
2
7036
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ResNet Train/Eval module. """ import sys import time import cifar_input import numpy as np import resnet_model import tensorflow as tf FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('dataset', 'cifar10', 'cifar10 or cifar100.') tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.') tf.app.flags.DEFINE_string('train_data_path', '', 'Filename for training data.') tf.app.flags.DEFINE_string('eval_data_path', '', 'Filename for eval data.') tf.app.flags.DEFINE_integer('image_size', 32, 'Image side length.') tf.app.flags.DEFINE_string('train_dir', '', 'Directory to keep training outputs.') tf.app.flags.DEFINE_string('eval_dir', '', 'Directory to keep eval outputs.') tf.app.flags.DEFINE_integer('eval_batch_count', 50, 'Number of batches to eval.') tf.app.flags.DEFINE_bool('eval_once', False, 'Whether to evaluate the model only once.') tf.app.flags.DEFINE_string('log_root', '', 'Directory to keep the checkpoints. Should be a ' 'parent directory of FLAGS.train_dir/eval_dir.') tf.app.flags.DEFINE_integer('num_gpus', 0, 'Number of gpus used for training. (0 or 1)') def train(hps): """Training loop.""" images, labels = cifar_input.build_input( FLAGS.dataset, FLAGS.train_data_path, hps.batch_size, FLAGS.mode) model = resnet_model.ResNet(hps, images, labels, FLAGS.mode) model.build_graph() summary_writer = tf.train.SummaryWriter(FLAGS.train_dir) sv = tf.train.Supervisor(logdir=FLAGS.log_root, is_chief=True, summary_op=None, save_summaries_secs=60, save_model_secs=300, global_step=model.global_step) sess = sv.prepare_or_wait_for_session() step = 0 total_prediction = 0 correct_prediction = 0 precision = 0.0 lrn_rate = 0.1 while not sv.should_stop(): (_, summaries, loss, predictions, truth, train_step) = sess.run( [model.train_op, model.summaries, model.cost, model.predictions, model.labels, model.global_step], feed_dict={model.lrn_rate: lrn_rate}) if train_step < 40000: lrn_rate = 0.1 elif train_step < 60000: lrn_rate = 0.01 elif train_step < 80000: lrn_rate = 0.001 else: lrn_rate = 0.0001 predictions = np.argmax(predictions, axis=1) truth = np.argmax(truth, axis=1) for (t, p) in zip(truth, predictions): if t == p: correct_prediction += 1 total_prediction += 1 precision = float(correct_prediction) / total_prediction correct_prediction = total_prediction = 0 step += 1 if step % 100 == 0: precision_summ = tf.Summary() precision_summ.value.add( tag='Precision', simple_value=precision) summary_writer.add_summary(precision_summ, train_step) summary_writer.add_summary(summaries, train_step) tf.logging.info('loss: %.3f, precision: %.3f\n' % (loss, precision)) summary_writer.flush() sv.Stop() def evaluate(hps): """Eval loop.""" images, labels = cifar_input.build_input( FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) model = resnet_model.ResNet(hps, images, labels, FLAGS.mode) model.build_graph() saver =
tf.train.Saver() summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir) sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) tf.train.start_queue_runners(sess) best_precision = 0.0 while True: time.sleep(60) try: ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) except tf.errors.OutOfRangeError as e: tf.logging.error('Cannot restore checkpoint: %s', e) continue if not (ckpt_state and ckpt_state.model_checkpoint_path): tf.logging.info('No model to eval yet at %s', FLAGS.log_root) continue tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path) total_prediction, correct_prediction = 0, 0 for _ in xrange(FLAGS.eval_batch_count): (summaries, loss, predictions, truth, train_step) = sess.run( [model.summaries, model.cost, model.predictions, model.labels, model.global_step]) best_predictions = np.argmax(predictions, axis=1) truth = np.argmax(truth, axis=1) for (t, p) in zip(truth, best_predictions): if t == p: correct_prediction += 1 total_prediction += 1 precision = 1.0 * correct_prediction / total_prediction best_precision = max(precision, best_precision) precision_summ = tf.Summary() precision_summ.value.add( tag='Precision', simple_value=precision) summary_writer.add_summary(precision_summ, train_step) best_precision_summ = tf.Summary() best_precision_summ.value.add( tag='Best Precision', simple_value=best_precision) summary_writer.add_summary(best_precision_summ, train_step) summary_writer.add_summary(summaries, train_step) tf.logging.info('loss: %.3f, precision: %.3f, best precision: %.3f\n' % (loss, precision, best_precision)) summary_writer.flush() if FLAGS.eval_once: break def main(_): if FLAGS.num_gpus == 0: dev = '/cpu:0' elif FLAGS.num_gpus == 1: dev = '/gpu:0' else: raise ValueError('Only support 0 or 1 gpu.') if FLAGS.mode == 'train': batch_size = 128 elif FLAGS.mode == 'eval': batch_size = 100 if FLAGS.dataset == 'cifar10': num_classes = 10 elif FLAGS.dataset == 'cifar100': num_classes = 100 hps = resnet_model.HParams(batch_size=batch_size, num_classes=num_classes, min_lrn_rate=0.0001, lrn_rate=0.1, num_residual_units=5, use_bottleneck=False, weight_decay_rate=0.0002, relu_leakiness=0.1, optimizer='mom') with tf.device(dev): if FLAGS.mode == 'train': train(hps) elif FLAGS.mode == 'eval': evaluate(hps) if __name__ == '__main__': tf.app.run()
apache-2.0
1,408,642,753,262,316,500
34.004975
80
0.614412
false
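The evaluate() function in the file above runs as a long-lived sidecar: sleep, poll the checkpoint directory, restore whatever is newest, score a fixed number of batches, repeat. A framework-agnostic sketch of that polling pattern, where get_latest_ckpt and run_eval are hypothetical callables standing in for the TensorFlow-specific work (this version also skips unchanged checkpoints, a small refinement the loop above does not make):

import time

def poll_checkpoints(get_latest_ckpt, run_eval, interval=60, once=False):
    # Re-run evaluation whenever a new checkpoint appears.
    last = None
    while True:
        time.sleep(interval)
        ckpt = get_latest_ckpt()      # e.g. newest checkpoint path, or None
        if ckpt is None:
            continue                  # nothing to evaluate yet
        if ckpt != last:
            run_eval(ckpt)            # restore weights and score eval batches
            last = ckpt
        if once:
            break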
thurt/arangodb
3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_datetime.py
52
133995
"""Test date/time type. See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases """ import os import pickle import cPickle import unittest from test import test_support from datetime import MINYEAR, MAXYEAR from datetime import timedelta from datetime import tzinfo from datetime import time from datetime import date, datetime pickle_choices = [(pickler, unpickler, proto) for pickler in pickle, cPickle for unpickler in pickle, cPickle for proto in range(3)] assert len(pickle_choices) == 2*2*3 # An arbitrary collection of objects of non-datetime types, for testing # mixed-type comparisons. OTHERSTUFF = (10, 10L, 34.5, "abc", {}, [], ()) ############################################################################# # module tests class TestModule(unittest.TestCase): def test_constants(self): import datetime self.assertEqual(datetime.MINYEAR, 1) self.assertEqual(datetime.MAXYEAR, 9999) ############################################################################# # tzinfo tests class FixedOffset(tzinfo): def __init__(self, offset, name, dstoffset=42): if isinstance(offset, int): offset = timedelta(minutes=offset) if isinstance(dstoffset, int): dstoffset = timedelta(minutes=dstoffset) self.__offset = offset self.__name = name self.__dstoffset = dstoffset def __repr__(self): return self.__name.lower() def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return self.__dstoffset class PicklableFixedOffset(FixedOffset): def __init__(self, offset=None, name=None, dstoffset=None): FixedOffset.__init__(self, offset, name, dstoffset) class TestTZInfo(unittest.TestCase): def test_non_abstractness(self): # In order to allow subclasses to get pickled, the C implementation # wasn't able to get away with having __init__ raise # NotImplementedError. useless = tzinfo() dt = datetime.max self.assertRaises(NotImplementedError, useless.tzname, dt) self.assertRaises(NotImplementedError, useless.utcoffset, dt) self.assertRaises(NotImplementedError, useless.dst, dt) def test_subclass_must_override(self): class NotEnough(tzinfo): def __init__(self, offset, name): self.__offset = offset self.__name = name self.failUnless(issubclass(NotEnough, tzinfo)) ne = NotEnough(3, "NotByALongShot") self.failUnless(isinstance(ne, tzinfo)) dt = datetime.now() self.assertRaises(NotImplementedError, ne.tzname, dt) self.assertRaises(NotImplementedError, ne.utcoffset, dt) self.assertRaises(NotImplementedError, ne.dst, dt) def test_normal(self): fo = FixedOffset(3, "Three") self.failUnless(isinstance(fo, tzinfo)) for dt in datetime.now(), None: self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3)) self.assertEqual(fo.tzname(dt), "Three") self.assertEqual(fo.dst(dt), timedelta(minutes=42)) def test_pickling_base(self): # There's no point to pickling tzinfo objects on their own (they # carry no data), but they need to be picklable anyway else # concrete subclasses can't be pickled. orig = tzinfo.__new__(tzinfo) self.failUnless(type(orig) is tzinfo) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.failUnless(type(derived) is tzinfo) def test_pickling_subclass(self): # Make sure we can pickle/unpickle an instance of a subclass. 
offset = timedelta(minutes=-300) orig = PicklableFixedOffset(offset, 'cookie') self.failUnless(isinstance(orig, tzinfo)) self.failUnless(type(orig) is PicklableFixedOffset) self.assertEqual(orig.utcoffset(None), offset) self.assertEqual(orig.tzname(None), 'cookie') for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.failUnless(isinstance(derived, tzinfo)) self.failUnless(type(derived) is PicklableFixedOffset) self.assertEqual(derived.utcoffset(None), offset) self.assertEqual(derived.tzname(None), 'cookie') ############################################################################# # Base clase for testing a particular aspect of timedelta, time, date and # datetime comparisons. class HarmlessMixedComparison: # Test that __eq__ and __ne__ don't complain for mixed-type comparisons. # Subclasses must define 'theclass', and theclass(1, 1, 1) must be a # legit constructor. def test_harmless_mixed_comparison(self): me = self.theclass(1, 1, 1) self.failIf(me == ()) self.failUnless(me != ()) self.failIf(() == me) self.failUnless(() != me) self.failUnless(me in [1, 20L, [], me]) self.failIf(me not in [1, 20L, [], me]) self.failUnless([] in [me, 1, 20L, []]) self.failIf([] not in [me, 1, 20L, []]) def test_harmful_mixed_comparison(self): me = self.theclass(1, 1, 1) self.assertRaises(TypeError, lambda: me < ()) self.assertRaises(TypeError, lambda: me <= ()) self.assertRaises(TypeError, lambda: me > ()) self.assertRaises(TypeError, lambda: me >= ()) self.assertRaises(TypeError, lambda: () < me) self.assertRaises(TypeError, lambda: () <= me) self.assertRaises(TypeError, lambda: () > me) self.assertRaises(TypeError, lambda: () >= me) self.assertRaises(TypeError, cmp, (), me) self.assertRaises(TypeError, cmp, me, ()) ############################################################################# # timedelta tests class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase): theclass = timedelta def test_constructor(self): eq = self.assertEqual td = timedelta # Check keyword args to constructor eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0)) eq(td(1), td(days=1)) eq(td(0, 1), td(seconds=1)) eq(td(0, 0, 1), td(microseconds=1)) eq(td(weeks=1), td(days=7)) eq(td(days=1), td(hours=24)) eq(td(hours=1), td(minutes=60)) eq(td(minutes=1), td(seconds=60)) eq(td(seconds=1), td(milliseconds=1000)) eq(td(milliseconds=1), td(microseconds=1000)) # Check float args to constructor eq(td(weeks=1.0/7), td(days=1)) eq(td(days=1.0/24), td(hours=1)) eq(td(hours=1.0/60), td(minutes=1)) eq(td(minutes=1.0/60), td(seconds=1)) eq(td(seconds=0.001), td(milliseconds=1)) eq(td(milliseconds=0.001), td(microseconds=1)) def test_computations(self): eq = self.assertEqual td = timedelta a = td(7) # One week b = td(0, 60) # One minute c = td(0, 0, 1000) # One millisecond eq(a+b+c, td(7, 60, 1000)) eq(a-b, td(6, 24*3600 - 60)) eq(-a, td(-7)) eq(+a, td(7)) eq(-b, td(-1, 24*3600 - 60)) eq(-c, td(-1, 24*3600 - 1, 999000)) eq(abs(a), a) eq(abs(-a), a) eq(td(6, 24*3600), a) eq(td(0, 0, 60*1000000), b) eq(a*10, td(70)) eq(a*10, 10*a) eq(a*10L, 10*a) eq(b*10, td(0, 600)) eq(10*b, td(0, 600)) eq(b*10L, td(0, 600)) eq(c*10, td(0, 0, 10000)) eq(10*c, td(0, 0, 10000)) eq(c*10L, td(0, 0, 10000)) eq(a*-1, -a) eq(b*-2, -b-b) eq(c*-2, -c+-c) eq(b*(60*24), (b*60)*24) eq(b*(60*24), (60*b)*24) eq(c*1000, td(0, 1)) eq(1000*c, td(0, 1)) eq(a//7, td(1)) eq(b//10, td(0, 6)) eq(c//1000, td(0, 0, 1)) eq(a//10, td(0, 7*24*360)) eq(a//3600000, td(0, 
0, 7*24*1000)) def test_disallowed_computations(self): a = timedelta(42) # Add/sub ints, longs, floats should be illegal for i in 1, 1L, 1.0: self.assertRaises(TypeError, lambda: a+i) self.assertRaises(TypeError, lambda: a-i) self.assertRaises(TypeError, lambda: i+a) self.assertRaises(TypeError, lambda: i-a) # Mul/div by float isn't supported. x = 2.3 self.assertRaises(TypeError, lambda: a*x) self.assertRaises(TypeError, lambda: x*a) self.assertRaises(TypeError, lambda: a/x) self.assertRaises(TypeError, lambda: x/a) self.assertRaises(TypeError, lambda: a // x) self.assertRaises(TypeError, lambda: x // a) # Division of int by timedelta doesn't make sense. # Division by zero doesn't make sense. for zero in 0, 0L: self.assertRaises(TypeError, lambda: zero // a) self.assertRaises(ZeroDivisionError, lambda: a // zero) def test_basic_attributes(self): days, seconds, us = 1, 7, 31 td = timedelta(days, seconds, us) self.assertEqual(td.days, days) self.assertEqual(td.seconds, seconds) self.assertEqual(td.microseconds, us) def test_carries(self): t1 = timedelta(days=100, weeks=-7, hours=-24*(100-49), minutes=-3, seconds=12, microseconds=(3*60 - 12) * 1e6 + 1) t2 = timedelta(microseconds=1) self.assertEqual(t1, t2) def test_hash_equality(self): t1 = timedelta(days=100, weeks=-7, hours=-24*(100-49), minutes=-3, seconds=12, microseconds=(3*60 - 12) * 1000000) t2 = timedelta() self.assertEqual(hash(t1), hash(t2)) t1 += timedelta(weeks=7) t2 += timedelta(days=7*7) self.assertEqual(t1, t2) self.assertEqual(hash(t1), hash(t2)) d = {t1: 1} d[t2] = 2 self.assertEqual(len(d), 1) self.assertEqual(d[t1], 2) def test_pickling(self): args = 12, 34, 56 orig = timedelta(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_compare(self): t1 = timedelta(2, 3, 4) t2 = timedelta(2, 3, 4) self.failUnless(t1 == t2) self.failUnless(t1 <= t2) self.failUnless(t1 >= t2) self.failUnless(not t1 != t2) self.failUnless(not t1 < t2) self.failUnless(not t1 > t2) self.assertEqual(cmp(t1, t2), 0) self.assertEqual(cmp(t2, t1), 0) for args in (3, 3, 3), (2, 4, 4), (2, 3, 5): t2 = timedelta(*args) # this is larger than t1 self.failUnless(t1 < t2) self.failUnless(t2 > t1) self.failUnless(t1 <= t2) self.failUnless(t2 >= t1) self.failUnless(t1 != t2) self.failUnless(t2 != t1) self.failUnless(not t1 == t2) self.failUnless(not t2 == t1) self.failUnless(not t1 > t2) self.failUnless(not t2 < t1) self.failUnless(not t1 >= t2) self.failUnless(not t2 <= t1) self.assertEqual(cmp(t1, t2), -1) self.assertEqual(cmp(t2, t1), 1) for badarg in OTHERSTUFF: self.assertEqual(t1 == badarg, False) self.assertEqual(t1 != badarg, True) self.assertEqual(badarg == t1, False) self.assertEqual(badarg != t1, True) self.assertRaises(TypeError, lambda: t1 <= badarg) self.assertRaises(TypeError, lambda: t1 < badarg) self.assertRaises(TypeError, lambda: t1 > badarg) self.assertRaises(TypeError, lambda: t1 >= badarg) self.assertRaises(TypeError, lambda: badarg <= t1) self.assertRaises(TypeError, lambda: badarg < t1) self.assertRaises(TypeError, lambda: badarg > t1) self.assertRaises(TypeError, lambda: badarg >= t1) def test_str(self): td = timedelta eq = self.assertEqual eq(str(td(1)), "1 day, 0:00:00") eq(str(td(-1)), "-1 day, 0:00:00") eq(str(td(2)), "2 days, 0:00:00") eq(str(td(-2)), "-2 days, 0:00:00") eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59") eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04") eq(str(td(weeks=-30, hours=23, minutes=12, 
seconds=34)), "-210 days, 23:12:34") eq(str(td(milliseconds=1)), "0:00:00.001000") eq(str(td(microseconds=3)), "0:00:00.000003") eq(str(td(days=999999999, hours=23, minutes=59, seconds=59, microseconds=999999)), "999999999 days, 23:59:59.999999") def test_roundtrip(self): for td in (timedelta(days=999999999, hours=23, minutes=59, seconds=59, microseconds=999999), timedelta(days=-999999999), timedelta(days=1, seconds=2, microseconds=3)): # Verify td -> string -> td identity. s = repr(td) self.failUnless(s.startswith('datetime.')) s = s[9:] td2 = eval(s) self.assertEqual(td, td2) # Verify identity via reconstructing from pieces. td2 = timedelta(td.days, td.seconds, td.microseconds) self.assertEqual(td, td2) def test_resolution_info(self): self.assert_(isinstance(timedelta.min, timedelta)) self.assert_(isinstance(timedelta.max, timedelta)) self.assert_(isinstance(timedelta.resolution, timedelta)) self.assert_(timedelta.max > timedelta.min) self.assertEqual(timedelta.min, timedelta(-999999999)) self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1)) self.assertEqual(timedelta.resolution, timedelta(0, 0, 1)) def test_overflow(self): tiny = timedelta.resolution td = timedelta.min + tiny td -= tiny # no problem self.assertRaises(OverflowError, td.__sub__, tiny) self.assertRaises(OverflowError, td.__add__, -tiny) td = timedelta.max - tiny td += tiny # no problem self.assertRaises(OverflowError, td.__add__, tiny) self.assertRaises(OverflowError, td.__sub__, -tiny) self.assertRaises(OverflowError, lambda: -timedelta.max) def test_microsecond_rounding(self): td = timedelta eq = self.assertEqual # Single-field rounding. eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0 eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0 eq(td(milliseconds=0.6/1000), td(microseconds=1)) eq(td(milliseconds=-0.6/1000), td(microseconds=-1)) # Rounding due to contributions from more than one field. us_per_hour = 3600e6 us_per_day = us_per_hour * 24 eq(td(days=.4/us_per_day), td(0)) eq(td(hours=.2/us_per_hour), td(0)) eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1)) eq(td(days=-.4/us_per_day), td(0)) eq(td(hours=-.2/us_per_hour), td(0)) eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1)) def test_massive_normalization(self): td = timedelta(microseconds=-1) self.assertEqual((td.days, td.seconds, td.microseconds), (-1, 24*3600-1, 999999)) def test_bool(self): self.failUnless(timedelta(1)) self.failUnless(timedelta(0, 1)) self.failUnless(timedelta(0, 0, 1)) self.failUnless(timedelta(microseconds=1)) self.failUnless(not timedelta(0)) def test_subclass_timedelta(self): class T(timedelta): @staticmethod def from_td(td): return T(td.days, td.seconds, td.microseconds) def as_hours(self): sum = (self.days * 24 + self.seconds / 3600.0 + self.microseconds / 3600e6) return round(sum) t1 = T(days=1) self.assert_(type(t1) is T) self.assertEqual(t1.as_hours(), 24) t2 = T(days=-1, seconds=-3600) self.assert_(type(t2) is T) self.assertEqual(t2.as_hours(), -25) t3 = t1 + t2 self.assert_(type(t3) is timedelta) t4 = T.from_td(t3) self.assert_(type(t4) is T) self.assertEqual(t3.days, t4.days) self.assertEqual(t3.seconds, t4.seconds) self.assertEqual(t3.microseconds, t4.microseconds) self.assertEqual(str(t3), str(t4)) self.assertEqual(t4.as_hours(), -1) ############################################################################# # date tests class TestDateOnly(unittest.TestCase): # Tests here won't pass if also run on datetime objects, so don't # subclass this to test datetimes too. 
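# The next test pins down a subtlety worth an explicit standalone sketch:
# date arithmetic consumes only the .days component of a timedelta, so the
# hours/minutes/seconds/microseconds are silently discarded. (The aliased
# imports and helper name below are illustrative, not part of the suite.)
from datetime import date as _date, timedelta as _timedelta

def _days_only_demo():
    d = _date(2000, 1, 2)
    delta = _timedelta(days=1, hours=23, minutes=59)
    # Adding delta is equivalent to adding timedelta(delta.days):
    assert d + delta == d + _timedelta(delta.days) == _date(2000, 1, 3)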
def test_delta_non_days_ignored(self): dt = date(2000, 1, 2) delta = timedelta(days=1, hours=2, minutes=3, seconds=4, microseconds=5) days = timedelta(delta.days) self.assertEqual(days, timedelta(1)) dt2 = dt + delta self.assertEqual(dt2, dt + days) dt2 = delta + dt self.assertEqual(dt2, dt + days) dt2 = dt - delta self.assertEqual(dt2, dt - days) delta = -delta days = timedelta(delta.days) self.assertEqual(days, timedelta(-2)) dt2 = dt + delta self.assertEqual(dt2, dt + days) dt2 = delta + dt self.assertEqual(dt2, dt + days) dt2 = dt - delta self.assertEqual(dt2, dt - days) class SubclassDate(date): sub_var = 1 class TestDate(HarmlessMixedComparison, unittest.TestCase): # Tests here should pass for both dates and datetimes, except for a # few tests that TestDateTime overrides. theclass = date def test_basic_attributes(self): dt = self.theclass(2002, 3, 1) self.assertEqual(dt.year, 2002) self.assertEqual(dt.month, 3) self.assertEqual(dt.day, 1) def test_roundtrip(self): for dt in (self.theclass(1, 2, 3), self.theclass.today()): # Verify dt -> string -> date identity. s = repr(dt) self.failUnless(s.startswith('datetime.')) s = s[9:] dt2 = eval(s) self.assertEqual(dt, dt2) # Verify identity via reconstructing from pieces. dt2 = self.theclass(dt.year, dt.month, dt.day) self.assertEqual(dt, dt2) def test_ordinal_conversions(self): # Check some fixed values. for y, m, d, n in [(1, 1, 1, 1), # calendar origin (1, 12, 31, 365), (2, 1, 1, 366), # first example from "Calendrical Calculations" (1945, 11, 12, 710347)]: d = self.theclass(y, m, d) self.assertEqual(n, d.toordinal()) fromord = self.theclass.fromordinal(n) self.assertEqual(d, fromord) if hasattr(fromord, "hour"): # if we're checking something fancier than a date, verify # the extra fields have been zeroed out self.assertEqual(fromord.hour, 0) self.assertEqual(fromord.minute, 0) self.assertEqual(fromord.second, 0) self.assertEqual(fromord.microsecond, 0) # Check first and last days of year spottily across the whole # range of years supported. for year in xrange(MINYEAR, MAXYEAR+1, 7): # Verify (year, 1, 1) -> ordinal -> y, m, d is identity. d = self.theclass(year, 1, 1) n = d.toordinal() d2 = self.theclass.fromordinal(n) self.assertEqual(d, d2) # Verify that moving back a day gets to the end of year-1. if year > 1: d = self.theclass.fromordinal(n-1) d2 = self.theclass(year-1, 12, 31) self.assertEqual(d, d2) self.assertEqual(d2.toordinal(), n-1) # Test every day in a leap-year and a non-leap year. 
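# Sketch of the ordinal bookkeeping the loop below relies on: toordinal()
# numbers days consecutively from 0001-01-01 == 1, so walking a year one
# day at a time must advance the ordinal by exactly one per step. (Helper
# name is illustrative only.)
from datetime import date as _date

def _ordinal_walk(year):
    n = _date(year, 1, 1).toordinal()
    d = _date(year, 1, 1)
    while d.year == year:
        assert d.toordinal() == n
        n += 1
        d = _date.fromordinal(n)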
dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] for year, isleap in (2000, True), (2002, False): n = self.theclass(year, 1, 1).toordinal() for month, maxday in zip(range(1, 13), dim): if month == 2 and isleap: maxday += 1 for day in range(1, maxday+1): d = self.theclass(year, month, day) self.assertEqual(d.toordinal(), n) self.assertEqual(d, self.theclass.fromordinal(n)) n += 1 def test_extreme_ordinals(self): a = self.theclass.min a = self.theclass(a.year, a.month, a.day) # get rid of time parts aord = a.toordinal() b = a.fromordinal(aord) self.assertEqual(a, b) self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1)) b = a + timedelta(days=1) self.assertEqual(b.toordinal(), aord + 1) self.assertEqual(b, self.theclass.fromordinal(aord + 1)) a = self.theclass.max a = self.theclass(a.year, a.month, a.day) # get rid of time parts aord = a.toordinal() b = a.fromordinal(aord) self.assertEqual(a, b) self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1)) b = a - timedelta(days=1) self.assertEqual(b.toordinal(), aord - 1) self.assertEqual(b, self.theclass.fromordinal(aord - 1)) def test_bad_constructor_arguments(self): # bad years self.theclass(MINYEAR, 1, 1) # no exception self.theclass(MAXYEAR, 1, 1) # no exception self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1) self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1) # bad months self.theclass(2000, 1, 1) # no exception self.theclass(2000, 12, 1) # no exception self.assertRaises(ValueError, self.theclass, 2000, 0, 1) self.assertRaises(ValueError, self.theclass, 2000, 13, 1) # bad days self.theclass(2000, 2, 29) # no exception self.theclass(2004, 2, 29) # no exception self.theclass(2400, 2, 29) # no exception self.assertRaises(ValueError, self.theclass, 2000, 2, 30) self.assertRaises(ValueError, self.theclass, 2001, 2, 29) self.assertRaises(ValueError, self.theclass, 2100, 2, 29) self.assertRaises(ValueError, self.theclass, 1900, 2, 29) self.assertRaises(ValueError, self.theclass, 2000, 1, 0) self.assertRaises(ValueError, self.theclass, 2000, 1, 32) def test_hash_equality(self): d = self.theclass(2000, 12, 31) # same thing e = self.theclass(2000, 12, 31) self.assertEqual(d, e) self.assertEqual(hash(d), hash(e)) dic = {d: 1} dic[e] = 2 self.assertEqual(len(dic), 1) self.assertEqual(dic[d], 2) self.assertEqual(dic[e], 2) d = self.theclass(2001, 1, 1) # same thing e = self.theclass(2001, 1, 1) self.assertEqual(d, e) self.assertEqual(hash(d), hash(e)) dic = {d: 1} dic[e] = 2 self.assertEqual(len(dic), 1) self.assertEqual(dic[d], 2) self.assertEqual(dic[e], 2) def test_computations(self): a = self.theclass(2002, 1, 31) b = self.theclass(1956, 1, 31) diff = a-b self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4))) self.assertEqual(diff.seconds, 0) self.assertEqual(diff.microseconds, 0) day = timedelta(1) week = timedelta(7) a = self.theclass(2002, 3, 2) self.assertEqual(a + day, self.theclass(2002, 3, 3)) self.assertEqual(day + a, self.theclass(2002, 3, 3)) self.assertEqual(a - day, self.theclass(2002, 3, 1)) self.assertEqual(-day + a, self.theclass(2002, 3, 1)) self.assertEqual(a + week, self.theclass(2002, 3, 9)) self.assertEqual(a - week, self.theclass(2002, 2, 23)) self.assertEqual(a + 52*week, self.theclass(2003, 3, 1)) self.assertEqual(a - 52*week, self.theclass(2001, 3, 3)) self.assertEqual((a + week) - a, week) self.assertEqual((a + day) - a, day) self.assertEqual((a - week) - a, -week) self.assertEqual((a - day) - a, -day) self.assertEqual(a - (a + week), -week) self.assertEqual(a - (a + 
day), -day) self.assertEqual(a - (a - week), week) self.assertEqual(a - (a - day), day) # Add/sub ints, longs, floats should be illegal for i in 1, 1L, 1.0: self.assertRaises(TypeError, lambda: a+i) self.assertRaises(TypeError, lambda: a-i) self.assertRaises(TypeError, lambda: i+a) self.assertRaises(TypeError, lambda: i-a) # delta - date is senseless. self.assertRaises(TypeError, lambda: day - a) # mixing date and (delta or date) via * or // is senseless self.assertRaises(TypeError, lambda: day * a) self.assertRaises(TypeError, lambda: a * day) self.assertRaises(TypeError, lambda: day // a) self.assertRaises(TypeError, lambda: a // day) self.assertRaises(TypeError, lambda: a * a) self.assertRaises(TypeError, lambda: a // a) # date + date is senseless self.assertRaises(TypeError, lambda: a + a) def test_overflow(self): tiny = self.theclass.resolution dt = self.theclass.min + tiny dt -= tiny # no problem self.assertRaises(OverflowError, dt.__sub__, tiny) self.assertRaises(OverflowError, dt.__add__, -tiny) dt = self.theclass.max - tiny dt += tiny # no problem self.assertRaises(OverflowError, dt.__add__, tiny) self.assertRaises(OverflowError, dt.__sub__, -tiny) def test_fromtimestamp(self): import time # Try an arbitrary fixed value. year, month, day = 1999, 9, 19 ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1)) d = self.theclass.fromtimestamp(ts) self.assertEqual(d.year, year) self.assertEqual(d.month, month) self.assertEqual(d.day, day) def test_insane_fromtimestamp(self): # It's possible that some platform maps time_t to double, # and that this test will fail there. This test should # exempt such platforms (provided they return reasonable # results!). for insane in -1e200, 1e200: self.assertRaises(ValueError, self.theclass.fromtimestamp, insane) def test_today(self): import time # We claim that today() is like fromtimestamp(time.time()), so # prove it. for dummy in range(3): today = self.theclass.today() ts = time.time() todayagain = self.theclass.fromtimestamp(ts) if today == todayagain: break # There are several legit reasons that could fail: # 1. It recently became midnight, between the today() and the # time() calls. # 2. The platform time() has such fine resolution that we'll # never get the same value twice. # 3. The platform time() has poor resolution, and we just # happened to call today() right before a resolution quantum # boundary. # 4. The system clock got fiddled between calls. # In any case, wait a little while and try again. time.sleep(0.1) # It worked or it didn't. If it didn't, assume it's reason #2, and # let the test pass if they're within half a second of each other. 
self.failUnless(today == todayagain or abs(todayagain - today) < timedelta(seconds=0.5)) def test_weekday(self): for i in range(7): # March 4, 2002 is a Monday self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i) self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1) # January 2, 1956 is a Monday self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i) self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1) def test_isocalendar(self): # Check examples from # http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm for i in range(7): d = self.theclass(2003, 12, 22+i) self.assertEqual(d.isocalendar(), (2003, 52, i+1)) d = self.theclass(2003, 12, 29) + timedelta(i) self.assertEqual(d.isocalendar(), (2004, 1, i+1)) d = self.theclass(2004, 1, 5+i) self.assertEqual(d.isocalendar(), (2004, 2, i+1)) d = self.theclass(2009, 12, 21+i) self.assertEqual(d.isocalendar(), (2009, 52, i+1)) d = self.theclass(2009, 12, 28) + timedelta(i) self.assertEqual(d.isocalendar(), (2009, 53, i+1)) d = self.theclass(2010, 1, 4+i) self.assertEqual(d.isocalendar(), (2010, 1, i+1)) def test_iso_long_years(self): # Calculate long ISO years and compare to table from # http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm ISO_LONG_YEARS_TABLE = """ 4 32 60 88 9 37 65 93 15 43 71 99 20 48 76 26 54 82 105 133 161 189 111 139 167 195 116 144 172 122 150 178 128 156 184 201 229 257 285 207 235 263 291 212 240 268 296 218 246 274 224 252 280 303 331 359 387 308 336 364 392 314 342 370 398 320 348 376 325 353 381 """ iso_long_years = map(int, ISO_LONG_YEARS_TABLE.split()) iso_long_years.sort() L = [] for i in range(400): d = self.theclass(2000+i, 12, 31) d1 = self.theclass(1600+i, 12, 31) self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:]) if d.isocalendar()[1] == 53: L.append(i) self.assertEqual(L, iso_long_years) def test_isoformat(self): t = self.theclass(2, 3, 2) self.assertEqual(t.isoformat(), "0002-03-02") def test_ctime(self): t = self.theclass(2002, 3, 2) self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002") def test_strftime(self): t = self.theclass(2005, 3, 2) self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05") self.assertEqual(t.strftime(""), "") # SF bug #761337 self.assertEqual(t.strftime('x'*1000), 'x'*1000) # SF bug #1556784 self.assertRaises(TypeError, t.strftime) # needs an arg self.assertRaises(TypeError, t.strftime, "one", "two") # too many args self.assertRaises(TypeError, t.strftime, 42) # arg wrong type # test that unicode input is allowed (issue 2782) self.assertEqual(t.strftime(u"%m"), "03") # A naive object replaces %z and %Z w/ empty strings. self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''") #make sure that invalid format specifiers are handled correctly #self.assertRaises(ValueError, t.strftime, "%e") #self.assertRaises(ValueError, t.strftime, "%") #self.assertRaises(ValueError, t.strftime, "%#") #oh well, some systems just ignore those invalid ones. 
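# The commented-out assertions above record a real portability wrinkle:
# whether strftime("%e") raises ValueError depends on the platform's
# strftime, so the code below only exercises the bad codes for crashes.
# A standalone version of that defensive probe (helper name illustrative):
from datetime import date as _date

def _probe_bad_specifiers():
    t = _date(2005, 3, 2)
    for f in ("%e", "%", "%#"):
        try:
            t.strftime(f)        # may raise ValueError, may not
        except ValueError:
            pass                 # either outcome is acceptable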
#at least, excercise them to make sure that no crashes #are generated for f in ["%e", "%", "%#"]: try: t.strftime(f) except ValueError: pass #check that this standard extension works t.strftime("%f") def test_format(self): dt = self.theclass(2007, 9, 10) self.assertEqual(dt.__format__(''), str(dt)) # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): return 'A' a = A(2007, 9, 10) self.assertEqual(a.__format__(''), 'A') # check that a derived class's strftime gets called class B(self.theclass): def strftime(self, format_spec): return 'B' b = B(2007, 9, 10) self.assertEqual(b.__format__(''), str(dt)) for fmt in ["m:%m d:%d y:%y", "m:%m d:%d y:%y H:%H M:%M S:%S", "%z %Z", ]: self.assertEqual(dt.__format__(fmt), dt.strftime(fmt)) self.assertEqual(a.__format__(fmt), dt.strftime(fmt)) self.assertEqual(b.__format__(fmt), 'B') def test_resolution_info(self): self.assert_(isinstance(self.theclass.min, self.theclass)) self.assert_(isinstance(self.theclass.max, self.theclass)) self.assert_(isinstance(self.theclass.resolution, timedelta)) self.assert_(self.theclass.max > self.theclass.min) def test_extreme_timedelta(self): big = self.theclass.max - self.theclass.min # 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds # n == 315537897599999999 ~= 2**58.13 justasbig = timedelta(0, 0, n) self.assertEqual(big, justasbig) self.assertEqual(self.theclass.min + big, self.theclass.max) self.assertEqual(self.theclass.max - big, self.theclass.min) def test_timetuple(self): for i in range(7): # January 2, 1956 is a Monday (0) d = self.theclass(1956, 1, 2+i) t = d.timetuple() self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1)) # February 1, 1956 is a Wednesday (2) d = self.theclass(1956, 2, 1+i) t = d.timetuple() self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1)) # March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day # of the year. 
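# Worked check of the day-of-year arithmetic in the comment above: 1956 is
# a leap year, so March 1 is day 31 (Jan) + 29 (Feb) + 1 = 61. The general
# formula, using nothing beyond the module's own API (helper name is
# illustrative):
from datetime import date as _date

def _yday(d):
    return d.toordinal() - _date(d.year, 1, 1).toordinal() + 1

assert _yday(_date(1956, 3, 1)) == 61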
d = self.theclass(1956, 3, 1+i) t = d.timetuple() self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1)) self.assertEqual(t.tm_year, 1956) self.assertEqual(t.tm_mon, 3) self.assertEqual(t.tm_mday, 1+i) self.assertEqual(t.tm_hour, 0) self.assertEqual(t.tm_min, 0) self.assertEqual(t.tm_sec, 0) self.assertEqual(t.tm_wday, (3+i)%7) self.assertEqual(t.tm_yday, 61+i) self.assertEqual(t.tm_isdst, -1) def test_pickling(self): args = 6, 7, 23 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_compare(self): t1 = self.theclass(2, 3, 4) t2 = self.theclass(2, 3, 4) self.failUnless(t1 == t2) self.failUnless(t1 <= t2) self.failUnless(t1 >= t2) self.failUnless(not t1 != t2) self.failUnless(not t1 < t2) self.failUnless(not t1 > t2) self.assertEqual(cmp(t1, t2), 0) self.assertEqual(cmp(t2, t1), 0) for args in (3, 3, 3), (2, 4, 4), (2, 3, 5): t2 = self.theclass(*args) # this is larger than t1 self.failUnless(t1 < t2) self.failUnless(t2 > t1) self.failUnless(t1 <= t2) self.failUnless(t2 >= t1) self.failUnless(t1 != t2) self.failUnless(t2 != t1) self.failUnless(not t1 == t2) self.failUnless(not t2 == t1) self.failUnless(not t1 > t2) self.failUnless(not t2 < t1) self.failUnless(not t1 >= t2) self.failUnless(not t2 <= t1) self.assertEqual(cmp(t1, t2), -1) self.assertEqual(cmp(t2, t1), 1) for badarg in OTHERSTUFF: self.assertEqual(t1 == badarg, False) self.assertEqual(t1 != badarg, True) self.assertEqual(badarg == t1, False) self.assertEqual(badarg != t1, True) self.assertRaises(TypeError, lambda: t1 < badarg) self.assertRaises(TypeError, lambda: t1 > badarg) self.assertRaises(TypeError, lambda: t1 >= badarg) self.assertRaises(TypeError, lambda: badarg <= t1) self.assertRaises(TypeError, lambda: badarg < t1) self.assertRaises(TypeError, lambda: badarg > t1) self.assertRaises(TypeError, lambda: badarg >= t1) def test_mixed_compare(self): our = self.theclass(2000, 4, 5) self.assertRaises(TypeError, cmp, our, 1) self.assertRaises(TypeError, cmp, 1, our) class AnotherDateTimeClass(object): def __cmp__(self, other): # Return "equal" so calling this can't be confused with # compare-by-address (which never says "equal" for distinct # objects). return 0 __hash__ = None # Silence Py3k warning # This still errors, because date and datetime comparison raise # TypeError instead of NotImplemented when they don't know what to # do, in order to stop comparison from falling back to the default # compare-by-address. their = AnotherDateTimeClass() self.assertRaises(TypeError, cmp, our, their) # Oops: The next stab raises TypeError in the C implementation, # but not in the Python implementation of datetime. The difference # is due to that the Python implementation defines __cmp__ but # the C implementation defines tp_richcompare. This is more pain # to fix than it's worth, so commenting out the test. # self.assertEqual(cmp(their, our), 0) # But date and datetime comparison return NotImplemented instead if the # other object has a timetuple attr. This gives the other object a # chance to do the comparison. class Comparable(AnotherDateTimeClass): def timetuple(self): return () their = Comparable() self.assertEqual(cmp(our, their), 0) self.assertEqual(cmp(their, our), 0) self.failUnless(our == their) self.failUnless(their == our) def test_bool(self): # All dates are considered true. 
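# "All dates are considered true" contrasts with time objects, where
# midnight is falsy in this version of the module (see TestTime.test_bool
# further down). A two-line reminder of the asymmetry:
from datetime import date as _date, time as _time
assert bool(_date.min) and bool(_date.max)   # every date is truthy
assert not bool(_time(0))                    # midnight time() is falsy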
self.failUnless(self.theclass.min) self.failUnless(self.theclass.max) def test_strftime_out_of_range(self): # For nasty technical reasons, we can't handle years before 1900. cls = self.theclass self.assertEqual(cls(1900, 1, 1).strftime("%Y"), "1900") for y in 1, 49, 51, 99, 100, 1000, 1899: self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y") def test_replace(self): cls = self.theclass args = [1, 2, 3] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("year", 2), ("month", 3), ("day", 4)): newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Out of bounds. base = cls(2000, 2, 29) self.assertRaises(ValueError, base.replace, year=2001) def test_subclass_date(self): class C(self.theclass): theAnswer = 42 def __new__(cls, *args, **kws): temp = kws.copy() extra = temp.pop('extra') result = self.theclass.__new__(cls, *args, **temp) result.extra = extra return result def newmeth(self, start): return start + self.year + self.month args = 2003, 4, 14 dt1 = self.theclass(*args) dt2 = C(*args, **{'extra': 7}) self.assertEqual(dt2.__class__, C) self.assertEqual(dt2.theAnswer, 42) self.assertEqual(dt2.extra, 7) self.assertEqual(dt1.toordinal(), dt2.toordinal()) self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7) def test_pickling_subclass_date(self): args = 6, 7, 23 orig = SubclassDate(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_backdoor_resistance(self): # For fast unpickling, the constructor accepts a pickle string. # This is a low-overhead backdoor. A user can (by intent or # mistake) pass a string directly, which (if it's the right length) # will get treated like a pickle, and bypass the normal sanity # checks in the constructor. This can create insane objects. # The constructor doesn't want to burn the time to validate all # fields, but does check the month field. This stops, e.g., # datetime.datetime('1995-03-25') from yielding an insane object. base = '1995-03-25' if not issubclass(self.theclass, datetime): base = base[:4] for month_byte in '9', chr(0), chr(13), '\xff': self.assertRaises(TypeError, self.theclass, base[:2] + month_byte + base[3:]) for ord_byte in range(1, 13): # This shouldn't blow up because of the month byte alone. If # the implementation changes to do more-careful checking, it may # blow up because other fields are insane. self.theclass(base[:2] + chr(ord_byte) + base[3:]) ############################################################################# # datetime tests class SubclassDatetime(datetime): sub_var = 1 class TestDateTime(TestDate): theclass = datetime def test_basic_attributes(self): dt = self.theclass(2002, 3, 1, 12, 0) self.assertEqual(dt.year, 2002) self.assertEqual(dt.month, 3) self.assertEqual(dt.day, 1) self.assertEqual(dt.hour, 12) self.assertEqual(dt.minute, 0) self.assertEqual(dt.second, 0) self.assertEqual(dt.microsecond, 0) def test_basic_attributes_nonzero(self): # Make sure all attributes are non-zero so bugs in # bit-shifting access show up. 
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000) self.assertEqual(dt.year, 2002) self.assertEqual(dt.month, 3) self.assertEqual(dt.day, 1) self.assertEqual(dt.hour, 12) self.assertEqual(dt.minute, 59) self.assertEqual(dt.second, 59) self.assertEqual(dt.microsecond, 8000) def test_roundtrip(self): for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7), self.theclass.now()): # Verify dt -> string -> datetime identity. s = repr(dt) self.failUnless(s.startswith('datetime.')) s = s[9:] dt2 = eval(s) self.assertEqual(dt, dt2) # Verify identity via reconstructing from pieces. dt2 = self.theclass(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond) self.assertEqual(dt, dt2) def test_isoformat(self): t = self.theclass(2, 3, 2, 4, 5, 1, 123) self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123") self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123") self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123") # str is ISO format with the separator forced to a blank. self.assertEqual(str(t), "0002-03-02 04:05:01.000123") t = self.theclass(2, 3, 2) self.assertEqual(t.isoformat(), "0002-03-02T00:00:00") self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00") self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00") # str is ISO format with the separator forced to a blank. self.assertEqual(str(t), "0002-03-02 00:00:00") def test_format(self): dt = self.theclass(2007, 9, 10, 4, 5, 1, 123) self.assertEqual(dt.__format__(''), str(dt)) # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): return 'A' a = A(2007, 9, 10, 4, 5, 1, 123) self.assertEqual(a.__format__(''), 'A') # check that a derived class's strftime gets called class B(self.theclass): def strftime(self, format_spec): return 'B' b = B(2007, 9, 10, 4, 5, 1, 123) self.assertEqual(b.__format__(''), str(dt)) for fmt in ["m:%m d:%d y:%y", "m:%m d:%d y:%y H:%H M:%M S:%S", "%z %Z", ]: self.assertEqual(dt.__format__(fmt), dt.strftime(fmt)) self.assertEqual(a.__format__(fmt), dt.strftime(fmt)) self.assertEqual(b.__format__(fmt), 'B') def test_more_ctime(self): # Test fields that TestDate doesn't touch. import time t = self.theclass(2002, 3, 2, 18, 3, 5, 123) self.assertEqual(t.ctime(), "Sat Mar 2 18:03:05 2002") # Oops! The next line fails on Win2K under MSVC 6, so it's commented # out. The difference is that t.ctime() produces " 2" for the day, # but platform ctime() produces "02" for the day. According to # C99, t.ctime() is correct here. # self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple()))) # So test a case where that difference doesn't matter. t = self.theclass(2002, 3, 22, 18, 3, 5, 123) self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple()))) def test_tz_independent_comparing(self): dt1 = self.theclass(2002, 3, 1, 9, 0, 0) dt2 = self.theclass(2002, 3, 1, 10, 0, 0) dt3 = self.theclass(2002, 3, 1, 9, 0, 0) self.assertEqual(dt1, dt3) self.assert_(dt2 > dt3) # Make sure comparison doesn't forget microseconds, and isn't done # via comparing a float timestamp (an IEEE double doesn't have enough # precision to span microsecond resolution across years 1 thru 9999, # so comparing via timestamp necessarily calls some distinct values # equal). 
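# Worked version of the precision argument above: the supported datetime
# range is 3652058 days 23:59:59.999999 wide, roughly 3.16e17 microseconds,
# while an IEEE-754 double carries only 53 bits (~9.0e15) of exact integer
# precision -- so one float timestamp cannot name every microsecond in
# range, and comparison must not round-trip through floats.
span_us = (3652058 * 24 * 3600 + 23 * 3600 + 59 * 60 + 59) * 10**6 + 999999
assert span_us == 315537897599999999
assert span_us > 2**53                  # 2**53 == 9007199254740992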
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998) us = timedelta(microseconds=1) dt2 = dt1 + us self.assertEqual(dt2 - dt1, us) self.assert_(dt1 < dt2) def test_strftime_with_bad_tzname_replace(self): # verify ok if tzinfo.tzname().replace() returns a non-string class MyTzInfo(FixedOffset): def tzname(self, dt): class MyStr(str): def replace(self, *args): return None return MyStr('name') t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name')) self.assertRaises(TypeError, t.strftime, '%Z') def test_bad_constructor_arguments(self): # bad years self.theclass(MINYEAR, 1, 1) # no exception self.theclass(MAXYEAR, 1, 1) # no exception self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1) self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1) # bad months self.theclass(2000, 1, 1) # no exception self.theclass(2000, 12, 1) # no exception self.assertRaises(ValueError, self.theclass, 2000, 0, 1) self.assertRaises(ValueError, self.theclass, 2000, 13, 1) # bad days self.theclass(2000, 2, 29) # no exception self.theclass(2004, 2, 29) # no exception self.theclass(2400, 2, 29) # no exception self.assertRaises(ValueError, self.theclass, 2000, 2, 30) self.assertRaises(ValueError, self.theclass, 2001, 2, 29) self.assertRaises(ValueError, self.theclass, 2100, 2, 29) self.assertRaises(ValueError, self.theclass, 1900, 2, 29) self.assertRaises(ValueError, self.theclass, 2000, 1, 0) self.assertRaises(ValueError, self.theclass, 2000, 1, 32) # bad hours self.theclass(2000, 1, 31, 0) # no exception self.theclass(2000, 1, 31, 23) # no exception self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1) self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24) # bad minutes self.theclass(2000, 1, 31, 23, 0) # no exception self.theclass(2000, 1, 31, 23, 59) # no exception self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1) self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60) # bad seconds self.theclass(2000, 1, 31, 23, 59, 0) # no exception self.theclass(2000, 1, 31, 23, 59, 59) # no exception self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1) self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60) # bad microseconds self.theclass(2000, 1, 31, 23, 59, 59, 0) # no exception self.theclass(2000, 1, 31, 23, 59, 59, 999999) # no exception self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 59, -1) self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 59, 1000000) def test_hash_equality(self): d = self.theclass(2000, 12, 31, 23, 30, 17) e = self.theclass(2000, 12, 31, 23, 30, 17) self.assertEqual(d, e) self.assertEqual(hash(d), hash(e)) dic = {d: 1} dic[e] = 2 self.assertEqual(len(dic), 1) self.assertEqual(dic[d], 2) self.assertEqual(dic[e], 2) d = self.theclass(2001, 1, 1, 0, 5, 17) e = self.theclass(2001, 1, 1, 0, 5, 17) self.assertEqual(d, e) self.assertEqual(hash(d), hash(e)) dic = {d: 1} dic[e] = 2 self.assertEqual(len(dic), 1) self.assertEqual(dic[d], 2) self.assertEqual(dic[e], 2) def test_computations(self): a = self.theclass(2002, 1, 31) b = self.theclass(1956, 1, 31) diff = a-b self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4))) self.assertEqual(diff.seconds, 0) self.assertEqual(diff.microseconds, 0) a = self.theclass(2002, 3, 2, 17, 6) millisec = timedelta(0, 0, 1000) hour = timedelta(0, 3600) day = timedelta(1) week = timedelta(7) self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6)) self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6)) self.assertEqual(a + 10*hour, 
self.theclass(2002, 3, 3, 3, 6)) self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6)) self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6)) self.assertEqual(a - hour, a + -hour) self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6)) self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6)) self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6)) self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6)) self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6)) self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6)) self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6)) self.assertEqual((a + week) - a, week) self.assertEqual((a + day) - a, day) self.assertEqual((a + hour) - a, hour) self.assertEqual((a + millisec) - a, millisec) self.assertEqual((a - week) - a, -week) self.assertEqual((a - day) - a, -day) self.assertEqual((a - hour) - a, -hour) self.assertEqual((a - millisec) - a, -millisec) self.assertEqual(a - (a + week), -week) self.assertEqual(a - (a + day), -day) self.assertEqual(a - (a + hour), -hour) self.assertEqual(a - (a + millisec), -millisec) self.assertEqual(a - (a - week), week) self.assertEqual(a - (a - day), day) self.assertEqual(a - (a - hour), hour) self.assertEqual(a - (a - millisec), millisec) self.assertEqual(a + (week + day + hour + millisec), self.theclass(2002, 3, 10, 18, 6, 0, 1000)) self.assertEqual(a + (week + day + hour + millisec), (((a + week) + day) + hour) + millisec) self.assertEqual(a - (week + day + hour + millisec), self.theclass(2002, 2, 22, 16, 5, 59, 999000)) self.assertEqual(a - (week + day + hour + millisec), (((a - week) - day) - hour) - millisec) # Add/sub ints, longs, floats should be illegal for i in 1, 1L, 1.0: self.assertRaises(TypeError, lambda: a+i) self.assertRaises(TypeError, lambda: a-i) self.assertRaises(TypeError, lambda: i+a) self.assertRaises(TypeError, lambda: i-a) # delta - datetime is senseless. self.assertRaises(TypeError, lambda: day - a) # mixing datetime and (delta or datetime) via * or // is senseless self.assertRaises(TypeError, lambda: day * a) self.assertRaises(TypeError, lambda: a * day) self.assertRaises(TypeError, lambda: day // a) self.assertRaises(TypeError, lambda: a // day) self.assertRaises(TypeError, lambda: a * a) self.assertRaises(TypeError, lambda: a // a) # datetime + datetime is senseless self.assertRaises(TypeError, lambda: a + a) def test_pickling(self): args = 6, 7, 23, 20, 59, 1, 64**2 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_more_pickling(self): a = self.theclass(2003, 2, 7, 16, 48, 37, 444116) s = pickle.dumps(a) b = pickle.loads(s) self.assertEqual(b.year, 2003) self.assertEqual(b.month, 2) self.assertEqual(b.day, 7) def test_pickling_subclass_datetime(self): args = 6, 7, 23, 20, 59, 1, 64**2 orig = SubclassDatetime(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_more_compare(self): # The test_compare() inherited from TestDate covers the error cases. # We just want to test lexicographic ordering on the members datetime # has that date lacks. 
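# The loop below uses a compact pattern for probing lexicographic order:
# start from equal field tuples and bump exactly one position at a time,
# which must produce a strictly larger value. Distilled (helper name is
# illustrative):
def _bumped_variants(args):
    # Yield one copy of args per field, with that field incremented.
    for i in range(len(args)):
        newargs = list(args)
        newargs[i] += 1
        yield newargs

assert list(_bumped_variants([1, 2])) == [[2, 2], [1, 3]]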
args = [2000, 11, 29, 20, 58, 16, 999998] t1 = self.theclass(*args) t2 = self.theclass(*args) self.failUnless(t1 == t2) self.failUnless(t1 <= t2) self.failUnless(t1 >= t2) self.failUnless(not t1 != t2) self.failUnless(not t1 < t2) self.failUnless(not t1 > t2) self.assertEqual(cmp(t1, t2), 0) self.assertEqual(cmp(t2, t1), 0) for i in range(len(args)): newargs = args[:] newargs[i] = args[i] + 1 t2 = self.theclass(*newargs) # this is larger than t1 self.failUnless(t1 < t2) self.failUnless(t2 > t1) self.failUnless(t1 <= t2) self.failUnless(t2 >= t1) self.failUnless(t1 != t2) self.failUnless(t2 != t1) self.failUnless(not t1 == t2) self.failUnless(not t2 == t1) self.failUnless(not t1 > t2) self.failUnless(not t2 < t1) self.failUnless(not t1 >= t2) self.failUnless(not t2 <= t1) self.assertEqual(cmp(t1, t2), -1) self.assertEqual(cmp(t2, t1), 1) # A helper for timestamp constructor tests. def verify_field_equality(self, expected, got): self.assertEqual(expected.tm_year, got.year) self.assertEqual(expected.tm_mon, got.month) self.assertEqual(expected.tm_mday, got.day) self.assertEqual(expected.tm_hour, got.hour) self.assertEqual(expected.tm_min, got.minute) self.assertEqual(expected.tm_sec, got.second) def test_fromtimestamp(self): import time ts = time.time() expected = time.localtime(ts) got = self.theclass.fromtimestamp(ts) self.verify_field_equality(expected, got) def test_utcfromtimestamp(self): import time ts = time.time() expected = time.gmtime(ts) got = self.theclass.utcfromtimestamp(ts) self.verify_field_equality(expected, got) def test_microsecond_rounding(self): # Test whether fromtimestamp "rounds up" floats that are less # than one microsecond smaller than an integer. self.assertEquals(self.theclass.fromtimestamp(0.9999999), self.theclass.fromtimestamp(1)) def test_insane_fromtimestamp(self): # It's possible that some platform maps time_t to double, # and that this test will fail there. This test should # exempt such platforms (provided they return reasonable # results!). for insane in -1e200, 1e200: self.assertRaises(ValueError, self.theclass.fromtimestamp, insane) def test_insane_utcfromtimestamp(self): # It's possible that some platform maps time_t to double, # and that this test will fail there. This test should # exempt such platforms (provided they return reasonable # results!). for insane in -1e200, 1e200: self.assertRaises(ValueError, self.theclass.utcfromtimestamp, insane) def test_negative_float_fromtimestamp(self): # Windows doesn't accept negative timestamps if os.name == "nt": return # The result is tz-dependent; at least test that this doesn't # fail (like it did before bug 1646728 was fixed). self.theclass.fromtimestamp(-1.05) def test_negative_float_utcfromtimestamp(self): # Windows doesn't accept negative timestamps if os.name == "nt": return d = self.theclass.utcfromtimestamp(-1.05) self.assertEquals(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000)) def test_utcnow(self): import time # Call it a success if utcnow() and utcfromtimestamp() are within # a second of each other. tolerance = timedelta(seconds=1) for dummy in range(3): from_now = self.theclass.utcnow() from_timestamp = self.theclass.utcfromtimestamp(time.time()) if abs(from_timestamp - from_now) <= tolerance: break # Else try again a few times. 
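# The retry idiom above generalizes to any two-clock comparison: sample
# both, accept if within tolerance, otherwise retry a bounded number of
# times before rendering a final verdict. Generic sketch, where a and b
# are hypothetical zero-argument clock callables:
def _clocks_agree(a, b, tolerance, attempts=3):
    for _ in range(attempts):
        if abs(a() - b()) <= tolerance:
            return True
    return abs(a() - b()) <= tolerance   # last-chance sample, as above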
self.failUnless(abs(from_timestamp - from_now) <= tolerance) def test_strptime(self): import _strptime string = '2004-12-01 13:02:47.197' format = '%Y-%m-%d %H:%M:%S.%f' result, frac = _strptime._strptime(string, format) expected = self.theclass(*(result[0:6]+(frac,))) got = self.theclass.strptime(string, format) self.assertEqual(expected, got) def test_more_timetuple(self): # This tests fields beyond those tested by the TestDate.test_timetuple. t = self.theclass(2004, 12, 31, 6, 22, 33) self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1)) self.assertEqual(t.timetuple(), (t.year, t.month, t.day, t.hour, t.minute, t.second, t.weekday(), t.toordinal() - date(t.year, 1, 1).toordinal() + 1, -1)) tt = t.timetuple() self.assertEqual(tt.tm_year, t.year) self.assertEqual(tt.tm_mon, t.month) self.assertEqual(tt.tm_mday, t.day) self.assertEqual(tt.tm_hour, t.hour) self.assertEqual(tt.tm_min, t.minute) self.assertEqual(tt.tm_sec, t.second) self.assertEqual(tt.tm_wday, t.weekday()) self.assertEqual(tt.tm_yday, t.toordinal() - date(t.year, 1, 1).toordinal() + 1) self.assertEqual(tt.tm_isdst, -1) def test_more_strftime(self): # This tests fields beyond those tested by the TestDate.test_strftime. t = self.theclass(2004, 12, 31, 6, 22, 33, 47) self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"), "12 31 04 000047 33 22 06 366") def test_extract(self): dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234) self.assertEqual(dt.date(), date(2002, 3, 4)) self.assertEqual(dt.time(), time(18, 45, 3, 1234)) def test_combine(self): d = date(2002, 3, 4) t = time(18, 45, 3, 1234) expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234) combine = self.theclass.combine dt = combine(d, t) self.assertEqual(dt, expected) dt = combine(time=t, date=d) self.assertEqual(dt, expected) self.assertEqual(d, dt.date()) self.assertEqual(t, dt.time()) self.assertEqual(dt, combine(dt.date(), dt.time())) self.assertRaises(TypeError, combine) # need an arg self.assertRaises(TypeError, combine, d) # need two args self.assertRaises(TypeError, combine, t, d) # args reversed self.assertRaises(TypeError, combine, d, t, 1) # too many args self.assertRaises(TypeError, combine, "date", "time") # wrong types def test_replace(self): cls = self.theclass args = [1, 2, 3, 4, 5, 6, 7] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("year", 2), ("month", 3), ("day", 4), ("hour", 5), ("minute", 6), ("second", 7), ("microsecond", 8)): newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Out of bounds. base = cls(2000, 2, 29) self.assertRaises(ValueError, base.replace, year=2001) def test_astimezone(self): # Pretty boring! The TZ test is more interesting here. astimezone() # simply can't be applied to a naive object. 
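# To call astimezone() at all, a datetime first needs a tzinfo attached;
# the usual route is replace(). Sketch using this module's own FixedOffset
# class (helper name is illustrative):
def _make_aware_demo():
    naive = datetime(2002, 3, 4, 12, 0)
    aware = naive.replace(tzinfo=FixedOffset(60, "plusone"))
    shifted = aware.astimezone(FixedOffset(0, "zero"))
    # Aware subtraction compensates for offsets, so both values denote the
    # same instant: 12:00 at +01:00 is 11:00 at +00:00.
    assert shifted - aware == timedelta(0)
    assert shifted.hour == 11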
dt = self.theclass.now() f = FixedOffset(44, "") self.assertRaises(TypeError, dt.astimezone) # not enough args self.assertRaises(TypeError, dt.astimezone, f, f) # too many args self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type self.assertRaises(ValueError, dt.astimezone, f) # naive self.assertRaises(ValueError, dt.astimezone, tz=f) # naive class Bogus(tzinfo): def utcoffset(self, dt): return None def dst(self, dt): return timedelta(0) bog = Bogus() self.assertRaises(ValueError, dt.astimezone, bog) # naive class AlsoBogus(tzinfo): def utcoffset(self, dt): return timedelta(0) def dst(self, dt): return None alsobog = AlsoBogus() self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive def test_subclass_datetime(self): class C(self.theclass): theAnswer = 42 def __new__(cls, *args, **kws): temp = kws.copy() extra = temp.pop('extra') result = self.theclass.__new__(cls, *args, **temp) result.extra = extra return result def newmeth(self, start): return start + self.year + self.month + self.second args = 2003, 4, 14, 12, 13, 41 dt1 = self.theclass(*args) dt2 = C(*args, **{'extra': 7}) self.assertEqual(dt2.__class__, C) self.assertEqual(dt2.theAnswer, 42) self.assertEqual(dt2.extra, 7) self.assertEqual(dt1.toordinal(), dt2.toordinal()) self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month + dt1.second - 7) class SubclassTime(time): sub_var = 1 class TestTime(HarmlessMixedComparison, unittest.TestCase): theclass = time def test_basic_attributes(self): t = self.theclass(12, 0) self.assertEqual(t.hour, 12) self.assertEqual(t.minute, 0) self.assertEqual(t.second, 0) self.assertEqual(t.microsecond, 0) def test_basic_attributes_nonzero(self): # Make sure all attributes are non-zero so bugs in # bit-shifting access show up. t = self.theclass(12, 59, 59, 8000) self.assertEqual(t.hour, 12) self.assertEqual(t.minute, 59) self.assertEqual(t.second, 59) self.assertEqual(t.microsecond, 8000) def test_roundtrip(self): t = self.theclass(1, 2, 3, 4) # Verify t -> string -> time identity. s = repr(t) self.failUnless(s.startswith('datetime.')) s = s[9:] t2 = eval(s) self.assertEqual(t, t2) # Verify identity via reconstructing from pieces. 
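# The repr/eval round-trip below leans on a stable convention: repr() has
# the form "datetime.<type>(...)", so dropping the 9-character "datetime."
# prefix leaves a plain constructor expression. Standalone sketch:
from datetime import time
_t = time(1, 2, 3, 4)
_s = repr(_t)                  # 'datetime.time(1, 2, 3, 4)'
assert eval(_s[9:]) == _t      # len('datetime.') == 9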
t2 = self.theclass(t.hour, t.minute, t.second, t.microsecond) self.assertEqual(t, t2) def test_comparing(self): args = [1, 2, 3, 4] t1 = self.theclass(*args) t2 = self.theclass(*args) self.failUnless(t1 == t2) self.failUnless(t1 <= t2) self.failUnless(t1 >= t2) self.failUnless(not t1 != t2) self.failUnless(not t1 < t2) self.failUnless(not t1 > t2) self.assertEqual(cmp(t1, t2), 0) self.assertEqual(cmp(t2, t1), 0) for i in range(len(args)): newargs = args[:] newargs[i] = args[i] + 1 t2 = self.theclass(*newargs) # this is larger than t1 self.failUnless(t1 < t2) self.failUnless(t2 > t1) self.failUnless(t1 <= t2) self.failUnless(t2 >= t1) self.failUnless(t1 != t2) self.failUnless(t2 != t1) self.failUnless(not t1 == t2) self.failUnless(not t2 == t1) self.failUnless(not t1 > t2) self.failUnless(not t2 < t1) self.failUnless(not t1 >= t2) self.failUnless(not t2 <= t1) self.assertEqual(cmp(t1, t2), -1) self.assertEqual(cmp(t2, t1), 1) for badarg in OTHERSTUFF: self.assertEqual(t1 == badarg, False) self.assertEqual(t1 != badarg, True) self.assertEqual(badarg == t1, False) self.assertEqual(badarg != t1, True) self.assertRaises(TypeError, lambda: t1 <= badarg) self.assertRaises(TypeError, lambda: t1 < badarg) self.assertRaises(TypeError, lambda: t1 > badarg) self.assertRaises(TypeError, lambda: t1 >= badarg) self.assertRaises(TypeError, lambda: badarg <= t1) self.assertRaises(TypeError, lambda: badarg < t1) self.assertRaises(TypeError, lambda: badarg > t1) self.assertRaises(TypeError, lambda: badarg >= t1) def test_bad_constructor_arguments(self): # bad hours self.theclass(0, 0) # no exception self.theclass(23, 0) # no exception self.assertRaises(ValueError, self.theclass, -1, 0) self.assertRaises(ValueError, self.theclass, 24, 0) # bad minutes self.theclass(23, 0) # no exception self.theclass(23, 59) # no exception self.assertRaises(ValueError, self.theclass, 23, -1) self.assertRaises(ValueError, self.theclass, 23, 60) # bad seconds self.theclass(23, 59, 0) # no exception self.theclass(23, 59, 59) # no exception self.assertRaises(ValueError, self.theclass, 23, 59, -1) self.assertRaises(ValueError, self.theclass, 23, 59, 60) # bad microseconds self.theclass(23, 59, 59, 0) # no exception self.theclass(23, 59, 59, 999999) # no exception self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1) self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000) def test_hash_equality(self): d = self.theclass(23, 30, 17) e = self.theclass(23, 30, 17) self.assertEqual(d, e) self.assertEqual(hash(d), hash(e)) dic = {d: 1} dic[e] = 2 self.assertEqual(len(dic), 1) self.assertEqual(dic[d], 2) self.assertEqual(dic[e], 2) d = self.theclass(0, 5, 17) e = self.theclass(0, 5, 17) self.assertEqual(d, e) self.assertEqual(hash(d), hash(e)) dic = {d: 1} dic[e] = 2 self.assertEqual(len(dic), 1) self.assertEqual(dic[d], 2) self.assertEqual(dic[e], 2) def test_isoformat(self): t = self.theclass(4, 5, 1, 123) self.assertEqual(t.isoformat(), "04:05:01.000123") self.assertEqual(t.isoformat(), str(t)) t = self.theclass() self.assertEqual(t.isoformat(), "00:00:00") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=1) self.assertEqual(t.isoformat(), "00:00:00.000001") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=10) self.assertEqual(t.isoformat(), "00:00:00.000010") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=100) self.assertEqual(t.isoformat(), "00:00:00.000100") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=1000) 
self.assertEqual(t.isoformat(), "00:00:00.001000") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=10000) self.assertEqual(t.isoformat(), "00:00:00.010000") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=100000) self.assertEqual(t.isoformat(), "00:00:00.100000") self.assertEqual(t.isoformat(), str(t)) def test_1653736(self): # verify it doesn't accept extra keyword arguments t = self.theclass(second=1) self.assertRaises(TypeError, t.isoformat, foo=3) def test_strftime(self): t = self.theclass(1, 2, 3, 4) self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004") # A naive object replaces %z and %Z with empty strings. self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''") def test_format(self): t = self.theclass(1, 2, 3, 4) self.assertEqual(t.__format__(''), str(t)) # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): return 'A' a = A(1, 2, 3, 4) self.assertEqual(a.__format__(''), 'A') # check that a derived class's strftime gets called class B(self.theclass): def strftime(self, format_spec): return 'B' b = B(1, 2, 3, 4) self.assertEqual(b.__format__(''), str(t)) for fmt in ['%H %M %S', ]: self.assertEqual(t.__format__(fmt), t.strftime(fmt)) self.assertEqual(a.__format__(fmt), t.strftime(fmt)) self.assertEqual(b.__format__(fmt), 'B') def test_str(self): self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004") self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000") self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000") self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03") self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00") def test_repr(self): name = 'datetime.' + self.theclass.__name__ self.assertEqual(repr(self.theclass(1, 2, 3, 4)), "%s(1, 2, 3, 4)" % name) self.assertEqual(repr(self.theclass(10, 2, 3, 4000)), "%s(10, 2, 3, 4000)" % name) self.assertEqual(repr(self.theclass(0, 2, 3, 400000)), "%s(0, 2, 3, 400000)" % name) self.assertEqual(repr(self.theclass(12, 2, 3, 0)), "%s(12, 2, 3)" % name) self.assertEqual(repr(self.theclass(23, 15, 0, 0)), "%s(23, 15)" % name) def test_resolution_info(self): self.assert_(isinstance(self.theclass.min, self.theclass)) self.assert_(isinstance(self.theclass.max, self.theclass)) self.assert_(isinstance(self.theclass.resolution, timedelta)) self.assert_(self.theclass.max > self.theclass.min) def test_pickling(self): args = 20, 59, 16, 64**2 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_pickling_subclass_time(self): args = 20, 59, 16, 64**2 orig = SubclassTime(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_bool(self): cls = self.theclass self.failUnless(cls(1)) self.failUnless(cls(0, 1)) self.failUnless(cls(0, 0, 1)) self.failUnless(cls(0, 0, 0, 1)) self.failUnless(not cls(0)) self.failUnless(not cls()) def test_replace(self): cls = self.theclass args = [1, 2, 3, 4] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("hour", 5), ("minute", 6), ("second", 7), ("microsecond", 8)): newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Out of bounds. 
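# replace() funnels through the same range validation as the constructor,
# so out-of-bounds values raise ValueError instead of wrapping. Minimal
# standalone check mirroring the assertions below:
from datetime import time as _time
_raised = False
try:
    _time(1).replace(hour=24)    # validated exactly like time(24)
except ValueError:
    _raised = True
assert _raised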
        base = cls(1)
        self.assertRaises(ValueError, base.replace, hour=24)
        self.assertRaises(ValueError, base.replace, minute=-1)
        self.assertRaises(ValueError, base.replace, second=100)
        self.assertRaises(ValueError, base.replace, microsecond=1000000)

    def test_subclass_time(self):

        class C(self.theclass):
            theAnswer = 42

            def __new__(cls, *args, **kws):
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result

            def newmeth(self, start):
                return start + self.hour + self.second

        args = 4, 5, 6

        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})

        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.isoformat(), dt2.isoformat())
        self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)

    def test_backdoor_resistance(self):
        # see TestDate.test_backdoor_resistance().
        base = '2:59.0'
        for hour_byte in ' ', '9', chr(24), '\xff':
            self.assertRaises(TypeError, self.theclass,
                              hour_byte + base[1:])

# A mixin for classes with a tzinfo= argument.  Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
class TZInfoBase:

    def test_argument_passing(self):
        cls = self.theclass
        # A datetime passes itself on, a time passes None.
        class introspective(tzinfo):
            def tzname(self, dt): return dt and "real" or "none"
            def utcoffset(self, dt):
                return timedelta(minutes = dt and 42 or -42)
            dst = utcoffset

        obj = cls(1, 2, 3, tzinfo=introspective())

        expected = cls is time and "none" or "real"
        self.assertEqual(obj.tzname(), expected)

        expected = timedelta(minutes=(cls is time and -42 or 42))
        self.assertEqual(obj.utcoffset(), expected)
        self.assertEqual(obj.dst(), expected)

    def test_bad_tzinfo_classes(self):
        cls = self.theclass
        self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)

        class NiceTry(object):
            def __init__(self): pass
            def utcoffset(self, dt): pass
        self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)

        class BetterTry(tzinfo):
            def __init__(self): pass
            def utcoffset(self, dt): pass
        b = BetterTry()
        t = cls(1, 1, 1, tzinfo=b)
        self.failUnless(t.tzinfo is b)

    def test_utc_offset_out_of_bounds(self):
        class Edgy(tzinfo):
            def __init__(self, offset):
                self.offset = timedelta(minutes=offset)
            def utcoffset(self, dt):
                return self.offset

        cls = self.theclass
        for offset, legit in ((-1440, False),
                              (-1439, True),
                              (1439, True),
                              (1440, False)):
            if cls is time:
                t = cls(1, 2, 3, tzinfo=Edgy(offset))
            elif cls is datetime:
                t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
            else:
                assert 0, "impossible"
            if legit:
                aofs = abs(offset)
                h, m = divmod(aofs, 60)
                tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
                if isinstance(t, datetime):
                    t = t.timetz()
                self.assertEqual(str(t), "01:02:03" + tag)
            else:
                self.assertRaises(ValueError, str, t)

    def test_tzinfo_classes(self):
        cls = self.theclass
        class C1(tzinfo):
            def utcoffset(self, dt): return None
            def dst(self, dt): return None
            def tzname(self, dt): return None
        for t in (cls(1, 1, 1),
                  cls(1, 1, 1, tzinfo=None),
                  cls(1, 1, 1, tzinfo=C1())):
            self.failUnless(t.utcoffset() is None)
            self.failUnless(t.dst() is None)
            self.failUnless(t.tzname() is None)

        class C3(tzinfo):
            def utcoffset(self, dt): return timedelta(minutes=-1439)
            def dst(self, dt): return timedelta(minutes=1439)
            def tzname(self, dt): return "aname"
        t = cls(1, 1, 1, tzinfo=C3())
        self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
        self.assertEqual(t.dst(), timedelta(minutes=1439))
        self.assertEqual(t.tzname(), "aname")

        # Wrong types.
        class C4(tzinfo):
            def utcoffset(self, dt): return "aname"
            def dst(self, dt): return 7
            def tzname(self, dt): return 0
        t = cls(1, 1, 1, tzinfo=C4())
        self.assertRaises(TypeError, t.utcoffset)
        self.assertRaises(TypeError, t.dst)
        self.assertRaises(TypeError, t.tzname)

        # Offset out of range.
        class C6(tzinfo):
            def utcoffset(self, dt): return timedelta(hours=-24)
            def dst(self, dt): return timedelta(hours=24)
        t = cls(1, 1, 1, tzinfo=C6())
        self.assertRaises(ValueError, t.utcoffset)
        self.assertRaises(ValueError, t.dst)

        # Not a whole number of minutes.
        class C7(tzinfo):
            def utcoffset(self, dt): return timedelta(seconds=61)
            def dst(self, dt): return timedelta(microseconds=-81)
        t = cls(1, 1, 1, tzinfo=C7())
        self.assertRaises(ValueError, t.utcoffset)
        self.assertRaises(ValueError, t.dst)

    def test_aware_compare(self):
        cls = self.theclass

        # Ensure that utcoffset() gets ignored if the comparands have
        # the same tzinfo member.
        class OperandDependentOffset(tzinfo):
            def utcoffset(self, t):
                if t.minute < 10:
                    # d0 and d1 equal after adjustment
                    return timedelta(minutes=t.minute)
                else:
                    # d2 off in the weeds
                    return timedelta(minutes=59)

        base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
        d0 = base.replace(minute=3)
        d1 = base.replace(minute=9)
        d2 = base.replace(minute=11)
        for x in d0, d1, d2:
            for y in d0, d1, d2:
                got = cmp(x, y)
                expected = cmp(x.minute, y.minute)
                self.assertEqual(got, expected)

        # However, if they're different members, utcoffset is not ignored.
        # Note that a time can't actually have an operand-dependent offset,
        # though (and time.utcoffset() passes None to tzinfo.utcoffset()),
        # so skip this test for time.
        if cls is not time:
            d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
            d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
            d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
            for x in d0, d1, d2:
                for y in d0, d1, d2:
                    got = cmp(x, y)
                    if (x is d0 or x is d1) and (y is d0 or y is d1):
                        expected = 0
                    elif x is y is d2:
                        expected = 0
                    elif x is d2:
                        expected = -1
                    else:
                        assert y is d2
                        expected = 1
                    self.assertEqual(got, expected)


# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase): theclass = time def test_empty(self): t = self.theclass() self.assertEqual(t.hour, 0) self.assertEqual(t.minute, 0) self.assertEqual(t.second, 0) self.assertEqual(t.microsecond, 0) self.failUnless(t.tzinfo is None) def test_zones(self): est = FixedOffset(-300, "EST", 1) utc = FixedOffset(0, "UTC", -2) met = FixedOffset(60, "MET", 3) t1 = time( 7, 47, tzinfo=est) t2 = time(12, 47, tzinfo=utc) t3 = time(13, 47, tzinfo=met) t4 = time(microsecond=40) t5 = time(microsecond=40, tzinfo=utc) self.assertEqual(t1.tzinfo, est) self.assertEqual(t2.tzinfo, utc) self.assertEqual(t3.tzinfo, met) self.failUnless(t4.tzinfo is None) self.assertEqual(t5.tzinfo, utc) self.assertEqual(t1.utcoffset(), timedelta(minutes=-300)) self.assertEqual(t2.utcoffset(), timedelta(minutes=0)) self.assertEqual(t3.utcoffset(), timedelta(minutes=60)) self.failUnless(t4.utcoffset() is None) self.assertRaises(TypeError, t1.utcoffset, "no args") self.assertEqual(t1.tzname(), "EST") self.assertEqual(t2.tzname(), "UTC") self.assertEqual(t3.tzname(), "MET") self.failUnless(t4.tzname() is None) self.assertRaises(TypeError, t1.tzname, "no args") self.assertEqual(t1.dst(), timedelta(minutes=1)) self.assertEqual(t2.dst(), timedelta(minutes=-2)) self.assertEqual(t3.dst(), timedelta(minutes=3)) self.failUnless(t4.dst() is None) self.assertRaises(TypeError, t1.dst, "no args") self.assertEqual(hash(t1), hash(t2)) self.assertEqual(hash(t1), hash(t3)) self.assertEqual(hash(t2), hash(t3)) self.assertEqual(t1, t2) self.assertEqual(t1, t3) self.assertEqual(t2, t3) self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive self.assertEqual(str(t1), "07:47:00-05:00") self.assertEqual(str(t2), "12:47:00+00:00") self.assertEqual(str(t3), "13:47:00+01:00") self.assertEqual(str(t4), "00:00:00.000040") self.assertEqual(str(t5), "00:00:00.000040+00:00") self.assertEqual(t1.isoformat(), "07:47:00-05:00") self.assertEqual(t2.isoformat(), "12:47:00+00:00") self.assertEqual(t3.isoformat(), "13:47:00+01:00") self.assertEqual(t4.isoformat(), "00:00:00.000040") self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00") d = 'datetime.time' self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)") self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)") self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)") self.assertEqual(repr(t4), d + "(0, 0, 0, 40)") self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)") self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"), "07:47:00 %Z=EST %z=-0500") self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000") self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100") yuck = FixedOffset(-1439, "%z %Z %%z%%Z") t1 = time(23, 59, tzinfo=yuck) self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"), "23:59 %Z='%z %Z %%z%%Z' %z='-2359'") # Check that an invalid tzname result raises an exception. class Badtzname(tzinfo): def tzname(self, dt): return 42 t = time(2, 3, 4, tzinfo=Badtzname()) self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04") self.assertRaises(TypeError, t.strftime, "%Z") def test_hash_edge_cases(self): # Offsets that overflow a basic time. 
t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, "")) t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, "")) self.assertEqual(hash(t1), hash(t2)) t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, "")) t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, "")) self.assertEqual(hash(t1), hash(t2)) def test_pickling(self): # Try one without a tzinfo. args = 20, 59, 16, 64**2 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) # Try one with a tzinfo. tinfo = PicklableFixedOffset(-300, 'cookie') orig = self.theclass(5, 6, 7, tzinfo=tinfo) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) self.failUnless(isinstance(derived.tzinfo, PicklableFixedOffset)) self.assertEqual(derived.utcoffset(), timedelta(minutes=-300)) self.assertEqual(derived.tzname(), 'cookie') def test_more_bool(self): # Test cases with non-None tzinfo. cls = self.theclass t = cls(0, tzinfo=FixedOffset(-300, "")) self.failUnless(t) t = cls(5, tzinfo=FixedOffset(-300, "")) self.failUnless(t) t = cls(5, tzinfo=FixedOffset(300, "")) self.failUnless(not t) t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, "")) self.failUnless(not t) # Mostly ensuring this doesn't overflow internally. t = cls(0, tzinfo=FixedOffset(23*60 + 59, "")) self.failUnless(t) # But this should yield a value error -- the utcoffset is bogus. t = cls(0, tzinfo=FixedOffset(24*60, "")) self.assertRaises(ValueError, lambda: bool(t)) # Likewise. t = cls(0, tzinfo=FixedOffset(-24*60, "")) self.assertRaises(ValueError, lambda: bool(t)) def test_replace(self): cls = self.theclass z100 = FixedOffset(100, "+100") zm200 = FixedOffset(timedelta(minutes=-200), "-200") args = [1, 2, 3, 4, z100] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("hour", 5), ("minute", 6), ("second", 7), ("microsecond", 8), ("tzinfo", zm200)): newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Ensure we can get rid of a tzinfo. self.assertEqual(base.tzname(), "+100") base2 = base.replace(tzinfo=None) self.failUnless(base2.tzinfo is None) self.failUnless(base2.tzname() is None) # Ensure we can add one. base3 = base2.replace(tzinfo=z100) self.assertEqual(base, base3) self.failUnless(base.tzinfo is base3.tzinfo) # Out of bounds. base = cls(1) self.assertRaises(ValueError, base.replace, hour=24) self.assertRaises(ValueError, base.replace, minute=-1) self.assertRaises(ValueError, base.replace, second=100) self.assertRaises(ValueError, base.replace, microsecond=1000000) def test_mixed_compare(self): t1 = time(1, 2, 3) t2 = time(1, 2, 3) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=None) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(None, "")) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(0, "")) self.assertRaises(TypeError, lambda: t1 == t2) # In time w/ identical tzinfo objects, utcoffset is ignored. 
        class Varies(tzinfo):
            def __init__(self):
                self.offset = timedelta(minutes=22)
            def utcoffset(self, t):
                self.offset += timedelta(minutes=1)
                return self.offset

        v = Varies()
        t1 = t2.replace(tzinfo=v)
        t2 = t2.replace(tzinfo=v)
        self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
        self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
        self.assertEqual(t1, t2)

        # But if they're not identical, it isn't ignored.
        t2 = t2.replace(tzinfo=Varies())
        self.failUnless(t1 < t2)  # t1's offset counter still going up

    def test_subclass_timetz(self):

        class C(self.theclass):
            theAnswer = 42

            def __new__(cls, *args, **kws):
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result

            def newmeth(self, start):
                return start + self.hour + self.second

        args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)

        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})

        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
        self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)


# Testing datetime objects with a non-None tzinfo.

class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
    theclass = datetime

    def test_trivial(self):
        dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
        self.assertEqual(dt.year, 1)
        self.assertEqual(dt.month, 2)
        self.assertEqual(dt.day, 3)
        self.assertEqual(dt.hour, 4)
        self.assertEqual(dt.minute, 5)
        self.assertEqual(dt.second, 6)
        self.assertEqual(dt.microsecond, 7)
        self.assertEqual(dt.tzinfo, None)

    def test_even_more_compare(self):
        # The test_compare() and test_more_compare() inherited from TestDate
        # and TestDateTime covered non-tzinfo cases.

        # Smallest possible after UTC adjustment.
        t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
        # Largest possible after UTC adjustment.
        t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
                           tzinfo=FixedOffset(-1439, ""))

        # Make sure those compare correctly, and w/o overflow.
        self.failUnless(t1 < t2)
        self.failUnless(t1 != t2)
        self.failUnless(t2 > t1)

        self.failUnless(t1 == t1)
        self.failUnless(t2 == t2)

        # Equal after adjustment.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
        t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
        self.assertEqual(t1, t2)

        # Change t1 not to subtract a minute, and t1 should be larger.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
        self.failUnless(t1 > t2)

        # Change t1 to subtract 2 minutes, and t1 should be smaller.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
        self.failUnless(t1 < t2)

        # Back to the original t1, but make seconds resolve it.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
                           second=1)
        self.failUnless(t1 > t2)

        # Likewise, but make microseconds resolve it.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
                           microsecond=1)
        self.failUnless(t1 > t2)

        # Make t2 naive and it should fail.
        t2 = self.theclass.min
        self.assertRaises(TypeError, lambda: t1 == t2)
        self.assertEqual(t2, t2)

        # It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
        class Naive(tzinfo):
            def utcoffset(self, dt): return None
        t2 = self.theclass(5, 6, 7, tzinfo=Naive())
        self.assertRaises(TypeError, lambda: t1 == t2)
        self.assertEqual(t2, t2)

        # OTOH, it's OK to compare two of these mixing the two ways of being
        # naive.
        t1 = self.theclass(5, 6, 7)
        self.assertEqual(t1, t2)

        # Try a bogus utcoffset.
class Bogus(tzinfo): def utcoffset(self, dt): return timedelta(minutes=1440) # out of bounds t1 = self.theclass(2, 2, 2, tzinfo=Bogus()) t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, "")) self.assertRaises(ValueError, lambda: t1 == t2) def test_pickling(self): # Try one without a tzinfo. args = 6, 7, 23, 20, 59, 1, 64**2 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) # Try one with a tzinfo. tinfo = PicklableFixedOffset(-300, 'cookie') orig = self.theclass(*args, **{'tzinfo': tinfo}) derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0)) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) self.failUnless(isinstance(derived.tzinfo, PicklableFixedOffset)) self.assertEqual(derived.utcoffset(), timedelta(minutes=-300)) self.assertEqual(derived.tzname(), 'cookie') def test_extreme_hashes(self): # If an attempt is made to hash these via subtracting the offset # then hashing a datetime object, OverflowError results. The # Python implementation used to blow up here. t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "")) hash(t) t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzinfo=FixedOffset(-1439, "")) hash(t) # OTOH, an OOB offset should blow up. t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, "")) self.assertRaises(ValueError, hash, t) def test_zones(self): est = FixedOffset(-300, "EST") utc = FixedOffset(0, "UTC") met = FixedOffset(60, "MET") t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est) t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc) t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met) self.assertEqual(t1.tzinfo, est) self.assertEqual(t2.tzinfo, utc) self.assertEqual(t3.tzinfo, met) self.assertEqual(t1.utcoffset(), timedelta(minutes=-300)) self.assertEqual(t2.utcoffset(), timedelta(minutes=0)) self.assertEqual(t3.utcoffset(), timedelta(minutes=60)) self.assertEqual(t1.tzname(), "EST") self.assertEqual(t2.tzname(), "UTC") self.assertEqual(t3.tzname(), "MET") self.assertEqual(hash(t1), hash(t2)) self.assertEqual(hash(t1), hash(t3)) self.assertEqual(hash(t2), hash(t3)) self.assertEqual(t1, t2) self.assertEqual(t1, t3) self.assertEqual(t2, t3) self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00") self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00") self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00") d = 'datetime.datetime(2002, 3, 19, ' self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)") self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)") self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)") def test_combine(self): met = FixedOffset(60, "MET") d = date(2002, 3, 4) tz = time(18, 45, 3, 1234, tzinfo=met) dt = datetime.combine(d, tz) self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)) def test_extract(self): met = FixedOffset(60, "MET") dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met) self.assertEqual(dt.date(), date(2002, 3, 4)) self.assertEqual(dt.time(), time(18, 45, 3, 1234)) self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met)) def test_tz_aware_arithmetic(self): import random now = self.theclass.now() tz55 = FixedOffset(-330, "west 5:30") timeaware = now.time().replace(tzinfo=tz55) nowaware = self.theclass.combine(now.date(), timeaware) self.failUnless(nowaware.tzinfo is tz55) self.assertEqual(nowaware.timetz(), timeaware) # Can't mix aware and non-aware. 
self.assertRaises(TypeError, lambda: now - nowaware) self.assertRaises(TypeError, lambda: nowaware - now) # And adding datetime's doesn't make sense, aware or not. self.assertRaises(TypeError, lambda: now + nowaware) self.assertRaises(TypeError, lambda: nowaware + now) self.assertRaises(TypeError, lambda: nowaware + nowaware) # Subtracting should yield 0. self.assertEqual(now - now, timedelta(0)) self.assertEqual(nowaware - nowaware, timedelta(0)) # Adding a delta should preserve tzinfo. delta = timedelta(weeks=1, minutes=12, microseconds=5678) nowawareplus = nowaware + delta self.failUnless(nowaware.tzinfo is tz55) nowawareplus2 = delta + nowaware self.failUnless(nowawareplus2.tzinfo is tz55) self.assertEqual(nowawareplus, nowawareplus2) # that - delta should be what we started with, and that - what we # started with should be delta. diff = nowawareplus - delta self.failUnless(diff.tzinfo is tz55) self.assertEqual(nowaware, diff) self.assertRaises(TypeError, lambda: delta - nowawareplus) self.assertEqual(nowawareplus - nowaware, delta) # Make up a random timezone. tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone") # Attach it to nowawareplus. nowawareplus = nowawareplus.replace(tzinfo=tzr) self.failUnless(nowawareplus.tzinfo is tzr) # Make sure the difference takes the timezone adjustments into account. got = nowaware - nowawareplus # Expected: (nowaware base - nowaware offset) - # (nowawareplus base - nowawareplus offset) = # (nowaware base - nowawareplus base) + # (nowawareplus offset - nowaware offset) = # -delta + nowawareplus offset - nowaware offset expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta self.assertEqual(got, expected) # Try max possible difference. min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min")) max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzinfo=FixedOffset(-1439, "max")) maxdiff = max - min self.assertEqual(maxdiff, self.theclass.max - self.theclass.min + timedelta(minutes=2*1439)) def test_tzinfo_now(self): meth = self.theclass.now # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up). base = meth() # Try with and without naming the keyword. off42 = FixedOffset(42, "42") another = meth(off42) again = meth(tz=off42) self.failUnless(another.tzinfo is again.tzinfo) self.assertEqual(another.utcoffset(), timedelta(minutes=42)) # Bad argument with and w/o naming the keyword. self.assertRaises(TypeError, meth, 16) self.assertRaises(TypeError, meth, tzinfo=16) # Bad keyword name. self.assertRaises(TypeError, meth, tinfo=off42) # Too many args. self.assertRaises(TypeError, meth, off42, off42) # We don't know which time zone we're in, and don't have a tzinfo # class to represent it, so seeing whether a tz argument actually # does a conversion is tricky. weirdtz = FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0) utc = FixedOffset(0, "utc", 0) for dummy in range(3): now = datetime.now(weirdtz) self.failUnless(now.tzinfo is weirdtz) utcnow = datetime.utcnow().replace(tzinfo=utc) now2 = utcnow.astimezone(weirdtz) if abs(now - now2) < timedelta(seconds=30): break # Else the code is broken, or more than 30 seconds passed between # calls; assuming the latter, just try again. else: # Three strikes and we're out. self.fail("utcnow(), now(tz), or astimezone() may be broken") def test_tzinfo_fromtimestamp(self): import time meth = self.theclass.fromtimestamp ts = time.time() # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up). base = meth(ts) # Try with and without naming the keyword. 
off42 = FixedOffset(42, "42") another = meth(ts, off42) again = meth(ts, tz=off42) self.failUnless(another.tzinfo is again.tzinfo) self.assertEqual(another.utcoffset(), timedelta(minutes=42)) # Bad argument with and w/o naming the keyword. self.assertRaises(TypeError, meth, ts, 16) self.assertRaises(TypeError, meth, ts, tzinfo=16) # Bad keyword name. self.assertRaises(TypeError, meth, ts, tinfo=off42) # Too many args. self.assertRaises(TypeError, meth, ts, off42, off42) # Too few args. self.assertRaises(TypeError, meth) # Try to make sure tz= actually does some conversion. timestamp = 1000000000 utcdatetime = datetime.utcfromtimestamp(timestamp) # In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take. # But on some flavor of Mac, it's nowhere near that. So we can't have # any idea here what time that actually is, we can only test that # relative changes match. utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero tz = FixedOffset(utcoffset, "tz", 0) expected = utcdatetime + utcoffset got = datetime.fromtimestamp(timestamp, tz) self.assertEqual(expected, got.replace(tzinfo=None)) def test_tzinfo_utcnow(self): meth = self.theclass.utcnow # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up). base = meth() # Try with and without naming the keyword; for whatever reason, # utcnow() doesn't accept a tzinfo argument. off42 = FixedOffset(42, "42") self.assertRaises(TypeError, meth, off42) self.assertRaises(TypeError, meth, tzinfo=off42) def test_tzinfo_utcfromtimestamp(self): import time meth = self.theclass.utcfromtimestamp ts = time.time() # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up). base = meth(ts) # Try with and without naming the keyword; for whatever reason, # utcfromtimestamp() doesn't accept a tzinfo argument. off42 = FixedOffset(42, "42") self.assertRaises(TypeError, meth, ts, off42) self.assertRaises(TypeError, meth, ts, tzinfo=off42) def test_tzinfo_timetuple(self): # TestDateTime tested most of this. datetime adds a twist to the # DST flag. class DST(tzinfo): def __init__(self, dstvalue): if isinstance(dstvalue, int): dstvalue = timedelta(minutes=dstvalue) self.dstvalue = dstvalue def dst(self, dt): return self.dstvalue cls = self.theclass for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1): d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue)) t = d.timetuple() self.assertEqual(1, t.tm_year) self.assertEqual(1, t.tm_mon) self.assertEqual(1, t.tm_mday) self.assertEqual(10, t.tm_hour) self.assertEqual(20, t.tm_min) self.assertEqual(30, t.tm_sec) self.assertEqual(0, t.tm_wday) self.assertEqual(1, t.tm_yday) self.assertEqual(flag, t.tm_isdst) # dst() returns wrong type. self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple) # dst() at the edge. self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1) self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1) # dst() out of range. self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple) self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple) def test_utctimetuple(self): class DST(tzinfo): def __init__(self, dstvalue): if isinstance(dstvalue, int): dstvalue = timedelta(minutes=dstvalue) self.dstvalue = dstvalue def dst(self, dt): return self.dstvalue cls = self.theclass # This can't work: DST didn't implement utcoffset. 
        self.assertRaises(NotImplementedError,
                          cls(1, 1, 1, tzinfo=DST(0)).utcoffset)

        class UOFS(DST):
            def __init__(self, uofs, dofs=None):
                DST.__init__(self, dofs)
                self.uofs = timedelta(minutes=uofs)
            def utcoffset(self, dt):
                return self.uofs

        # Ensure tm_isdst is 0 regardless of what dst() says:  DST is never
        # in effect for a UTC time.
        for dstvalue in -33, 33, 0, None:
            d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
            t = d.utctimetuple()
            self.assertEqual(d.year, t.tm_year)
            self.assertEqual(d.month, t.tm_mon)
            self.assertEqual(d.day, t.tm_mday)
            self.assertEqual(11, t.tm_hour)  # 20mm + 53mm = 1h + 13mm
            self.assertEqual(13, t.tm_min)
            self.assertEqual(d.second, t.tm_sec)
            self.assertEqual(d.weekday(), t.tm_wday)
            self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
                             t.tm_yday)
            self.assertEqual(0, t.tm_isdst)

        # At the edges, UTC adjustment can normalize into years out-of-range
        # for a datetime object.  Ensure that a correct timetuple is
        # created anyway.
        tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
        # That goes back 1 minute less than a full day.
        t = tiny.utctimetuple()
        self.assertEqual(t.tm_year, MINYEAR-1)
        self.assertEqual(t.tm_mon, 12)
        self.assertEqual(t.tm_mday, 31)
        self.assertEqual(t.tm_hour, 0)
        self.assertEqual(t.tm_min, 1)
        self.assertEqual(t.tm_sec, 37)
        self.assertEqual(t.tm_yday, 366)    # "year 0" is a leap year
        self.assertEqual(t.tm_isdst, 0)

        huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
        # That goes forward 1 minute less than a full day.
        t = huge.utctimetuple()
        self.assertEqual(t.tm_year, MAXYEAR+1)
        self.assertEqual(t.tm_mon, 1)
        self.assertEqual(t.tm_mday, 1)
        self.assertEqual(t.tm_hour, 23)
        self.assertEqual(t.tm_min, 58)
        self.assertEqual(t.tm_sec, 37)
        self.assertEqual(t.tm_yday, 1)
        self.assertEqual(t.tm_isdst, 0)

    def test_tzinfo_isoformat(self):
        zero = FixedOffset(0, "+00:00")
        plus = FixedOffset(220, "+03:40")
        minus = FixedOffset(-231, "-03:51")
        unknown = FixedOffset(None, "")

        cls = self.theclass
        datestr = '0001-02-03'
        for ofs in None, zero, plus, minus, unknown:
            for us in 0, 987001:
                d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
                timestr = '04:05:59' + (us and '.987001' or '')
                ofsstr = ofs is not None and d.tzname() or ''
                tailstr = timestr + ofsstr
                iso = d.isoformat()
                self.assertEqual(iso, datestr + 'T' + tailstr)
                self.assertEqual(iso, d.isoformat('T'))
                self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
                self.assertEqual(str(d), datestr + ' ' + tailstr)

    def test_replace(self):
        cls = self.theclass
        z100 = FixedOffset(100, "+100")
        zm200 = FixedOffset(timedelta(minutes=-200), "-200")
        args = [1, 2, 3, 4, 5, 6, 7, z100]
        base = cls(*args)
        self.assertEqual(base, base.replace())

        i = 0
        for name, newval in (("year", 2),
                             ("month", 3),
                             ("day", 4),
                             ("hour", 5),
                             ("minute", 6),
                             ("second", 7),
                             ("microsecond", 8),
                             ("tzinfo", zm200)):
            newargs = args[:]
            newargs[i] = newval
            expected = cls(*newargs)
            got = base.replace(**{name: newval})
            self.assertEqual(expected, got)
            i += 1

        # Ensure we can get rid of a tzinfo.
        self.assertEqual(base.tzname(), "+100")
        base2 = base.replace(tzinfo=None)
        self.failUnless(base2.tzinfo is None)
        self.failUnless(base2.tzname() is None)

        # Ensure we can add one.
        base3 = base2.replace(tzinfo=z100)
        self.assertEqual(base, base3)
        self.failUnless(base.tzinfo is base3.tzinfo)

        # Out of bounds.
        base = cls(2000, 2, 29)
        self.assertRaises(ValueError, base.replace, year=2001)

    def test_more_astimezone(self):
        # The inherited test_astimezone covered some trivial and error cases.
fnone = FixedOffset(None, "None") f44m = FixedOffset(44, "44") fm5h = FixedOffset(-timedelta(hours=5), "m300") dt = self.theclass.now(tz=f44m) self.failUnless(dt.tzinfo is f44m) # Replacing with degenerate tzinfo raises an exception. self.assertRaises(ValueError, dt.astimezone, fnone) # Ditto with None tz. self.assertRaises(TypeError, dt.astimezone, None) # Replacing with same tzinfo makes no change. x = dt.astimezone(dt.tzinfo) self.failUnless(x.tzinfo is f44m) self.assertEqual(x.date(), dt.date()) self.assertEqual(x.time(), dt.time()) # Replacing with different tzinfo does adjust. got = dt.astimezone(fm5h) self.failUnless(got.tzinfo is fm5h) self.assertEqual(got.utcoffset(), timedelta(hours=-5)) expected = dt - dt.utcoffset() # in effect, convert to UTC expected += fm5h.utcoffset(dt) # and from there to local time expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo self.assertEqual(got.date(), expected.date()) self.assertEqual(got.time(), expected.time()) self.assertEqual(got.timetz(), expected.timetz()) self.failUnless(got.tzinfo is expected.tzinfo) self.assertEqual(got, expected) def test_aware_subtract(self): cls = self.theclass # Ensure that utcoffset() is ignored when the operands have the # same tzinfo member. class OperandDependentOffset(tzinfo): def utcoffset(self, t): if t.minute < 10: # d0 and d1 equal after adjustment return timedelta(minutes=t.minute) else: # d2 off in the weeds return timedelta(minutes=59) base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset()) d0 = base.replace(minute=3) d1 = base.replace(minute=9) d2 = base.replace(minute=11) for x in d0, d1, d2: for y in d0, d1, d2: got = x - y expected = timedelta(minutes=x.minute - y.minute) self.assertEqual(got, expected) # OTOH, if the tzinfo members are distinct, utcoffsets aren't # ignored. base = cls(8, 9, 10, 11, 12, 13, 14) d0 = base.replace(minute=3, tzinfo=OperandDependentOffset()) d1 = base.replace(minute=9, tzinfo=OperandDependentOffset()) d2 = base.replace(minute=11, tzinfo=OperandDependentOffset()) for x in d0, d1, d2: for y in d0, d1, d2: got = x - y if (x is d0 or x is d1) and (y is d0 or y is d1): expected = timedelta(0) elif x is y is d2: expected = timedelta(0) elif x is d2: expected = timedelta(minutes=(11-59)-0) else: assert y is d2 expected = timedelta(minutes=0-(11-59)) self.assertEqual(got, expected) def test_mixed_compare(self): t1 = datetime(1, 2, 3, 4, 5, 6, 7) t2 = datetime(1, 2, 3, 4, 5, 6, 7) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=None) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(None, "")) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(0, "")) self.assertRaises(TypeError, lambda: t1 == t2) # In datetime w/ identical tzinfo objects, utcoffset is ignored. class Varies(tzinfo): def __init__(self): self.offset = timedelta(minutes=22) def utcoffset(self, t): self.offset += timedelta(minutes=1) return self.offset v = Varies() t1 = t2.replace(tzinfo=v) t2 = t2.replace(tzinfo=v) self.assertEqual(t1.utcoffset(), timedelta(minutes=23)) self.assertEqual(t2.utcoffset(), timedelta(minutes=24)) self.assertEqual(t1, t2) # But if they're not identical, it isn't ignored. 
t2 = t2.replace(tzinfo=Varies()) self.failUnless(t1 < t2) # t1's offset counter still going up def test_subclass_datetimetz(self): class C(self.theclass): theAnswer = 42 def __new__(cls, *args, **kws): temp = kws.copy() extra = temp.pop('extra') result = self.theclass.__new__(cls, *args, **temp) result.extra = extra return result def newmeth(self, start): return start + self.hour + self.year args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1) dt1 = self.theclass(*args) dt2 = C(*args, **{'extra': 7}) self.assertEqual(dt2.__class__, C) self.assertEqual(dt2.theAnswer, 42) self.assertEqual(dt2.extra, 7) self.assertEqual(dt1.utcoffset(), dt2.utcoffset()) self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7) # Pain to set up DST-aware tzinfo classes. def first_sunday_on_or_after(dt): days_to_go = 6 - dt.weekday() if days_to_go: dt += timedelta(days_to_go) return dt ZERO = timedelta(0) HOUR = timedelta(hours=1) DAY = timedelta(days=1) # In the US, DST starts at 2am (standard time) on the first Sunday in April. DSTSTART = datetime(1, 4, 1, 2) # and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct, # which is the first Sunday on or after Oct 25. Because we view 1:MM as # being standard time on that day, there is no spelling in local time of # the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time). DSTEND = datetime(1, 10, 25, 1) class USTimeZone(tzinfo): def __init__(self, hours, reprname, stdname, dstname): self.stdoffset = timedelta(hours=hours) self.reprname = reprname self.stdname = stdname self.dstname = dstname def __repr__(self): return self.reprname def tzname(self, dt): if self.dst(dt): return self.dstname else: return self.stdname def utcoffset(self, dt): return self.stdoffset + self.dst(dt) def dst(self, dt): if dt is None or dt.tzinfo is None: # An exception instead may be sensible here, in one or more of # the cases. return ZERO assert dt.tzinfo is self # Find first Sunday in April. start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year)) assert start.weekday() == 6 and start.month == 4 and start.day <= 7 # Find last Sunday in October. end = first_sunday_on_or_after(DSTEND.replace(year=dt.year)) assert end.weekday() == 6 and end.month == 10 and end.day >= 25 # Can't compare naive to aware objects, so strip the timezone from # dt first. if start <= dt.replace(tzinfo=None) < end: return HOUR else: return ZERO Eastern = USTimeZone(-5, "Eastern", "EST", "EDT") Central = USTimeZone(-6, "Central", "CST", "CDT") Mountain = USTimeZone(-7, "Mountain", "MST", "MDT") Pacific = USTimeZone(-8, "Pacific", "PST", "PDT") utc_real = FixedOffset(0, "UTC", 0) # For better test coverage, we want another flavor of UTC that's west of # the Eastern and Pacific timezones. utc_fake = FixedOffset(-12*60, "UTCfake", 0) class TestTimezoneConversions(unittest.TestCase): # The DST switch times for 2002, in std time. dston = datetime(2002, 4, 7, 2) dstoff = datetime(2002, 10, 27, 1) theclass = datetime # Check a time that's inside DST. def checkinside(self, dt, tz, utc, dston, dstoff): self.assertEqual(dt.dst(), HOUR) # Conversion to our own timezone is always an identity. self.assertEqual(dt.astimezone(tz), dt) asutc = dt.astimezone(utc) there_and_back = asutc.astimezone(tz) # Conversion to UTC and back isn't always an identity here, # because there are redundant spellings (in local time) of # UTC time when DST begins: the clock jumps from 1:59:59 # to 3:00:00, and a local time of 2:MM:SS doesn't really # make sense then. 
The classes above treat 2:MM:SS as
        # daylight time then (it's "after 2am"), really an alias
        # for 1:MM:SS standard time.  The latter form is what
        # conversion back from UTC produces.
        if dt.date() == dston.date() and dt.hour == 2:
            # We're in the redundant hour, and coming back from
            # UTC gives the 1:MM:SS standard-time spelling.
            self.assertEqual(there_and_back + HOUR, dt)
            # Although during was considered to be in daylight
            # time, there_and_back is not.
            self.assertEqual(there_and_back.dst(), ZERO)
            # They're the same times in UTC.
            self.assertEqual(there_and_back.astimezone(utc),
                             dt.astimezone(utc))
        else:
            # We're not in the redundant hour.
            self.assertEqual(dt, there_and_back)

        # Because we have a redundant spelling when DST begins, there is
        # (unfortunately) an hour when DST ends that can't be spelled at all in
        # local time.  When DST ends, the clock jumps from 1:59 back to 1:00
        # again.  The hour 1:MM DST has no spelling then:  1:MM is taken to be
        # standard time.  1:MM DST == 0:MM EST, but 0:MM is taken to be
        # daylight time.  The hour 1:MM daylight == 0:MM standard can't be
        # expressed in local time.  Nevertheless, we want conversion back
        # from UTC to mimic the local clock's "repeat an hour" behavior.
        nexthour_utc = asutc + HOUR
        nexthour_tz = nexthour_utc.astimezone(tz)
        if dt.date() == dstoff.date() and dt.hour == 0:
            # We're in the hour before the last DST hour.  The last DST hour
            # is ineffable.  We want the conversion back to repeat 1:MM.
            self.assertEqual(nexthour_tz, dt.replace(hour=1))
            nexthour_utc += HOUR
            nexthour_tz = nexthour_utc.astimezone(tz)
            self.assertEqual(nexthour_tz, dt.replace(hour=1))
        else:
            self.assertEqual(nexthour_tz - dt, HOUR)

    # Check a time that's outside DST.
    def checkoutside(self, dt, tz, utc):
        self.assertEqual(dt.dst(), ZERO)

        # Conversion to our own timezone is always an identity.
        self.assertEqual(dt.astimezone(tz), dt)

        # Converting to UTC and back is an identity too.
        asutc = dt.astimezone(utc)
        there_and_back = asutc.astimezone(tz)
        self.assertEqual(dt, there_and_back)

    def convert_between_tz_and_utc(self, tz, utc):
        dston = self.dston.replace(tzinfo=tz)
        # Because 1:MM on the day DST ends is taken as being standard time,
        # there is no spelling in tz for the last hour of daylight time.
        # For purposes of the test, the last hour of DST is 0:MM, which is
        # taken as being daylight time (and 1:MM is taken as being standard
        # time).
        dstoff = self.dstoff.replace(tzinfo=tz)
        for delta in (timedelta(weeks=13),
                      DAY,
                      HOUR,
                      timedelta(minutes=1),
                      timedelta(microseconds=1)):

            self.checkinside(dston, tz, utc, dston, dstoff)
            for during in dston + delta, dstoff - delta:
                self.checkinside(during, tz, utc, dston, dstoff)

            self.checkoutside(dstoff, tz, utc)
            for outside in dston - delta, dstoff + delta:
                self.checkoutside(outside, tz, utc)

    def test_easy(self):
        # Despite the name of this test, the endcases are excruciating.
        self.convert_between_tz_and_utc(Eastern, utc_real)
        self.convert_between_tz_and_utc(Pacific, utc_real)
        self.convert_between_tz_and_utc(Eastern, utc_fake)
        self.convert_between_tz_and_utc(Pacific, utc_fake)
        # The next is really dancing near the edge.  It works because
        # Pacific and Eastern are far enough apart that their "problem
        # hours" don't overlap.
        self.convert_between_tz_and_utc(Eastern, Pacific)
        self.convert_between_tz_and_utc(Pacific, Eastern)
        # OTOH, these fail!  Don't enable them.  The difficulty is that
        # the edge case tests assume that every hour is representable in
        # the "utc" class.  This is always true for a fixed-offset tzinfo
        # class (like utc_real and utc_fake), but not for Eastern or Central.
        # For these adjacent DST-aware time zones, the range of time offsets
        # tested ends up creating hours in the one that aren't representable
        # in the other.  For the same reason, we would see failures in the
        # Eastern vs Pacific tests too if we added 3*HOUR to the list of
        # offset deltas in convert_between_tz_and_utc().
        #
        # self.convert_between_tz_and_utc(Eastern, Central)  # can't work
        # self.convert_between_tz_and_utc(Central, Eastern)  # can't work

    def test_tricky(self):
        # 22:00 on day before daylight starts.
        fourback = self.dston - timedelta(hours=4)
        ninewest = FixedOffset(-9*60, "-0900", 0)
        fourback = fourback.replace(tzinfo=ninewest)
        # 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST.  Since it's "after
        # 2", we should get the 3 spelling.
        # If we plug 22:00 the day before into Eastern, it "looks like std
        # time", so its offset is returned as -5, and -5 - -9 = 4.  Adding 4
        # to 22:00 lands on 2:00, which makes no sense in local time (the
        # local clock jumps from 1 to 3).  The point here is to make sure we
        # get the 3 spelling.
        expected = self.dston.replace(hour=3)
        got = fourback.astimezone(Eastern).replace(tzinfo=None)
        self.assertEqual(expected, got)

        # Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST.  In that
        # case we want the 1:00 spelling.
        sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
        # Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
        # and adding -4-0 == -4 gives the 2:00 spelling.  We want the 1:00 EST
        # spelling.
        expected = self.dston.replace(hour=1)
        got = sixutc.astimezone(Eastern).replace(tzinfo=None)
        self.assertEqual(expected, got)

        # Now on the day DST ends, we want "repeat an hour" behavior.
        #       UTC  4:MM  5:MM  6:MM  7:MM  checking these
        #       EST 23:MM  0:MM  1:MM  2:MM
        #       EDT  0:MM  1:MM  2:MM  3:MM
        #      wall  0:MM  1:MM  1:MM  2:MM  against these
        for utc in utc_real, utc_fake:
            for tz in Eastern, Pacific:
                first_std_hour = self.dstoff - timedelta(hours=2)  # 23:MM
                # Convert that to UTC.
                first_std_hour -= tz.utcoffset(None)
                # Adjust for possibly fake UTC.
                asutc = first_std_hour + utc.utcoffset(None)
                # First UTC hour to convert; this is 4:00 when utc=utc_real &
                # tz=Eastern.
                asutcbase = asutc.replace(tzinfo=utc)
                for tzhour in (0, 1, 1, 2):
                    expectedbase = self.dstoff.replace(hour=tzhour)
                    for minute in 0, 30, 59:
                        expected = expectedbase.replace(minute=minute)
                        asutc = asutcbase.replace(minute=minute)
                        astz = asutc.astimezone(tz)
                        self.assertEqual(astz.replace(tzinfo=None), expected)
                    asutcbase += HOUR

    def test_bogus_dst(self):
        class ok(tzinfo):
            def utcoffset(self, dt): return HOUR
            def dst(self, dt): return HOUR

        now = self.theclass.now().replace(tzinfo=utc_real)
        # Doesn't blow up.
        now.astimezone(ok())

        # Does blow up.
        class notok(ok):
            def dst(self, dt): return None
        self.assertRaises(ValueError, now.astimezone, notok())

    def test_fromutc(self):
        self.assertRaises(TypeError, Eastern.fromutc)   # not enough args
        now = datetime.utcnow().replace(tzinfo=utc_real)
        self.assertRaises(ValueError, Eastern.fromutc, now)  # wrong tzinfo
        now = now.replace(tzinfo=Eastern)   # insert correct tzinfo
        enow = Eastern.fromutc(now)         # doesn't blow up
        self.assertEqual(enow.tzinfo, Eastern)  # has right tzinfo member
        self.assertRaises(TypeError, Eastern.fromutc, now, now)  # too many args
        self.assertRaises(TypeError, Eastern.fromutc, date.today())  # wrong type

        # Always converts UTC to standard time.
        class FauxUSTimeZone(USTimeZone):
            def fromutc(self, dt):
                return dt + self.stdoffset
        FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")

        #  UTC  4:MM  5:MM  6:MM  7:MM  8:MM  9:MM
        #  EST 23:MM  0:MM  1:MM  2:MM  3:MM  4:MM
        #  EDT  0:MM  1:MM  2:MM  3:MM  4:MM  5:MM

        # Check around DST start.
        start = self.dston.replace(hour=4, tzinfo=Eastern)
        fstart = start.replace(tzinfo=FEastern)
        for wall in 23, 0, 1, 3, 4, 5:
            expected = start.replace(hour=wall)
            if wall == 23:
                expected -= timedelta(days=1)
            got = Eastern.fromutc(start)
            self.assertEqual(expected, got)

            expected = fstart + FEastern.stdoffset
            got = FEastern.fromutc(fstart)
            self.assertEqual(expected, got)

            # Ensure astimezone() calls fromutc() too.
            got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
            self.assertEqual(expected, got)

            start += HOUR
            fstart += HOUR

        # Check around DST end.
        start = self.dstoff.replace(hour=4, tzinfo=Eastern)
        fstart = start.replace(tzinfo=FEastern)
        for wall in 0, 1, 1, 2, 3, 4:
            expected = start.replace(hour=wall)
            got = Eastern.fromutc(start)
            self.assertEqual(expected, got)

            expected = fstart + FEastern.stdoffset
            got = FEastern.fromutc(fstart)
            self.assertEqual(expected, got)

            # Ensure astimezone() calls fromutc() too.
            got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
            self.assertEqual(expected, got)

            start += HOUR
            fstart += HOUR


#############################################################################
# oddballs

class Oddballs(unittest.TestCase):

    def test_bug_1028306(self):
        # Trying to compare a date to a datetime should act like a mixed-
        # type comparison, despite that datetime is a subclass of date.
        as_date = date.today()
        as_datetime = datetime.combine(as_date, time())
        self.assert_(as_date != as_datetime)
        self.assert_(as_datetime != as_date)
        self.assert_(not as_date == as_datetime)
        self.assert_(not as_datetime == as_date)
        self.assertRaises(TypeError, lambda: as_date < as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime < as_date)
        self.assertRaises(TypeError, lambda: as_date <= as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime <= as_date)
        self.assertRaises(TypeError, lambda: as_date > as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime > as_date)
        self.assertRaises(TypeError, lambda: as_date >= as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime >= as_date)

        # Nevertheless, comparison should work with the base-class (date)
        # projection if use of a date method is forced.
        self.assert_(as_date.__eq__(as_datetime))
        different_day = (as_date.day + 1) % 20 + 1
        self.assert_(not as_date.__eq__(as_datetime.replace(day=
                                                            different_day)))

        # And date should compare with other subclasses of date.  If a
        # subclass wants to stop this, it's up to the subclass to do so.
        date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
        self.assertEqual(as_date, date_sc)
        self.assertEqual(date_sc, as_date)

        # Ditto for datetimes.
        datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
                                       as_date.day, 0, 0, 0)
        self.assertEqual(as_datetime, datetime_sc)
        self.assertEqual(datetime_sc, as_datetime)

def test_main():
    test_support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
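# Illustrative sketch (not part of the original test file): how the USTimeZone
# helpers defined above behave for a date that falls inside US DST.  The
# example datetime below is arbitrary; the expected values follow directly from
# Eastern = USTimeZone(-5, ...) together with the first-Sunday-in-April /
# last-Sunday-in-October rules encoded in dst():
#
#     dt = datetime(2002, 7, 4, 12, 0, tzinfo=Eastern)  # mid-summer -> DST
#     dt.tzname()     # 'EDT'
#     dt.utcoffset()  # timedelta(hours=-4), i.e. stdoffset (-5h) + dst (1h)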
apache-2.0
907,643,222,827,289,300
38.96272
80
0.565566
false
sexroute/commandergenius
project/jni/python/src/Lib/encodings/iso2022_kr.py
816
1053
# # iso2022_kr.py: Python Unicode Codec for ISO2022_KR # # Written by Hye-Shik Chang <[email protected]> # import _codecs_iso2022, codecs import _multibytecodec as mbc codec = _codecs_iso2022.getcodec('iso2022_kr') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='iso2022_kr', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
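# Illustrative usage sketch (not part of the original module): once the
# encodings package registers this module, the codec is reachable through the
# standard string APIs by name.  The sample text below is arbitrary Hangul
# assumed to be representable in KS X 1001.
#
#     import codecs
#     data = u'\uc548\ub155'.encode('iso2022_kr')      # escape-encoded bytes
#     assert codecs.decode(data, 'iso2022_kr') == u'\uc548\ub155'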
lgpl-2.1
3,049,885,450,164,686,000
26
74
0.703704
false
GustavoHennig/ansible
lib/ansible/modules/cloud/misc/xenserver_facts.py
69
5402
#!/usr/bin/python -tt # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: xenserver_facts version_added: "2.0" short_description: get facts reported on xenserver description: - Reads data out of XenAPI, can be used instead of multiple xe commands. author: - Andy Hill (@andyhky) - Tim Rupp options: {} ''' EXAMPLES = ''' - name: Gather facts from xenserver xenserver: - name: Print running VMs debug: msg: "{{ item }}" with_items: "{{ xs_vms.keys() }}" when: xs_vms[item]['power_state'] == "Running" # Which will print: # # TASK: [Print running VMs] *********************************************************** # skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit)) # ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => { # "item": "Control domain on host: 10.0.13.22", # "msg": "Control domain on host: 10.0.13.22" # } ''' import platform HAVE_XENAPI = False try: import XenAPI HAVE_XENAPI = True except ImportError: pass class XenServerFacts: def __init__(self): self.codes = { '5.5.0': 'george', '5.6.100': 'oxford', '6.0.0': 'boston', '6.1.0': 'tampa', '6.2.0': 'clearwater' } @property def version(self): # Be aware! Deprecated in Python 2.6! result = platform.dist()[1] return result @property def codename(self): if self.version in self.codes: result = self.codes[self.version] else: result = None return result def get_xenapi_session(): session = XenAPI.xapi_local() session.xenapi.login_with_password('', '') return session def get_networks(session): recs = session.xenapi.network.get_all_records() xs_networks = {} networks = change_keys(recs, key='uuid') for network in networks.values(): xs_networks[network['name_label']] = network return xs_networks def get_pifs(session): recs = session.xenapi.PIF.get_all_records() pifs = change_keys(recs, key='uuid') xs_pifs = {} devicenums = range(0, 7) for pif in pifs.values(): for eth in devicenums: interface_name = "eth%s" % (eth) bond_name = interface_name.replace('eth', 'bond') if pif['device'] == interface_name: xs_pifs[interface_name] = pif elif pif['device'] == bond_name: xs_pifs[bond_name] = pif return xs_pifs def get_vlans(session): recs = session.xenapi.VLAN.get_all_records() return change_keys(recs, key='tag') def change_keys(recs, key='uuid', filter_func=None): """ Take a xapi dict, and make the keys the value of recs[ref][key]. 
Preserves the ref in rec['ref'] """ new_recs = {} for ref, rec in recs.items(): if filter_func is not None and not filter_func(rec): continue new_recs[rec[key]] = rec new_recs[rec[key]]['ref'] = ref return new_recs def get_host(session): """Get the host""" host_recs = session.xenapi.host.get_all() # We only have one host, so just return its entry return session.xenapi.host.get_record(host_recs[0]) def get_vms(session): xs_vms = {} recs = session.xenapi.VM.get_all() if not recs: return None vms = change_keys(recs, key='uuid') for vm in vms.values(): xs_vms[vm['name_label']] = vm return xs_vms def get_srs(session): xs_srs = {} recs = session.xenapi.SR.get_all() if not recs: return None srs = change_keys(recs, key='uuid') for sr in srs.values(): xs_srs[sr['name_label']] = sr return xs_srs def main(): module = AnsibleModule({}) if not HAVE_XENAPI: module.fail_json(changed=False, msg="python xen api required for this module") obj = XenServerFacts() try: session = get_xenapi_session() except XenAPI.Failure as e: module.fail_json(msg='%s' % e) data = { 'xenserver_version': obj.version, 'xenserver_codename': obj.codename } xs_networks = get_networks(session) xs_pifs = get_pifs(session) xs_vlans = get_vlans(session) xs_vms = get_vms(session) xs_srs = get_srs(session) if xs_vlans: data['xs_vlans'] = xs_vlans if xs_pifs: data['xs_pifs'] = xs_pifs if xs_networks: data['xs_networks'] = xs_networks if xs_vms: data['xs_vms'] = xs_vms if xs_srs: data['xs_srs'] = xs_srs module.exit_json(ansible=data) from ansible.module_utils.basic import * if __name__ == '__main__': main()
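# Illustrative sketch (not part of the original module): what change_keys()
# produces for a xapi-style record mapping.  The ref and uuid values below are
# invented for the example.
#
#     recs = {'OpaqueRef:1': {'uuid': 'abc-123', 'device': 'eth0'}}
#     change_keys(recs)
#     # -> {'abc-123': {'uuid': 'abc-123', 'device': 'eth0',
#     #                 'ref': 'OpaqueRef:1'}}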
gpl-3.0
3,760,561,686,781,296,000
24.125581
87
0.593854
false
babyliynfg/cross
tools/project-creator/Python2.6.6/Lib/email/mime/message.py
73
1320
# Copyright (C) 2001-2006 Python Software Foundation # Author: Barry Warsaw # Contact: [email protected] """Class representing message/* MIME documents.""" __all__ = ['MIMEMessage'] from email import message from email.mime.nonmultipart import MIMENonMultipart class MIMEMessage(MIMENonMultipart): """Class representing message/* MIME documents.""" def __init__(self, _msg, _subtype='rfc822'): """Create a message/* type MIME document. _msg is a message object and must be an instance of Message, or a derived class of Message, otherwise a TypeError is raised. Optional _subtype defines the subtype of the contained message. The default is "rfc822" (this is defined by the MIME standard, even though the term "rfc822" is technically outdated by RFC 2822). """ MIMENonMultipart.__init__(self, 'message', _subtype) if not isinstance(_msg, message.Message): raise TypeError('Argument is not an instance of Message') # It's convenient to use this base class method. We need to do it # this way or we'll get an exception message.Message.attach(self, _msg) # And be sure our default type is set correctly self.set_default_type('message/rfc822')
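# Illustrative usage sketch (not part of the original module): wrapping an
# existing Message in a message/rfc822 container.  The headers and body below
# are made-up example values.
#
#     from email.message import Message
#     from email.mime.message import MIMEMessage
#
#     inner = Message()
#     inner['Subject'] = 'Forwarded note'
#     inner.set_payload('original body text')
#     outer = MIMEMessage(inner)
#     assert outer.get_content_type() == 'message/rfc822'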
mit
-3,318,329,596,595,224,600
35.742857
78
0.657576
false
yjmade/odoo
openerp/addons/base/res/res_partner.py
22
39855
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import datetime from lxml import etree import math import pytz import urlparse import openerp from openerp import tools, api from openerp.osv import osv, fields from openerp.osv.expression import get_unaccent_wrapper from openerp.tools.translate import _ ADDRESS_FORMAT_LAYOUTS = { '%(city)s %(state_code)s\n%(zip)s': """ <div class="address_format"> <field name="city" placeholder="City" style="width: 50%%"/> <field name="state_id" class="oe_no_button" placeholder="State" style="width: 47%%" options='{"no_open": true}'/> <br/> <field name="zip" placeholder="ZIP"/> </div> """, '%(zip)s %(city)s': """ <div class="address_format"> <field name="zip" placeholder="ZIP" style="width: 40%%"/> <field name="city" placeholder="City" style="width: 57%%"/> <br/> <field name="state_id" class="oe_no_button" placeholder="State" options='{"no_open": true}'/> </div> """, '%(city)s\n%(state_name)s\n%(zip)s': """ <div class="address_format"> <field name="city" placeholder="City"/> <field name="state_id" class="oe_no_button" placeholder="State" options='{"no_open": true}'/> <field name="zip" placeholder="ZIP"/> </div> """ } class format_address(object): @api.model def fields_view_get_address(self, arch): fmt = self.env.user.company_id.country_id.address_format or '' for k, v in ADDRESS_FORMAT_LAYOUTS.items(): if k in fmt: doc = etree.fromstring(arch) for node in doc.xpath("//div[@class='address_format']"): tree = etree.fromstring(v) node.getparent().replace(node, tree) arch = etree.tostring(doc) break return arch @api.model def _tz_get(self): # put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728 return [(tz,tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')] class res_partner_category(osv.Model): def name_get(self, cr, uid, ids, context=None): """ Return the categories' display name, including their direct parent by default. If ``context['partner_category_display']`` is ``'short'``, the short version of the category name (without the direct parent) is used. The default is the long version. 
""" if not isinstance(ids, list): ids = [ids] if context is None: context = {} if context.get('partner_category_display') == 'short': return super(res_partner_category, self).name_get(cr, uid, ids, context=context) res = [] for category in self.browse(cr, uid, ids, context=context): names = [] current = category while current: names.append(current.name) current = current.parent_id res.append((category.id, ' / '.join(reversed(names)))) return res @api.model def name_search(self, name, args=None, operator='ilike', limit=100): args = args or [] if name: # Be sure name_search is symetric to name_get name = name.split(' / ')[-1] args = [('name', operator, name)] + args categories = self.search(args, limit=limit) return categories.name_get() @api.multi def _name_get_fnc(self, field_name, arg): return dict(self.name_get()) _description = 'Partner Tags' _name = 'res.partner.category' _columns = { 'name': fields.char('Category Name', required=True, translate=True), 'parent_id': fields.many2one('res.partner.category', 'Parent Category', select=True, ondelete='cascade'), 'complete_name': fields.function(_name_get_fnc, type="char", string='Full Name'), 'child_ids': fields.one2many('res.partner.category', 'parent_id', 'Child Categories'), 'active': fields.boolean('Active', help="The active field allows you to hide the category without removing it."), 'parent_left': fields.integer('Left parent', select=True), 'parent_right': fields.integer('Right parent', select=True), 'partner_ids': fields.many2many('res.partner', id1='category_id', id2='partner_id', string='Partners'), } _constraints = [ (osv.osv._check_recursion, 'Error ! You can not create recursive categories.', ['parent_id']) ] _defaults = { 'active': 1, } _parent_store = True _parent_order = 'name' _order = 'parent_left' class res_partner_title(osv.osv): _name = 'res.partner.title' _order = 'name' _columns = { 'name': fields.char('Title', required=True, translate=True), 'shortcut': fields.char('Abbreviation', translate=True), 'domain': fields.selection([('partner', 'Partner'), ('contact', 'Contact')], 'Domain', required=True) } _defaults = { 'domain': 'contact', } @api.model def _lang_get(self): languages = self.env['res.lang'].search([]) return [(language.code, language.name) for language in languages] # fields copy if 'use_parent_address' is checked ADDRESS_FIELDS = ('street', 'street2', 'zip', 'city', 'state_id', 'country_id') class res_partner(osv.Model, format_address): _description = 'Partner' _name = "res.partner" def _address_display(self, cr, uid, ids, name, args, context=None): res = {} for partner in self.browse(cr, uid, ids, context=context): res[partner.id] = self._display_address(cr, uid, partner, context=context) return res @api.multi def _get_tz_offset(self, name, args): return dict( (p.id, datetime.datetime.now(pytz.timezone(p.tz or 'GMT')).strftime('%z')) for p in self) @api.multi def _get_image(self, name, args): return dict((p.id, tools.image_get_resized_images(p.image)) for p in self) @api.one def _set_image(self, name, value, args): return self.write({'image': tools.image_resize_image_big(value)}) @api.multi def _has_image(self, name, args): return dict((p.id, bool(p.image)) for p in self) def _commercial_partner_compute(self, cr, uid, ids, name, args, context=None): """ Returns the partner that is considered the commercial entity of this partner. 
The commercial entity holds the master data for all commercial fields (see :py:meth:`~_commercial_fields`) """
        result = dict.fromkeys(ids, False)
        for partner in self.browse(cr, uid, ids, context=context):
            current_partner = partner
            while not current_partner.is_company and current_partner.parent_id:
                current_partner = current_partner.parent_id
            result[partner.id] = current_partner.id
        return result

    def _display_name_compute(self, cr, uid, ids, name, args, context=None):
        context = dict(context or {})
        context.pop('show_address', None)
        context.pop('show_address_only', None)
        context.pop('show_email', None)
        return dict(self.name_get(cr, uid, ids, context=context))

    # indirections to avoid passing a copy of the overridable method when declaring the function field
    _commercial_partner_id = lambda self, *args, **kwargs: self._commercial_partner_compute(*args, **kwargs)
    _display_name = lambda self, *args, **kwargs: self._display_name_compute(*args, **kwargs)

    _commercial_partner_store_triggers = {
        'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)], context=dict(active_test=False)),
                        ['parent_id', 'is_company'], 10)
    }
    _display_name_store_triggers = {
        'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)], context=dict(active_test=False)),
                        ['parent_id', 'is_company', 'name'], 10)
    }

    _order = "display_name"

    _columns = {
        'name': fields.char('Name', required=True, select=True),
        'display_name': fields.function(_display_name, type='char', string='Name', store=_display_name_store_triggers, select=True),
        'date': fields.date('Date', select=1),
        'title': fields.many2one('res.partner.title', 'Title'),
        'parent_id': fields.many2one('res.partner', 'Related Company', select=True),
        'child_ids': fields.one2many('res.partner', 'parent_id', 'Contacts', domain=[('active','=',True)]), # force "active_test" domain to bypass _search() override
        'ref': fields.char('Contact Reference', select=1),
        'lang': fields.selection(_lang_get, 'Language',
            help="If the selected language is loaded in the system, all documents related to this contact will be printed in this language. If not, it will be English."),
        'tz': fields.selection(_tz_get, 'Timezone', size=64,
            help="The partner's timezone, used to output proper date and time values inside printed reports. "
                 "It is important to set a value for this field. You should use the same timezone "
                 "that is otherwise used to pick and render date and time values: your computer's timezone."),
        'tz_offset': fields.function(_get_tz_offset, type='char', size=5, string='Timezone offset', invisible=True),
        'user_id': fields.many2one('res.users', 'Salesperson', help='The internal user that is in charge of communicating with this contact, if any.'),
        'vat': fields.char('TIN', help="Tax Identification Number. Check the box if this contact is subject to taxes. Used by some of the legal statements."),
        'bank_ids': fields.one2many('res.partner.bank', 'partner_id', 'Banks'),
        'website': fields.char('Website', help="Website of Partner or Company"),
        'comment': fields.text('Notes'),
        'category_id': fields.many2many('res.partner.category', id1='partner_id', id2='category_id', string='Tags'),
        'credit_limit': fields.float(string='Credit Limit'),
        'ean13': fields.char('EAN13', size=13),
        'active': fields.boolean('Active'),
        'customer': fields.boolean('Customer', help="Check this box if this contact is a customer."),
        'supplier': fields.boolean('Supplier', help="Check this box if this contact is a supplier.
If it's not checked, purchase people will not see it when encoding a purchase order."),
        'employee': fields.boolean('Employee', help="Check this box if this contact is an Employee."),
        'function': fields.char('Job Position'),
        'type': fields.selection([('default', 'Default'), ('invoice', 'Invoice'), ('delivery', 'Shipping'), ('contact', 'Contact'), ('other', 'Other')],
            'Address Type', help="Used to automatically select the right address according to the context in sales and purchase documents."),
        'street': fields.char('Street'),
        'street2': fields.char('Street2'),
        'zip': fields.char('Zip', size=24, change_default=True),
        'city': fields.char('City'),
        'state_id': fields.many2one("res.country.state", 'State', ondelete='restrict'),
        'country_id': fields.many2one('res.country', 'Country', ondelete='restrict'),
        'email': fields.char('Email'),
        'phone': fields.char('Phone'),
        'fax': fields.char('Fax'),
        'mobile': fields.char('Mobile'),
        'birthdate': fields.char('Birthdate'),
        'is_company': fields.boolean('Is a Company', help="Check if the contact is a company, otherwise it is a person"),
        'use_parent_address': fields.boolean('Use Company Address', help="Select this if you want to set the company's address information for this contact"),
        # image: all image fields are base64 encoded and PIL-supported
        'image': fields.binary("Image",
            help="This field holds the image used as avatar for this contact, limited to 1024x1024px"),
        'image_medium': fields.function(_get_image, fnct_inv=_set_image,
            string="Medium-sized image", type="binary", multi="_get_image",
            store={
                'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Medium-sized image of this contact. It is automatically "\
                 "resized as a 128x128px image, with aspect ratio preserved. "\
                 "Use this field in form views or some kanban views."),
        'image_small': fields.function(_get_image, fnct_inv=_set_image,
            string="Small-sized image", type="binary", multi="_get_image",
            store={
                'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Small-sized image of this contact. It is automatically "\
                 "resized as a 64x64px image, with aspect ratio preserved.
"\ "Use this field anywhere a small image is required."), 'has_image': fields.function(_has_image, type="boolean"), 'company_id': fields.many2one('res.company', 'Company', select=1), 'color': fields.integer('Color Index'), 'user_ids': fields.one2many('res.users', 'partner_id', 'Users'), 'contact_address': fields.function(_address_display, type='char', string='Complete Address'), # technical field used for managing commercial fields 'commercial_partner_id': fields.function(_commercial_partner_id, type='many2one', relation='res.partner', string='Commercial Entity', store=_commercial_partner_store_triggers) } @api.model def _default_category(self): category_id = self.env.context.get('category_id', False) return [category_id] if category_id else False @api.model def _get_default_image(self, is_company, colorize=False): img_path = openerp.modules.get_module_resource( 'base', 'static/src/img', 'company_image.png' if is_company else 'avatar.png') with open(img_path, 'rb') as f: image = f.read() # colorize user avatars if not is_company: image = tools.image_colorize(image) return tools.image_resize_image_big(image.encode('base64')) def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): if (not view_id) and (view_type=='form') and context and context.get('force_email', False): view_id = self.pool['ir.model.data'].get_object_reference(cr, user, 'base', 'view_partner_simple_form')[1] res = super(res_partner,self).fields_view_get(cr, user, view_id, view_type, context, toolbar=toolbar, submenu=submenu) if view_type == 'form': res['arch'] = self.fields_view_get_address(cr, user, res['arch'], context=context) return res @api.model def _default_company(self): return self.env['res.company']._company_default_get('res.partner') _defaults = { 'active': True, 'lang': api.model(lambda self: self.env.lang), 'tz': api.model(lambda self: self.env.context.get('tz', False)), 'customer': True, 'category_id': _default_category, 'company_id': _default_company, 'color': 0, 'is_company': False, 'type': 'contact', # type 'default' is wildcard and thus inappropriate 'use_parent_address': False, 'image': False, } _constraints = [ (osv.osv._check_recursion, 'You cannot create recursive Partner hierarchies.', ['parent_id']), ] @api.one def copy(self, default=None): default = dict(default or {}) default['name'] = _('%s (copy)') % self.name return super(res_partner, self).copy(default) @api.multi def onchange_type(self, is_company): value = {'title': False} if is_company: value['use_parent_address'] = False domain = {'title': [('domain', '=', 'partner')]} else: domain = {'title': [('domain', '=', 'contact')]} return {'value': value, 'domain': domain} def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None): def value_or_id(val): """ return val or val.id if val is a browse record """ return val if isinstance(val, (bool, int, long, float, basestring)) else val.id result = {} if parent_id: if ids: partner = self.browse(cr, uid, ids[0], context=context) if partner.parent_id and partner.parent_id.id != parent_id: result['warning'] = {'title': _('Warning'), 'message': _('Changing the company of a contact should only be done if it ' 'was never correctly set. If an existing contact starts working for a new ' 'company then a new contact should be created under that new ' 'company. 
You can use the "Discard" button to abandon this change.')} if use_parent_address: parent = self.browse(cr, uid, parent_id, context=context) address_fields = self._address_fields(cr, uid, context=context) result['value'] = dict((key, value_or_id(parent[key])) for key in address_fields) else: result['value'] = {'use_parent_address': False} return result @api.multi def onchange_state(self, state_id): if state_id: state = self.env['res.country.state'].browse(state_id) return {'value': {'country_id': state.country_id.id}} return {} def _check_ean_key(self, cr, uid, ids, context=None): for partner_o in self.pool['res.partner'].read(cr, uid, ids, ['ean13',]): thisean=partner_o['ean13'] if thisean and thisean!='': if len(thisean)!=13: return False sum=0 for i in range(12): if not (i % 2): sum+=int(thisean[i]) else: sum+=3*int(thisean[i]) if math.ceil(sum/10.0)*10-sum!=int(thisean[12]): return False return True # _constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean13'])] def _update_fields_values(self, cr, uid, partner, fields, context=None): """ Returns dict of write() values for synchronizing ``fields`` """ values = {} for field in fields: column = self._all_columns[field].column if column._type == 'one2many': raise AssertionError('One2Many fields cannot be synchronized as part of `commercial_fields` or `address fields`') if column._type == 'many2one': values[field] = partner[field].id if partner[field] else False elif column._type == 'many2many': values[field] = [(6,0,[r.id for r in partner[field] or []])] else: values[field] = partner[field] return values def _address_fields(self, cr, uid, context=None): """ Returns the list of address fields that are synced from the parent when the `use_parent_address` flag is set. """ return list(ADDRESS_FIELDS) def update_address(self, cr, uid, ids, vals, context=None): address_fields = self._address_fields(cr, uid, context=context) addr_vals = dict((key, vals[key]) for key in address_fields if key in vals) if addr_vals: return super(res_partner, self).write(cr, uid, ids, addr_vals, context) def _commercial_fields(self, cr, uid, context=None): """ Returns the list of fields that are managed by the commercial entity to which a partner belongs. These fields are meant to be hidden on partners that aren't `commercial entities` themselves, and will be delegated to the parent `commercial entity`. The list is meant to be extended by inheriting classes. 
""" return ['vat'] def _commercial_sync_from_company(self, cr, uid, partner, context=None): """ Handle sync of commercial fields when a new parent commercial entity is set, as if they were related fields """ commercial_partner = partner.commercial_partner_id if not commercial_partner: # On child partner creation of a parent partner, # the commercial_partner_id is not yet computed commercial_partner_id = self._commercial_partner_compute( cr, uid, [partner.id], 'commercial_partner_id', [], context=context)[partner.id] commercial_partner = self.browse(cr, uid, commercial_partner_id, context=context) if commercial_partner != partner: commercial_fields = self._commercial_fields(cr, uid, context=context) sync_vals = self._update_fields_values(cr, uid, commercial_partner, commercial_fields, context=context) partner.write(sync_vals) def _commercial_sync_to_children(self, cr, uid, partner, context=None): """ Handle sync of commercial fields to descendants """ commercial_fields = self._commercial_fields(cr, uid, context=context) commercial_partner = partner.commercial_partner_id if not commercial_partner: # On child partner creation of a parent partner, # the commercial_partner_id is not yet computed commercial_partner_id = self._commercial_partner_compute( cr, uid, [partner.id], 'commercial_partner_id', [], context=context)[partner.id] commercial_partner = self.browse(cr, uid, commercial_partner_id, context=context) sync_vals = self._update_fields_values(cr, uid, commercial_partner, commercial_fields, context=context) sync_children = [c for c in partner.child_ids if not c.is_company] for child in sync_children: self._commercial_sync_to_children(cr, uid, child, context=context) return self.write(cr, uid, [c.id for c in sync_children], sync_vals, context=context) def _fields_sync(self, cr, uid, partner, update_values, context=None): """ Sync commercial fields and address fields from company and to children after create/update, just as if those were all modeled as fields.related to the parent """ # 1. From UPSTREAM: sync from parent if update_values.get('parent_id') or update_values.get('use_parent_address'): # 1a. Commercial fields: sync if parent changed if update_values.get('parent_id'): self._commercial_sync_from_company(cr, uid, partner, context=context) # 1b. Address fields: sync if parent or use_parent changed *and* both are now set if partner.parent_id and partner.use_parent_address: onchange_vals = self.onchange_address(cr, uid, [partner.id], use_parent_address=partner.use_parent_address, parent_id=partner.parent_id.id, context=context).get('value', {}) partner.update_address(onchange_vals) # 2. To DOWNSTREAM: sync children if partner.child_ids: # 2a. Commercial Fields: sync if commercial entity if partner.commercial_partner_id == partner: commercial_fields = self._commercial_fields(cr, uid, context=context) if any(field in update_values for field in commercial_fields): self._commercial_sync_to_children(cr, uid, partner, context=context) # 2b. 
Address fields: sync if address changed
            address_fields = self._address_fields(cr, uid, context=context)
            if any(field in update_values for field in address_fields):
                domain_children = [('parent_id', '=', partner.id), ('use_parent_address', '=', True)]
                update_ids = self.search(cr, uid, domain_children, context=context)
                self.update_address(cr, uid, update_ids, update_values, context=context)

    def _handle_first_contact_creation(self, cr, uid, partner, context=None):
        """ On creation of the first contact for a company (or root) that has no address, assume the contact address was meant to be the company address """
        parent = partner.parent_id
        address_fields = self._address_fields(cr, uid, context=context)
        if parent and (parent.is_company or not parent.parent_id) and len(parent.child_ids) == 1 and \
            any(partner[f] for f in address_fields) and not any(parent[f] for f in address_fields):
            addr_vals = self._update_fields_values(cr, uid, partner, address_fields, context=context)
            parent.update_address(addr_vals)
            if not parent.is_company:
                parent.write({'is_company': True})

    def _clean_website(self, website):
        (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(website)
        if not scheme:
            if not netloc:
                netloc, path = path, ''
            website = urlparse.urlunparse(('http', netloc, path, params, query, fragment))
        return website

    @api.multi
    def write(self, vals):
        # res.partner must only allow setting the company_id of a partner if it
        # is the same as the company of all users that inherit from this partner
        # (this is to allow the code from res_users to write to the partner!) or
        # if setting the company_id to False (this is compatible with any user
        # company)
        if vals.get('website'):
            vals['website'] = self._clean_website(vals['website'])
        if vals.get('company_id'):
            company = self.env['res.company'].browse(vals['company_id'])
            for partner in self:
                if partner.user_ids:
                    companies = set(user.company_id for user in partner.user_ids)
                    if len(companies) > 1 or company not in companies:
                        raise osv.except_osv(_("Warning"), _("You cannot change the company, as the partner/user has multiple users linked with different companies."))
        result = super(res_partner, self).write(vals)
        for partner in self:
            self._fields_sync(partner, vals)
        return result

    @api.model
    def create(self, vals):
        if vals.get('website'):
            vals['website'] = self._clean_website(vals['website'])
        partner = super(res_partner, self).create(vals)
        self._fields_sync(partner, vals)
        self._handle_first_contact_creation(partner)
        return partner

    def open_commercial_entity(self, cr, uid, ids, context=None):
        """ Utility method used to add an "Open Company" button in partner views """
        partner = self.browse(cr, uid, ids[0], context=context)
        return {'type': 'ir.actions.act_window',
                'res_model': 'res.partner',
                'view_mode': 'form',
                'res_id': partner.commercial_partner_id.id,
                'target': 'new',
                'flags': {'form': {'action_buttons': True}}}

    def open_parent(self, cr, uid, ids, context=None):
        """ Utility method used to add an "Open Parent" button in partner views """
        partner = self.browse(cr, uid, ids[0], context=context)
        return {'type': 'ir.actions.act_window',
                'res_model': 'res.partner',
                'view_mode': 'form',
                'res_id': partner.parent_id.id,
                'target': 'new',
                'flags': {'form': {'action_buttons': True}}}

    def name_get(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = []
        for record in self.browse(cr, uid, ids, context=context):
            name = record.name
            if record.parent_id and not record.is_company:
                name = "%s, %s" % (record.parent_id.name, name)
            if
context.get('show_address_only'):
                name = self._display_address(cr, uid, record, without_company=True, context=context)
            if context.get('show_address'):
                name = name + "\n" + self._display_address(cr, uid, record, without_company=True, context=context)
            # applied twice on purpose: collapses up to three consecutive newlines left by the address block
            name = name.replace('\n\n','\n')
            name = name.replace('\n\n','\n')
            if context.get('show_email') and record.email:
                name = "%s <%s>" % (name, record.email)
            res.append((record.id, name))
        return res

    def _parse_partner_name(self, text, context=None):
        """ Supported syntax:
            - 'Raoul <[email protected]>': will find name and email address
            - otherwise: default, everything is set as the name """
        emails = tools.email_split(text.replace(' ',','))
        if emails:
            email = emails[0]
            name = text[:text.index(email)].replace('"', '').replace('<', '').strip()
        else:
            name, email = text, ''
        return name, email

    def name_create(self, cr, uid, name, context=None):
        """ Override of the orm's name_create method for partners. The purpose
            is to handle some basic formats to create partners using
            name_create. If only an email address is received and the regex
            cannot find a name, the name will take the email value.
            If the 'force_email' key is set in the context, an email address
            must be found. """
        if context is None:
            context = {}
        name, email = self._parse_partner_name(name, context=context)
        if context.get('force_email') and not email:
            raise osv.except_osv(_('Warning'), _("Couldn't create contact without email address!"))
        if not name and email:
            name = email
        rec_id = self.create(cr, uid, {self._rec_name: name or email, 'email': email or False}, context=context)
        return self.name_get(cr, uid, [rec_id], context)[0]

    def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        """ Override search() to always show inactive children when searching via the ``child_of`` operator. The ORM will
        always call search() with a simple domain of the form [('parent_id', 'in', [ids])].
""" # a special ``domain`` is set on the ``child_ids`` o2m to bypass this logic, as it uses similar domain expressions if len(args) == 1 and len(args[0]) == 3 and args[0][:2] == ('parent_id','in') \ and args[0][2] != [False]: context = dict(context or {}, active_test=False) return super(res_partner, self)._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count, access_rights_uid=access_rights_uid) def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100): if not args: args = [] if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'): self.check_access_rights(cr, uid, 'read') where_query = self._where_calc(cr, uid, args, context=context) self._apply_ir_rules(cr, uid, where_query, 'read', context=context) from_clause, where_clause, where_clause_params = where_query.get_sql() where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE ' # search on the name of the contacts and of its company search_name = name if operator in ('ilike', 'like'): search_name = '%%%s%%' % name if operator in ('=ilike', '=like'): operator = operator[1:] unaccent = get_unaccent_wrapper(cr) query = """SELECT id FROM res_partner {where} ({email} {operator} {percent} OR {display_name} {operator} {percent}) ORDER BY {display_name} """.format(where=where_str, operator=operator, email=unaccent('email'), display_name=unaccent('display_name'), percent=unaccent('%s')) where_clause_params += [search_name, search_name] if limit: query += ' limit %s' where_clause_params.append(limit) cr.execute(query, where_clause_params) ids = map(lambda x: x[0], cr.fetchall()) if ids: return self.name_get(cr, uid, ids, context) else: return [] return super(res_partner,self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit) def find_or_create(self, cr, uid, email, context=None): """ Find a partner with the given ``email`` or use :py:method:`~.name_create` to create one :param str email: email-like string, which should contain at least one email, e.g. ``"Raoul Grosbedon <[email protected]>"``""" assert email, 'an email is required for find_or_create to work' emails = tools.email_split(email) if emails: email = emails[0] ids = self.search(cr, uid, [('email','ilike',email)], context=context) if not ids: return self.name_create(cr, uid, email, context=context)[0] return ids[0] def _email_send(self, cr, uid, ids, email_from, subject, body, on_error=None): partners = self.browse(cr, uid, ids) for partner in partners: if partner.email: tools.email_send(email_from, [partner.email], subject, body, on_error) return True def email_send(self, cr, uid, ids, email_from, subject, body, on_error=''): while len(ids): self.pool['ir.cron'].create(cr, uid, { 'name': 'Send Partner Emails', 'user_id': uid, 'model': 'res.partner', 'function': '_email_send', 'args': repr([ids[:16], email_from, subject, body, on_error]) }) ids = ids[16:] return True def address_get(self, cr, uid, ids, adr_pref=None, context=None): """ Find contacts/addresses of the right type(s) by doing a depth-first-search through descendants within company boundaries (stop at entities flagged ``is_company``) then continuing the search at the ancestors that are within the same company boundaries. Defaults to partners of type ``'default'`` when the exact type is not found, or to the provided partner itself if no type ``'default'`` is found either. 
""" adr_pref = set(adr_pref or []) if 'default' not in adr_pref: adr_pref.add('default') result = {} visited = set() for partner in self.browse(cr, uid, filter(None, ids), context=context): current_partner = partner while current_partner: to_scan = [current_partner] # Scan descendants, DFS while to_scan: record = to_scan.pop(0) visited.add(record) if record.type in adr_pref and not result.get(record.type): result[record.type] = record.id if len(result) == len(adr_pref): return result to_scan = [c for c in record.child_ids if c not in visited if not c.is_company] + to_scan # Continue scanning at ancestor if current_partner is not a commercial entity if current_partner.is_company or not current_partner.parent_id: break current_partner = current_partner.parent_id # default to type 'default' or the partner itself default = result.get('default', partner.id) for adr_type in adr_pref: result[adr_type] = result.get(adr_type) or default return result def view_header_get(self, cr, uid, view_id, view_type, context): res = super(res_partner, self).view_header_get(cr, uid, view_id, view_type, context) if res: return res if not context.get('category_id', False): return False return _('Partners: ')+self.pool['res.partner.category'].browse(cr, uid, context['category_id'], context).name @api.model @api.returns('self') def main_partner(self): ''' Return the main partner ''' return self.env.ref('base.main_partner') def _display_address(self, cr, uid, address, without_company=False, context=None): ''' The purpose of this function is to build and return an address formatted accordingly to the standards of the country where it belongs. :param address: browse record of the res.partner to format :returns: the address formatted in a display that fit its country habits (or the default ones if not country is specified) :rtype: string ''' # get the information that will be injected into the display format # get the address format address_format = address.country_id.address_format or \ "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s" args = { 'state_code': address.state_id.code or '', 'state_name': address.state_id.name or '', 'country_code': address.country_id.code or '', 'country_name': address.country_id.name or '', 'company_name': address.parent_id.name or '', } for field in self._address_fields(cr, uid, context=context): args[field] = getattr(address, field) or '' if without_company: args['company_name'] = '' elif address.parent_id: address_format = '%(company_name)s\n' + address_format return address_format % args # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
3,265,273,204,571,432,400
48.570896
186
0.584795
false
dgoedkoop/QGIS
python/plugins/processing/algs/gdal/proximity.py
2
8834
# -*- coding: utf-8 -*- """ *************************************************************************** proximity.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.PyQt.QtGui import QIcon from qgis.core import (QgsRasterFileWriter, QgsProcessingException, QgsProcessingParameterDefinition, QgsProcessingParameterRasterLayer, QgsProcessingParameterBand, QgsProcessingParameterEnum, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterRasterDestination) from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm from processing.algs.gdal.GdalUtils import GdalUtils from processing.tools.system import isWindows pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class proximity(GdalAlgorithm): INPUT = 'INPUT' BAND = 'BAND' VALUES = 'VALUES' MAX_DISTANCE = 'MAX_DISTANCE' REPLACE = 'REPLACE' UNITS = 'UNITS' NODATA = 'NODATA' OPTIONS = 'OPTIONS' DATA_TYPE = 'DATA_TYPE' OUTPUT = 'OUTPUT' TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64'] def icon(self): return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'proximity.png')) def __init__(self): super().__init__() def initAlgorithm(self, config=None): self.distanceUnits = ((self.tr('Georeferenced coordinates'), 'GEO'), (self.tr('Pixel coordinates'), 'PIXEL')) self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer'))) self.addParameter(QgsProcessingParameterBand(self.BAND, self.tr('Band number'), parentLayerParameterName=self.INPUT)) self.addParameter(QgsProcessingParameterString(self.VALUES, self.tr('A list of pixel values in the source image to be considered target pixels'), optional=True)) self.addParameter(QgsProcessingParameterEnum(self.UNITS, self.tr('Distance units'), options=[i[0] for i in self.distanceUnits], allowMultiple=False, defaultValue=1)) self.addParameter(QgsProcessingParameterNumber(self.MAX_DISTANCE, self.tr('The maximum distance to be generated'), type=QgsProcessingParameterNumber.Double, minValue=0.0, defaultValue=0.0, optional=True)) self.addParameter(QgsProcessingParameterNumber(self.REPLACE, self.tr('Value to be applied to all pixels that are within the -maxdist of target pixels'), type=QgsProcessingParameterNumber.Double, defaultValue=0.0, optional=True)) self.addParameter(QgsProcessingParameterNumber(self.NODATA, self.tr('Nodata value to use for the destination proximity raster'), type=QgsProcessingParameterNumber.Double, defaultValue=0.0, optional=True)) options_param = QgsProcessingParameterString(self.OPTIONS, self.tr('Additional creation parameters'), defaultValue='', optional=True) options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced) options_param.setMetadata({ 'widget_wrapper': { 'class': 
'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}}) self.addParameter(options_param) self.addParameter(QgsProcessingParameterEnum(self.DATA_TYPE, self.tr('Output data type'), self.TYPES, allowMultiple=False, defaultValue=5)) self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Proximity map'))) def name(self): return 'proximity' def displayName(self): return self.tr('Proximity (raster distance)') def group(self): return self.tr('Raster analysis') def groupId(self): return 'rasteranalysis' def commandName(self): return 'gdal_proximity' def getConsoleCommands(self, parameters, context, feedback, executing=True): inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context) if inLayer is None: raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT)) distance = self.parameterAsDouble(parameters, self.MAX_DISTANCE, context) replaceValue = self.parameterAsDouble(parameters, self.REPLACE, context) if self.NODATA in parameters and parameters[self.NODATA] is not None: nodata = self.parameterAsDouble(parameters, self.NODATA, context) else: nodata = None options = self.parameterAsString(parameters, self.OPTIONS, context) out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context) arguments = [] arguments.append('-srcband') arguments.append(str(self.parameterAsInt(parameters, self.BAND, context))) arguments.append('-distunits') arguments.append(self.distanceUnits[self.parameterAsEnum(parameters, self.UNITS, context)][1]) values = self.parameterAsString(parameters, self.VALUES, context) if values: arguments.append('-values') arguments.append(values) if distance: arguments.append('-maxdist') arguments.append(str(distance)) if nodata is not None: arguments.append('-nodata') arguments.append(str(nodata)) if replaceValue: arguments.append('-fixed-buf-val') arguments.append(str(replaceValue)) arguments.append('-ot') arguments.append(self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)]) arguments.append('-of') arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1])) if options: arguments.extend(GdalUtils.parseCreationOptions(options)) arguments.append(inLayer.source()) arguments.append(out) commands = [] if isWindows(): commands = ['cmd.exe', '/C ', self.commandName() + '.bat', GdalUtils.escapeAndJoin(arguments)] else: commands = [self.commandName() + '.py', GdalUtils.escapeAndJoin(arguments)] return commands
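# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the upstream algorithm. On
# non-Windows platforms, getConsoleCommands above returns a two-element list:
# the gdal_proximity.py script name plus one escaped argument string. The
# values and paths below are hypothetical examples; exact quoting is delegated
# to GdalUtils.escapeAndJoin in the real code.
example_arguments = [
    '-srcband', '1',          # BAND
    '-distunits', 'GEO',      # UNITS, mapped through self.distanceUnits
    '-values', '255',         # VALUES (optional)
    '-maxdist', '500.0',      # MAX_DISTANCE (omitted when left at 0)
    '-ot', 'Float32',         # DATA_TYPE, taken from self.TYPES
    '-of', 'GTiff',           # driver derived from the output file extension
    '/tmp/input.tif',         # source raster
    '/tmp/proximity.tif',     # destination proximity raster
]
example_commands = ['gdal_proximity.py', ' '.join(example_arguments)]
print(example_commands)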
gpl-2.0
-5,947,998,920,742,397,000
44.071429
146
0.508603
false