repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
tjsavage/sfcsdatabase
|
django/contrib/gis/tests/test_geoip.py
|
290
|
4204
|
import os, unittest
from django.db import settings
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.utils import GeoIP, GeoIPException
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links to, or the actual, database files 'GeoIP.dat'
# and 'GeoLiteCity.dat').
class GeoIPTest(unittest.TestCase):
    """Exercises the GeoIP wrapper: construction, parameter validation, and
    country/city lookups against the MaxMind datasets.

    Requires both the 'GeoIP.dat' (country) and 'GeoLiteCity.dat' (city)
    database files to be present under settings.GEOIP_PATH.
    """

    def test01_init(self):
        "Testing GeoIP initialization."
        g1 = GeoIP()  # Everything inferred from GeoIP path
        path = settings.GEOIP_PATH
        g2 = GeoIP(path, 0)  # Passing in data path explicitly.
        g3 = GeoIP.open(path, 0)  # MaxMind Python API syntax.
        # All three construction routes must load both datasets.
        for g in (g1, g2, g3):
            self.assertEqual(True, bool(g._country))
            self.assertEqual(True, bool(g._city))
        # Only passing in the location of one database: the other handle
        # must stay unset (None).
        city = os.path.join(path, 'GeoLiteCity.dat')
        cntry = os.path.join(path, 'GeoIP.dat')
        g4 = GeoIP(city, country='')
        self.assertEqual(None, g4._country)
        g5 = GeoIP(cntry, city='')
        self.assertEqual(None, g5._city)
        # Improper parameters: a bad cache value always raises
        # GeoIPException; a bad path raises GeoIPException for strings
        # (nonexistent path) and TypeError for non-strings.
        bad_params = (23, 'foo', 15.23)
        for bad in bad_params:
            self.assertRaises(GeoIPException, GeoIP, cache=bad)
            if isinstance(bad, basestring):
                e = GeoIPException
            else:
                e = TypeError
            self.assertRaises(e, GeoIP, bad, 0)

    def test02_bad_query(self):
        "Testing GeoIP query parameter checking."
        # Country-only instance ('<foo>' disables the city database).
        cntry_g = GeoIP(city='<foo>')
        # No city database available, these calls should fail.
        self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
        self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
        # Non-string query should raise TypeError
        self.assertRaises(TypeError, cntry_g.country_code, 17)
        self.assertRaises(TypeError, cntry_g.country_name, GeoIP)

    def test03_country(self):
        "Testing GeoIP country querying methods."
        g = GeoIP(city='<foo>')
        fqdn = 'www.google.com'
        addr = '12.215.42.19'
        # Both FQDN and dotted-quad queries must resolve identically.
        for query in (fqdn, addr):
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
                             g.country(query))

    def test04_city(self):
        "Testing GeoIP city querying methods."
        g = GeoIP(country='<foo>')  # city-only instance
        addr = '130.80.29.3'
        fqdn = 'chron.com'
        for query in (fqdn, addr):
            # Country queries should still work.
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
                             g.country(query))
            # City information dictionary.
            d = g.city(query)
            self.assertEqual('USA', d['country_code3'])
            self.assertEqual('Houston', d['city'])
            self.assertEqual('TX', d['region'])
            self.assertEqual(713, d['area_code'])
            geom = g.geos(query)
            self.failIf(not isinstance(geom, GEOSGeometry))
            lon, lat = (-95.3670, 29.7523)
            # lat_lon() returns (lat, lon); swap into (lon, lat) so all
            # four coordinate accessors can be checked with one loop.
            lat_lon = g.lat_lon(query)
            lat_lon = (lat_lon[1], lat_lon[0])
            for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
                self.assertAlmostEqual(lon, tup[0], 4)
                self.assertAlmostEqual(lat, tup[1], 4)
def suite():
    """Assemble and return the TestSuite covering all GeoIPTest cases."""
    return unittest.TestSuite([unittest.makeSuite(GeoIPTest)])
def run(verbosity=2):
    """Execute the GeoIP suite with a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
|
bsd-3-clause
|
dparshin/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py
|
126
|
13672
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TestConfiguration(object):
    """Value object describing one test configuration: an OS version, a CPU
    architecture, and a build type (e.g. 'release'/'debug')."""

    def __init__(self, version, architecture, build_type):
        self.version = version
        self.architecture = architecture
        self.build_type = build_type

    @classmethod
    def category_order(cls):
        """The most common human-readable order in which the configuration properties are listed."""
        return ['version', 'architecture', 'build_type']

    def items(self):
        return self.__dict__.items()

    def keys(self):
        return self.__dict__.keys()

    def __str__(self):
        return ("<%(version)s, %(architecture)s, %(build_type)s>" %
                self.__dict__)

    def __repr__(self):
        return "TestConfig(version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s')" % self.__dict__

    def __hash__(self):
        return hash(self.version + self.architecture + self.build_type)

    def __eq__(self, other):
        # BUG FIX: this used to compare self.__hash__() == other.__hash__(),
        # so any object whose hash collided (e.g. the plain string
        # 'xpx86release' for <xp, x86, release>) compared equal.  Compare the
        # actual attributes instead; hash equality still holds for equal
        # objects, so set/dict behavior is preserved.
        return isinstance(other, TestConfiguration) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; define it explicitly
        # so != is consistent with ==.
        return not self.__eq__(other)

    def values(self):
        """Returns the configuration's property values (version, architecture, build type)."""
        return self.__dict__.values()
class SpecifierSorter(object):
    """Maps specifiers (e.g. 'xp', 'release') to their categories and sorts
    specifier lists into the canonical category order."""

    def __init__(self, all_test_configurations=None, macros=None):
        self._specifier_to_category = {}
        if not all_test_configurations:
            return
        for test_configuration in all_test_configurations:
            for category, specifier in test_configuration.items():
                self.add_specifier(category, specifier)
        self.add_macros(macros)

    def add_specifier(self, category, specifier):
        self._specifier_to_category[specifier] = category

    def add_macros(self, macros):
        """Register macro names (e.g. 'win' -> ['xp', 'vista', 'win7']).

        Assumes well-formed macros: every specifier in a macro's expansion
        shares one category, so the first expansion entry determines it.
        """
        if not macros:
            return
        for macro, specifier_list in macros.items():
            self.add_specifier(self.category_for_specifier(specifier_list[0]), macro)

    @classmethod
    def category_priority(cls, category):
        return TestConfiguration.category_order().index(category)

    def specifier_priority(self, specifier):
        return self.category_priority(self._specifier_to_category[specifier])

    def category_for_specifier(self, specifier):
        """Return the category for a specifier, or None if unknown."""
        return self._specifier_to_category.get(specifier)

    def sort_specifiers(self, specifiers):
        """Return specifiers ordered by category priority, alphabetical within a category."""
        # IDIOM FIX: was map(lambda x: [], ...), which is both unidiomatic
        # and broken on Python 3 (map returns an unsubscriptable iterator).
        category_slots = [[] for _ in TestConfiguration.category_order()]
        for specifier in specifiers:
            category_slots[self.specifier_priority(specifier)].append(specifier)
        # Concatenate the per-category slots in priority order (replaces the
        # previous reduce() over an in-place-sorting closure; same result).
        sorted_specifiers = []
        for slot in category_slots:
            sorted_specifiers.extend(sorted(slot))
        return sorted_specifiers
class TestConfigurationConverter(object):
    """Converts between sets of TestConfiguration instances and the compact
    specifier lists used in test expectation files, collapsing and
    abbreviating specifier combinations where possible."""

    def __init__(self, all_test_configurations, configuration_macros=None):
        self._all_test_configurations = all_test_configurations
        self._configuration_macros = configuration_macros or {}
        # specifier -> set of configurations containing that specifier.
        self._specifier_to_configuration_set = {}
        self._specifier_sorter = SpecifierSorter()
        # size -> set of frozensets of specifiers that jointly cover a
        # whole category (used to collapse combinations).
        self._collapsing_sets_by_size = {}
        # specifier -> single-specifier set it always implies (redundant info).
        self._junk_specifier_combinations = {}
        self._collapsing_sets_by_category = {}
        matching_sets_by_category = {}
        for configuration in all_test_configurations:
            for category, specifier in configuration.items():
                self._specifier_to_configuration_set.setdefault(specifier, set()).add(configuration)
                self._specifier_sorter.add_specifier(category, specifier)
                self._collapsing_sets_by_category.setdefault(category, set()).add(specifier)
                # FIXME: This seems extra-awful.
                # Record which specifiers of OTHER categories co-occur with
                # this specifier across all configurations.
                for cat2, spec2 in configuration.items():
                    if category == cat2:
                        continue
                    matching_sets_by_category.setdefault(specifier, {}).setdefault(cat2, set()).add(spec2)
        for collapsing_set in self._collapsing_sets_by_category.values():
            self._collapsing_sets_by_size.setdefault(len(collapsing_set), set()).add(frozenset(collapsing_set))
        # A specifier from a lower-priority category that is uniquely
        # determined by a higher-priority specifier adds no information.
        for specifier, sets_by_category in matching_sets_by_category.items():
            for category, set_by_category in sets_by_category.items():
                if len(set_by_category) == 1 and self._specifier_sorter.category_priority(category) > self._specifier_sorter.specifier_priority(specifier):
                    self._junk_specifier_combinations[specifier] = set_by_category
        self._specifier_sorter.add_macros(configuration_macros)

    def specifier_sorter(self):
        return self._specifier_sorter

    def _expand_macros(self, specifier):
        # Return the macro's expansion, or the specifier itself if not a macro.
        expanded_specifiers = self._configuration_macros.get(specifier)
        return expanded_specifiers or [specifier]

    def to_config_set(self, specifier_set, error_list=None):
        """Convert a list of specifiers into a set of TestConfiguration instances."""
        # An empty specifier list means "all configurations".
        if len(specifier_set) == 0:
            return self._all_test_configurations
        matching_sets = {}
        for specifier in specifier_set:
            for expanded_specifier in self._expand_macros(specifier):
                configurations = self._specifier_to_configuration_set.get(expanded_specifier)
                if not configurations:
                    if error_list is not None:
                        error_list.append("Unrecognized modifier '" + expanded_specifier + "'")
                    return set()
                category = self._specifier_sorter.category_for_specifier(expanded_specifier)
                # Specifiers within a category union; categories intersect.
                matching_sets.setdefault(category, set()).update(configurations)
        return reduce(set.intersection, matching_sets.values())

    @classmethod
    def collapse_macros(cls, macros_dict, specifiers_list):
        """Replace specifier combinations covered by a macro with the macro name.

        Mutates specifiers_list in place.
        """
        for macro_specifier, macro in macros_dict.items():
            if len(macro) == 1:
                continue
            for combination in cls.combinations(specifiers_list, len(macro)):
                if cls.symmetric_difference(combination) == set(macro):
                    for item in combination:
                        specifiers_list.remove(item)
                    new_specifier_set = cls.intersect_combination(combination)
                    new_specifier_set.add(macro_specifier)
                    specifiers_list.append(frozenset(new_specifier_set))

        def collapse_individual_specifier_set(macro_specifier, macro):
            # Within a single specifier set, replace a full macro expansion
            # with the macro name itself.
            specifiers_to_remove = []
            specifiers_to_add = []
            for specifier_set in specifiers_list:
                macro_set = set(macro)
                if macro_set.intersection(specifier_set) == macro_set:
                    specifiers_to_remove.append(specifier_set)
                    specifiers_to_add.append(frozenset((set(specifier_set) - macro_set) | set([macro_specifier])))
            for specifier in specifiers_to_remove:
                specifiers_list.remove(specifier)
            for specifier in specifiers_to_add:
                specifiers_list.append(specifier)

        for macro_specifier, macro in macros_dict.items():
            collapse_individual_specifier_set(macro_specifier, macro)

    # FIXME: itertools.combinations is buggy in Python 2.6.1 (the version that ships on SL).
    # It seems to be okay in 2.6.5 or later; until then, this is the implementation given
    # in http://docs.python.org/library/itertools.html (from 2.7).
    @staticmethod
    def combinations(iterable, r):
        """Generate all r-length combinations, as in itertools.combinations."""
        # combinations('ABCD', 2) --> AB AC AD BC BD CD
        # combinations(range(4), 3) --> 012 013 023 123
        pool = tuple(iterable)
        n = len(pool)
        if r > n:
            return
        indices = range(r)
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be advanced.
            for i in reversed(range(r)):
                if indices[i] != i + n - r:
                    break
            else:
                return
            indices[i] += 1  # pylint: disable=W0631
            for j in range(i + 1, r):  # pylint: disable=W0631
                indices[j] = indices[j - 1] + 1
            yield tuple(pool[i] for i in indices)

    @classmethod
    def intersect_combination(cls, combination):
        """Return the specifiers common to every set in the combination."""
        return reduce(set.intersection, [set(specifiers) for specifiers in combination])

    @classmethod
    def symmetric_difference(cls, iterable):
        """Return the specifiers present in some, but not all, of the sets."""
        union = set()
        intersection = iterable[0]
        for item in iterable:
            union = union | item
            intersection = intersection.intersection(item)
        return union - intersection

    def to_specifiers_list(self, test_configuration_set):
        """Convert a set of TestConfiguration instances into one or more list of specifiers."""
        # Easy out: if the set is all configurations, the modifier is empty.
        if len(test_configuration_set) == len(self._all_test_configurations):
            return [[]]

        # 1) Build a list of specifier sets, discarding specifiers that don't add value.
        specifiers_list = []
        for config in test_configuration_set:
            values = set(config.values())
            for specifier, junk_specifier_set in self._junk_specifier_combinations.items():
                if specifier in values:
                    values -= junk_specifier_set
            specifiers_list.append(frozenset(values))

        def try_collapsing(size, collapsing_sets):
            # Collapse one matching combination per call; caller loops until
            # no more collapses are possible.
            if len(specifiers_list) < size:
                return False
            for combination in self.combinations(specifiers_list, size):
                if self.symmetric_difference(combination) in collapsing_sets:
                    for item in combination:
                        specifiers_list.remove(item)
                    specifiers_list.append(frozenset(self.intersect_combination(combination)))
                    return True
            return False

        # 2) Collapse specifier sets with common specifiers:
        #   (xp, release), (xp, debug) --> (xp, x86)
        for size, collapsing_sets in self._collapsing_sets_by_size.items():
            while try_collapsing(size, collapsing_sets):
                pass

        def try_abbreviating(collapsing_sets):
            # Merge one pair of sets whose difference fits inside a single
            # collapsing set; caller loops until no more merges happen.
            if len(specifiers_list) < 2:
                return False
            for combination in self.combinations(specifiers_list, 2):
                for collapsing_set in collapsing_sets:
                    diff = self.symmetric_difference(combination)
                    if diff <= collapsing_set:
                        common = self.intersect_combination(combination)
                        for item in combination:
                            specifiers_list.remove(item)
                        specifiers_list.append(frozenset(common | diff))
                        return True
            return False

        # 3) Abbreviate specifier sets by combining specifiers across categories.
        #   (xp, release), (win7, release) --> (xp, win7, release)
        while try_abbreviating(self._collapsing_sets_by_size.values()):
            pass

        # 4) Substitute specifier subsets that match macros within each set:
        #   (xp, vista, win7, release) -> (win, release)
        self.collapse_macros(self._configuration_macros, specifiers_list)

        macro_keys = set(self._configuration_macros.keys())

        # 5) Collapsing macros may have created combinations that can now be abbreviated.
        #   (xp, release), (linux, x86, release), (linux, x86_64, release) --> (xp, release), (linux, release) --> (xp, linux, release)
        while try_abbreviating([self._collapsing_sets_by_category['version'] | macro_keys]):
            pass

        # 6) Remove cases where we have collapsed but have all macros.
        #   (android, win, mac, linux, release) --> (release)
        specifiers_to_remove = []
        for specifier_set in specifiers_list:
            if macro_keys <= specifier_set:
                specifiers_to_remove.append(specifier_set)
        for specifier_set in specifiers_to_remove:
            specifiers_list.remove(specifier_set)
            specifiers_list.append(frozenset(specifier_set - macro_keys))

        return specifiers_list
|
bsd-3-clause
|
saleemjaveds/https-github.com-openstack-nova
|
nova/tests/scheduler/fakes.py
|
19
|
11486
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Scheduler tests.
"""
import mox
from nova.compute import vm_states
from nova import db
from nova.openstack.common import jsonutils
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
# Fixture compute-node records mimicking rows returned by
# db.compute_node_get_all(); sizes are MB (memory) and GB (disk).
COMPUTE_NODES = [
    dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
         disk_available_least=None, free_ram_mb=512, vcpus_used=1,
         free_disk_gb=512, local_gb_used=0, updated_at=None,
         service=dict(host='host1', disabled=False),
         hypervisor_hostname='node1', host_ip='127.0.0.1',
         hypervisor_version=0),
    dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
         disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
         free_disk_gb=1024, local_gb_used=0, updated_at=None,
         # host2 is disabled, so filters should skip it.
         service=dict(host='host2', disabled=True),
         hypervisor_hostname='node2', host_ip='127.0.0.1',
         hypervisor_version=0),
    dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
         disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
         free_disk_gb=3072, local_gb_used=0, updated_at=None,
         service=dict(host='host3', disabled=False),
         hypervisor_hostname='node3', host_ip='127.0.0.1',
         hypervisor_version=0),
    dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
         disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
         free_disk_gb=8888, local_gb_used=0, updated_at=None,
         service=dict(host='host4', disabled=False),
         hypervisor_hostname='node4', host_ip='127.0.0.1',
         hypervisor_version=0),
    # Broken entry (no service record) — exercises error handling.
    dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
]
# Like COMPUTE_NODES, but each record carries a JSON-encoded 'metrics'
# list ({'name', 'value', 'timestamp', 'source'}) for metric-based filters
# and weighers; hosts 5 and 6 add an extra 'zot' metric.
COMPUTE_NODES_METRICS = [
    dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
         disk_available_least=512, free_ram_mb=512, vcpus_used=1,
         free_disk_gb=512, local_gb_used=0, updated_at=None,
         service=dict(host='host1', disabled=False),
         hypervisor_hostname='node1', host_ip='127.0.0.1',
         hypervisor_version=0,
         metrics=jsonutils.dumps([
             {'name': 'foo', 'value': 512, 'timestamp': None, 'source': 'host1'},
             {'name': 'bar', 'value': 1.0, 'timestamp': None, 'source': 'host1'},
         ])),
    dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
         disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
         free_disk_gb=1024, local_gb_used=0, updated_at=None,
         service=dict(host='host2', disabled=True),
         hypervisor_hostname='node2', host_ip='127.0.0.1',
         hypervisor_version=0,
         metrics=jsonutils.dumps([
             {'name': 'foo', 'value': 1024, 'timestamp': None, 'source': 'host2'},
             {'name': 'bar', 'value': 2.0, 'timestamp': None, 'source': 'host2'},
         ])),
    dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
         disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
         free_disk_gb=3072, local_gb_used=0, updated_at=None,
         service=dict(host='host3', disabled=False),
         hypervisor_hostname='node3', host_ip='127.0.0.1',
         hypervisor_version=0,
         metrics=jsonutils.dumps([
             {'name': 'foo', 'value': 3072, 'timestamp': None, 'source': 'host3'},
             {'name': 'bar', 'value': 1.0, 'timestamp': None, 'source': 'host3'},
         ])),
    dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
         disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
         free_disk_gb=8192, local_gb_used=0, updated_at=None,
         service=dict(host='host4', disabled=False),
         hypervisor_hostname='node4', host_ip='127.0.0.1',
         hypervisor_version=0,
         metrics=jsonutils.dumps([
             {'name': 'foo', 'value': 8192, 'timestamp': None, 'source': 'host4'},
             {'name': 'bar', 'value': 0, 'timestamp': None, 'source': 'host4'},
         ])),
    dict(id=5, local_gb=768, memory_mb=768, vcpus=8,
         disk_available_least=768, free_ram_mb=768, vcpus_used=0,
         free_disk_gb=768, local_gb_used=0, updated_at=None,
         service=dict(host='host5', disabled=False),
         hypervisor_hostname='node5', host_ip='127.0.0.1',
         hypervisor_version=0,
         metrics=jsonutils.dumps([
             {'name': 'foo', 'value': 768, 'timestamp': None, 'source': 'host5'},
             {'name': 'bar', 'value': 0, 'timestamp': None, 'source': 'host5'},
             {'name': 'zot', 'value': 1, 'timestamp': None, 'source': 'host5'},
         ])),
    dict(id=6, local_gb=2048, memory_mb=2048, vcpus=8,
         disk_available_least=2048, free_ram_mb=2048, vcpus_used=0,
         free_disk_gb=2048, local_gb_used=0, updated_at=None,
         service=dict(host='host6', disabled=False),
         hypervisor_hostname='node6', host_ip='127.0.0.1',
         hypervisor_version=0,
         metrics=jsonutils.dumps([
             {'name': 'foo', 'value': 2048, 'timestamp': None, 'source': 'host6'},
             {'name': 'bar', 'value': 0, 'timestamp': None, 'source': 'host6'},
             {'name': 'zot', 'value': 2, 'timestamp': None, 'source': 'host6'},
         ])),
]
# Fixture instance records; the per-host totals are what FakeHostManager's
# docstring arithmetic is based on.
INSTANCES = [
    dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
         host='host1', node='node1'),
    dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
         host='host2', node='node2'),
    dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
         host='host2', node='node2'),
    dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
         host='host3', node='node3'),
    # Broken host (host=None) — exercises error handling.
    dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
         host=None),
    # No matching host in COMPUTE_NODES.
    dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
         host='host5', node='node5'),
]
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
    """FilterScheduler that swaps in a fresh, real HostManager so tests
    control host state directly rather than via configuration."""

    def __init__(self, *args, **kwargs):
        super(FakeFilterScheduler, self).__init__(*args, **kwargs)
        self.host_manager = host_manager.HostManager()
class FakeHostManager(host_manager.HostManager):
    """HostManager pre-seeded with capability reports for four hosts.

    Expected free resources given the INSTANCES fixture:
      host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
      host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536
      host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
      host4: free_ram_mb=8192 free_disk_gb=8192
    """

    def __init__(self):
        super(FakeHostManager, self).__init__()
        # host -> reported free memory in bytes.
        host_memory_free = {
            'host1': 1073741824,
            'host2': 2147483648,
            'host3': 3221225472,
            'host4': 999999999,
        }
        self.service_states = dict(
            (host, {'compute': {'host_memory_free': free_bytes}})
            for host, free_bytes in host_memory_free.items())
class FakeHostState(host_manager.HostState):
    """HostState whose attributes are seeded from an arbitrary dictionary."""

    def __init__(self, host, node, attribute_dict):
        super(FakeHostState, self).__init__(host, node)
        # Mirror every supplied key onto the instance as an attribute.
        for attr_name in attribute_dict:
            setattr(self, attr_name, attribute_dict[attr_name])
class FakeInstance(object):
    """Creates a database-backed test instance and records its uuid."""

    def __init__(self, context=None, params=None):
        """Create a test instance. Returns uuid."""
        self.context = context
        created = self._create_fake_instance(params=params)
        self.uuid = created['uuid']

    def _create_fake_instance(self, params=None):
        """Create a test instance."""
        # Baseline attributes for an active, runnable instance.
        inst = {
            'vm_state': vm_states.ACTIVE,
            'image_ref': 1,
            'reservation_id': 'r-fakeres',
            'user_id': 'fake',
            'project_id': 'fake',
            'instance_type_id': 2,
            'ami_launch_index': 0,
        }
        # Caller-supplied overrides win over the defaults.
        inst.update(params or {})
        return db.instance_create(self.context, inst)
class FakeComputeAPI(object):
    """Compute API stub that accepts instance-creation calls and does nothing."""

    def create_db_entry_for_new_instance(self, *args, **kwargs):
        # Intentionally a no-op: scheduler tests do not need real DB entries.
        pass
def mox_host_manager_db_calls(mock, context):
    """Stub db.compute_node_get_all via mox so HostManager sees COMPUTE_NODES.

    The 'context' parameter is unused here; presumably kept for signature
    parity with callers — TODO confirm.
    """
    mock.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
|
apache-2.0
|
rajanandakumar/DIRAC
|
Core/Utilities/Distribution.py
|
3
|
16784
|
# $HeadURL$
__RCSID__ = "$Id$"
import urllib2, re, tarfile, os, types, sys, subprocess, urlparse, tempfile
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import CFG, File, List
class Distribution:
  """Helper for working with the DIRAC SVN repositories: maps packages to
  repository roots, fetches files/versions over HTTP or svn, and builds and
  queues svn commands (import/copy/mkdir/checkout/commit)."""

  # Read-only (anonymous) and read-write (developer) repository roots.
  cernAnonRoot = 'http://svn.cern.ch/guest/dirac'
  googleAnonRoot = 'http://dirac-grid.googlecode.com/svn'
  cernDevRoot = 'svn+ssh://svn.cern.ch/reps/dirac'
  googleDevRoot = 'https://dirac-grid.googlecode.com/svn'

  # Anonymous root serving each known package.
  anonymousSVNRoot = { 'global' : cernAnonRoot,
                       'DIRAC' : cernAnonRoot,
                       'LHCbDIRAC' : cernAnonRoot,
                       'LHCbVMDIRAC' : cernAnonRoot,
                       'LHCbWebDIRAC' : cernAnonRoot,
                       'BelleDIRAC' : googleAnonRoot,
                       'MagicDIRAC' : googleAnonRoot,
                       'CTADIRAC' : googleAnonRoot,
                       'EELADIRAC' : googleAnonRoot,
                       'ILCDIRAC' : cernAnonRoot,
                       'Docs' : googleAnonRoot,
                     }

  # Developer root serving each known package.
  devSVNRoot = { 'global' : cernDevRoot,
                 'DIRAC' : cernDevRoot,
                 'LHCbDIRAC' : cernDevRoot,
                 'LHCbVMDIRAC' : cernDevRoot,
                 'LHCbWebDIRAC' : cernDevRoot,
                 'ILCDIRAC' : cernDevRoot,
                 'BelleDIRAC' : googleDevRoot,
                 'MagicDIRAC' : googleDevRoot,
                 'CTADIRAC' : googleDevRoot,
                 'EELADIRAC' : googleDevRoot,
                 'Docs' : googleDevRoot,
               }

  def __init__( self, package = False ):
    # Default to the 'global' pseudo-package when none is given.
    if not package:
      package = 'global'
    if package not in Distribution.anonymousSVNRoot:
      raise Exception( "Package %s does not have a registered svn root" % package )
    self.package = package
    self.svnRoot = Distribution.anonymousSVNRoot[ package ]
    # Credentials are optional; False means "not set".
    self.svnPass = False
    self.svnUser = False
    # Queue of svn command strings executed later by executeCommandQueue().
    self.cmdQueue = []

  def getSVNPathForPackage( self, package, path ):
    """Return the anonymous URL for 'path' in 'package' (CERN root if unknown)."""
    if package not in self.anonymousSVNRoot:
      return "%s/%s" % ( Distribution.cernAnonRoot, path )
    return "%s/%s" % ( self.anonymousSVNRoot[ package ], path )

  def getPackageName( self ):
    return self.package

  def getDevPath( self, path = False ):
    """Return this package's developer root, optionally extended with 'path'."""
    devPath = Distribution.devSVNRoot[ self.package ]
    if path:
      devPath += "/%s" % path
    return devPath

  def setSVNPassword( self, password ):
    self.svnPass = password

  def setSVNUser( self, user ):
    self.svnUser = user

  def addCommandToQueue( self, cmd ):
    self.cmdQueue.append( cmd )

  def executeCommandQueue( self ):
    """Run queued commands in FIFO order; stop and return False on first failure."""
    while self.cmdQueue:
      if not self.executeCommand( self.cmdQueue.pop( 0 ), getOutput = False ):
        return False
    return True

  def emptyQueue( self ):
    return len( self.cmdQueue ) == 0

  def getRepositoryVersions( self ):
    """Scrape the repository's tags listing and return the tag names.

    Exits the process (status 1/2) on download or parse failure.
    """
    if self.package == 'global' :
      webLocation = "%s/tags" % self.svnRoot
    else:
      webLocation = '%s/%s/tags/%s' % ( self.svnRoot, self.package, self.package )
    try:
      remoteFile = urllib2.urlopen( webLocation )
    except urllib2.URLError:
      gLogger.exception()
      sys.exit( 2 )
    remoteData = remoteFile.read()
    remoteFile.close()
    if not remoteData:
      gLogger.error( "Could not retrieve versions for package %s" % self.package )
      sys.exit( 1 )
    versions = []
    rePackage = ".*"
    # Tags appear as <li><a href=...>NAME/</a></li> entries in the HTML index.
    versionRE = re.compile( "<li> *<a *href=.*> *(%s)/ *</a> *</li>" % rePackage )
    for line in remoteData.split( "\n" ):
      res = versionRE.search( line )
      if res:
        versions.append( res.groups()[0] )
    return versions

  def getSVNFileContents( self, svnPath ):
    """Return the contents of svnPath, first over HTTP, then via 'svn cat'.

    Exits the process on total failure.
    """
    gLogger.info( "Reading %s from %s" % ( svnPath, self.svnRoot) )
    remoteLocation = "%s/%s" % ( self.svnRoot, svnPath )
    try:
      remoteFile = urllib2.urlopen( remoteLocation )
      remoteData = remoteFile.read()
      remoteFile.close()
      if remoteData:
        return remoteData
    except Exception:
      pass
    #Web cat failed. Try directly with svn
    # NOTE(review): the format string below is missing its closing single
    # quote — "svn cat '%s'" was probably intended; the command as built
    # has an unbalanced quote.
    exitStatus, remoteData = self.executeCommand( "svn cat '%s" % remoteLocation )
    if exitStatus:
      print "Error: Could not retrieve %s from the web nor via SVN. Aborting..." % svnPath
      sys.exit( 1 )
    return remoteData

  def loadCFGFromRepository( self, svnPath ):
    """Fetch svnPath and parse it as a CFG object."""
    remoteData = self.getSVNFileContents( svnPath )
    return CFG.CFG().loadFromBuffer( remoteData )

  def getVersionsCFG( self ):
    """Load this package's trunk versions.cfg."""
    return self.loadCFGFromRepository( '%s/trunk/%s/versions.cfg' % ( self.package, self.package ) )

  def executeCommand( self, cmd, getOutput = True ):
    """Run a shell command, exposing the SVN password via the environment.

    Returns True/False (success) when getOutput is False, otherwise a
    ( returncode, stdout ) tuple.
    """
    env = dict( os.environ )
    if self.svnPass:
      env[ 'SVN_PASSWORD' ] = self.svnPass
    if not getOutput:
      return subprocess.Popen( cmd, shell = True, env = env ).wait() == 0
    #Get output
    proc = subprocess.Popen( cmd,
                             shell = True, stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE, close_fds = True, env = env )
    stdData = proc.stdout.read()
    proc.wait()
    return ( proc.returncode, stdData )

  def __getDevCmdBase( self, path ):
    """Build ( "extra svn args", url ) for a developer-repository command.

    For HTTPS the credentials become --username/--password arguments; for
    svn+ssh the user is embedded in the URL's netloc instead.
    """
    devRoot = self.getDevPath( path )
    isHTTPS = False
    urlRes = urlparse.urlparse( devRoot )
    # Parse a URL into 6 components:
    # <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    # (scheme, netloc, path, params, query, fragment)
    args = []
    if urlRes[0] == "https":
      isHTTPS = True
    if self.svnUser:
      if isHTTPS:
        args.append( "--username '%s'" % self.svnUser )
      else:
        urlRes = list( urlparse.urlparse( devRoot ) )
        urlRes[1] = "%s@%s" % ( self.svnUser, urlRes[1] )
        devRoot = urlparse.urlunparse( urlRes )
    if self.svnPass and isHTTPS:
      args.append( "--password '%s'" % self.svnPass )
    return ( " ".join( args ), devRoot )

  def doLS( self, path ):
    """Run 'svn ls' on the developer path; returns ( returncode, output )."""
    destT = self.__getDevCmdBase( path )
    cmd = "svn ls %s %s" % destT
    return self.executeCommand( cmd, True )

  def __cmdImport( self, origin, dest, comment ):
    destT = self.__getDevCmdBase( dest )
    cmd = "svn import -m '%s' %s '%s' '%s'" % ( comment, destT[0], origin, destT[1] )
    return cmd

  def queueImport( self, origin, dest, comment ):
    self.addCommandToQueue( self.__cmdImport( origin, dest, comment ) )

  def doImport( self, origin, dest, comment ):
    return self.executeCommand( self.__cmdImport( origin, dest, comment ), False )

  def __cmdCopy( self, origin, dest, comment ):
    destT = self.__getDevCmdBase( dest )
    orT = self.__getDevCmdBase( origin )
    cmd = "svn copy -m '%s' %s '%s' '%s'" % ( comment, destT[0], orT[1], destT[1] )
    return cmd

  def queueCopy( self, origin, dest, comment ):
    self.addCommandToQueue( self.__cmdCopy( origin, dest, comment ) )

  def __cmdMultiCopy( self, originList, dest, comment ):
    # Copy several repository paths into one destination in a single commit.
    orList = [ "'%s'" % self.__getDevCmdBase( orPath )[1] for orPath in originList ]
    destT = self.__getDevCmdBase( dest )
    cmd = "svn copy -m '%s' %s %s '%s'" % ( comment, destT[0], " ".join( orList ), destT[1] )
    return cmd

  def queueMultiCopy( self, originList, dest, comment ):
    self.addCommandToQueue( self.__cmdMultiCopy( originList, dest, comment ) )

  # def doCopy( self, path, comment ):
  #   return self.executeCommand( self.__cmdCopy( origin, dest, comment ), False )

  def __cmdMakeDir( self, path, comment ):
    destT = self.__getDevCmdBase( path )
    return "svn mkdir --parents -m '%s' %s %s" % ( comment, destT[0], destT[1] )

  def queueMakeDir( self, path, comment ):
    self.addCommandToQueue( self.__cmdMakeDir( path, comment ) )

  def doMakeDir( self, path, comment ):
    return self.executeCommand( self.__cmdMakeDir( path, comment ), False )

  def doCheckout( self, path, location ):
    """Check out the developer path into the local 'location' directory."""
    destT = self.__getDevCmdBase( path )
    cmd = "svn co %s '%s' '%s'" % ( destT[0], destT[1], location )
    return self.executeCommand( cmd, False )

  def doCommit( self, location, comment ):
    """Commit the working copy at 'location' with the given log message."""
    destT = self.__getDevCmdBase( "" )
    cmd = "svn ci -m '%s' %s '%s'" % ( comment, destT[0], location )
    return self.executeCommand( cmd, False )

  #Get copy revision
  def getCopyRevision( self, location ):
    """Return the revision at which 'location' was copied (0 on failure).

    Parses 'svn log --stop-on-copy'; the last matching log entry is the
    copy revision.
    """
    destT = self.__getDevCmdBase( location )
    cmd = "svn log --stop-on-copy %s '%s'" % ( destT[0], destT[1] )
    exitCode, outData = self.executeCommand( cmd )
    if exitCode:
      return 0
    copyRev = 0
    revRE = re.compile( "r([0-9]+)\s*\|\s*(\w+).*" )
    for line in List.fromChar( outData, "\n" ):
      reM = revRE.match( line )
      if reM:
        copyRev = reM.groups()[0]
    return copyRev

  #
  def writeVersionToTmpInit( self, version ):
    """Fetch trunk __init__.py, rewrite its version constants to 'version',
    and write the result to a temp file; returns its path, or False."""
    verTup = parseVersionString( version )
    if not verTup:
      return False
    destT = self.__getDevCmdBase( "%s/trunk/%s/__init__.py" % ( self.package, self.package ) )
    cmd = "svn cat %s '%s'" % ( destT[0], destT[1] )
    exitCode, outData = self.executeCommand( cmd )
    if exitCode:
      return False
    tmpfd, tmpname = tempfile.mkstemp()
    versionStrings = ( "majorVersion", "minorVersion", "patchLevel", "preVersion" )
    reList = []
    # Build one substitution per version constant; missing parts become 0.
    for iP in range( len( versionStrings ) ):
      if verTup[iP]:
        replStr = "%s = %s" % ( versionStrings[iP], verTup[iP] )
      else:
        replStr = "%s = 0" % versionStrings[iP]
      reList.append( ( re.compile( "^(%s\s*=)\s*[0-9]+\s*" % versionStrings[iP] ), replStr ) )
    for line in outData.split( "\n" ):
      for reCm, replStr in reList:
        line = reCm.sub( replStr, line )
      os.write( tmpfd, "%s\n" % line )
    os.close( tmpfd )
    return tmpname
#End of Distribution class
# Matches DIRAC version strings: vX[rY][pZ][-preN].
gVersionRE = re.compile( "v([0-9]+)(?:r([0-9]+))?(?:p([0-9]+))?(?:-pre([0-9]+))?" )

def parseVersionString( version ):
  """Parse 'vXrYpZ[-preN]' into a ( major, minor, patch, pre ) tuple.

  Missing components are None; returns False when the string does not
  look like a version at all.
  """
  match = gVersionRE.match( version.strip() )
  if not match:
    return False
  return tuple( int( group ) if group else None for group in match.groups() )
def writeVersionToInit( rootPath, version ):
  """Rewrite the version constants in <rootPath>/__init__.py to 'version'.

  Silently succeeds (S_OK) when the version string is unparseable or the
  file does not exist; returns S_ERROR only on read/write failures.
  """
  verTup = parseVersionString( version )
  if not verTup:
    return S_OK()
  initFile = os.path.join( rootPath, "__init__.py" )
  if not os.path.isfile( initFile ):
    return S_OK()
  try:
    fd = open( initFile, "r" )
    fileData = fd.read()
    fd.close()
  except Exception, e:
    return S_ERROR( "Could not open %s: %s" % ( initFile, str( e ) ) )
  versionStrings = ( "majorVersion", "minorVersion", "patchLevel", "preVersion" )
  reList = []
  # One substitution per version constant; missing components become 0.
  for iP in range( len( versionStrings ) ):
    if verTup[iP]:
      replStr = "%s = %s" % ( versionStrings[iP], verTup[iP] )
    else:
      replStr = "%s = 0" % versionStrings[iP]
    reList.append( ( re.compile( "^(%s\s*=)\s*[0-9]+\s*" % versionStrings[iP] ), replStr ) )
  newData = []
  for line in fileData.split( "\n" ):
    for reCm, replStr in reList:
      line = reCm.sub( replStr, line )
    newData.append( line )
  try:
    fd = open( initFile, "w" )
    fd.write( "\n".join( newData ) )
    fd.close()
  except Exception, e:
    # NOTE(review): error message reads "Could write to" — probably meant
    # "Could not write to".
    return S_ERROR( "Could write to %s: %s" % ( initFile, str( e ) ) )
  return S_OK()
#
def createTarball( tarballPath, directoryToTar, additionalDirectoriesToTar = None ):
  """Create a gzipped tarball of *directoryToTar* (plus any extra directories)
  and write a companion .md5 checksum file next to the archive.
  Returns S_OK() or S_ERROR( message ).
  """
  if type( additionalDirectoriesToTar ) in ( types.StringType, types.UnicodeType ):
    additionalDirectoriesToTar = [ additionalDirectoriesToTar ]
  tf = tarfile.open( tarballPath, "w:gz" )
  tf.add( directoryToTar, os.path.basename( os.path.abspath( directoryToTar ) ), recursive = True )
  for dirToTar in additionalDirectoriesToTar or []:
    if os.path.isdir( dirToTar ):
      tf.add( dirToTar, os.path.basename( os.path.abspath( dirToTar ) ), recursive = True )
  tf.close()
  #Derive the checksum filename by stripping the archive suffix
  md5FilePath = False
  for suffix in ( ".tar.gz", ".gz" ):
    if tarballPath.endswith( suffix ):
      md5FilePath = "%s.md5" % tarballPath[ :-len( suffix ) ]
      break
  if not md5FilePath:
    return S_ERROR( "Could not generate md5 filename" )
  md5Fd = open( md5FilePath, "w" )
  md5Fd.write( File.getMD5ForFiles( [ tarballPath ] ) )
  md5Fd.close()
  return S_OK()
#Start of release notes
# Note categories recognised at the start of versions.cfg comment lines;
# 'FIX' is folded into 'BUGFIX' through gNoteTypeAlias.
gAllowedNoteTypes = ( "NEW", "CHANGE", "BUGFIX", 'FIX' )
gNoteTypeAlias = { 'FIX' : 'BUGFIX' }
def retrieveReleaseNotes( packages ):
  """Collect release notes from each package's versions.cfg.

  Comment lines starting with a gAllowedNoteTypes tag ("NEW:", "FIX:"...)
  open a note; untagged lines are appended to the most recent note.

  :param packages: package name or list of package names
  :return: dict { package : [ { 'version', 'notes'[, 'comment'] }, ... ] }
  """
  if type( packages ) in ( types.StringType, types.UnicodeType ):
    packages = [ str( packages ) ]
  #Get the versions.cfg
  packageCFGDict = {}
  for package in packages:
    packageCFGDict[ package ] = Distribution( package ).getVersionsCFG()
  #Parse the release notes
  pkgNotesDict = {}
  for package in packageCFGDict:
    versionsCFG = packageCFGDict[ package ][ 'Versions' ]
    pkgNotesDict[ package ] = []
    for mainVersion in versionsCFG.listSections( ordered = True ):
      vCFG = versionsCFG[ mainVersion ]
      versionNotes = {}
      for subsys in vCFG.listOptions():
        comment = vCFG.getComment( subsys )
        if not comment:
          continue
        versionNotes[ subsys ] = {}
        lastCommentType = False
        for line in List.fromChar( comment, "\n" ):
          processedLine = False
          for typeComment in gAllowedNoteTypes:
            if line.find( "%s:" % typeComment ) == 0:
              effectiveType = gNoteTypeAlias.get( typeComment, typeComment )
              if effectiveType not in versionNotes[ subsys ]:
                versionNotes[ subsys ][ effectiveType ] = []
              versionNotes[ subsys ][ effectiveType ].append( line[ len( typeComment ) + 1: ].strip() )
              lastCommentType = effectiveType
              processedLine = True
              #A line can only open one note; stop scanning tags
              break
          if not processedLine and lastCommentType:
            #Continuation line: extend the last note of the last seen type.
            #(Was indexed with the inner-loop variable 'effectiveType', which
            #only worked because it always aliased lastCommentType.)
            versionNotes[ subsys ][ lastCommentType ][-1] += " %s" % line.strip()
      if versionNotes:
        pkgNotesDict[ package ].append( { 'version' : mainVersion, 'notes' : versionNotes } )
        versionComment = versionsCFG.getComment( mainVersion )
        if versionComment:
          pkgNotesDict[ package ][-1][ 'comment' ] = "\n".join( [ l.strip() for l in versionComment.split( "\n" ) ] )
  return pkgNotesDict
def generateReleaseNotes( packages, destinationPath, versionReleased = "", singleVersion = False ):
  """Write an rst-style release notes file for *packages* to *destinationPath*.

  When *versionReleased* is given, notes older than that version are skipped;
  with *singleVersion* only that exact version is emitted.
  """
  if type( packages ) in ( types.StringType, types.UnicodeType ):
    packages = [ str( packages ) ]
  pkgNotesDict = retrieveReleaseNotes( packages )
  fileContents = []
  foundStartVersion = versionReleased == ""
  for package in packages:
    if package not in pkgNotesDict:
      continue
    #Package title section
    header = "Package %s" % package
    rule = "-" * len( header )
    fileContents.extend( [ rule, header, rule ] )
    for versionNotes in pkgNotesDict[ package ]:
      if singleVersion and versionReleased and versionNotes[ 'version' ] != versionReleased:
        continue
      if versionReleased and versionReleased == versionNotes[ 'version' ]:
        foundStartVersion = True
      #Skip until found initial version
      if not foundStartVersion:
        continue
      header = "Version %s" % versionNotes[ 'version' ]
      fileContents.extend( [ "", header, "-" * len( header ) ] )
      if 'comment' in versionNotes:
        fileContents.extend( [ '', versionNotes[ 'comment' ], '' ] )
      for noteType in gAllowedNoteTypes:
        sectionLines = []
        for system in versionNotes[ 'notes' ]:
          if noteType in versionNotes[ 'notes' ][ system ] and versionNotes[ 'notes' ][ system ][ noteType ]:
            sectionLines.append( " %s" % system )
            for note in versionNotes[ 'notes' ][ system ][ noteType ]:
              sectionLines.append( " - %s" % note )
        if sectionLines:
          fileContents.extend( [ "", "%s" % noteType, ":" * len( noteType ), "" ] )
          fileContents.extend( sectionLines )
  fd = open( destinationPath, "w" )
  fd.write( "%s\n\n" % "\n".join( fileContents ) )
  fd.close()
def generateHTMLReleaseNotesFromRST( rstFile, htmlFile ):
  """Render *rstFile* (reStructuredText) into *htmlFile* using docutils.
  Returns True on success, False when docutils is missing or I/O fails.
  """
  try:
    import docutils.core
  except ImportError:
    gLogger.error( "Docutils is not installed, skipping generation of release notes in html format" )
    return False
  try:
    sourceFd = open( rstFile )
    rstData = sourceFd.read()
    sourceFd.close()
  except Exception:
    gLogger.error( "Oops! Could not read the rst file :P" )
    return False
  parts = docutils.core.publish_parts( rstData, writer_name = 'html' )
  try:
    targetFd = open( htmlFile, "w" )
    targetFd.write( parts[ 'whole' ] )
    targetFd.close()
  except Exception:
    gLogger.error( "Oops! Could not write the html file :P" )
    return False
  return True
|
gpl-3.0
|
gurneyalex/vertical-travel
|
railway_station/res_partner.py
|
2
|
1263
|
# -*- encoding: utf-8 -*-
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import orm, fields
class res_partner(orm.Model):
    """
    Inherits res.partner and adds a railway_station flag to the partner
    form
    """
    _inherit = 'res.partner'
    _columns = {
        # True when this partner record represents a railway station
        'railway_station': fields.boolean('Railway Station'),
    }
    _defaults = {
        # unchecked by default (0 is falsy, equivalent to False here)
        'railway_station': 0,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
JianfengXu/crosswalk-test-suite
|
webapi/tct-navigationtiming-w3c-tests/inst.wgt.py
|
44
|
6786
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory this script lives in; the widget package name is derived from it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Parsed command-line options; populated in main().
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Device-side staging paths; filled in by main() once the user is known.
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, streaming its output; return (exit_code, lines).

    Python 2 only: uses print statements and str pipes.
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        # Read one line at a time so output is echoed as it arrives.
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Empty line with a set return code means the process has exited
        # and the pipe is drained.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations so they run as PARAMETERS.user with XW_ENV set;
    any other command is returned unchanged."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Query the numeric uid of PARAMETERS.user on the device; returns the
    (exit_code, output_lines) pair from doCMD()."""
    if PARAMETERS.mode == "SDB":
        template = "sdb -s %s shell id -u %s"
    else:
        template = "ssh %s \"id -u %s\""
    return doCMD(template % (PARAMETERS.device, PARAMETERS.user))
def getPKGID(pkg_name=None):
    """Look up the package id for *pkg_name* in the device's `pkgcmd -l`
    listing; returns None when the listing fails or the name is absent."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    needle = "[" + pkg_name + "]"
    for line in output:
        if needle in line:
            fields = line.split()
            return fields[fields.index("pkgid") + 1].strip("[]")
    return None
def doRemoteCMD(cmd=None):
    """Run *cmd* on the target device through sdb shell or ssh."""
    if PARAMETERS.mode == "SDB":
        full_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        full_cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(full_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the device (sdb push or scp), then sync.

    Returns True on success, False on failure. The original returned the
    inverted value (True when the copy command failed), which made every
    caller's `if not doRemoteCopy(...): action_status = False` failure
    check flag successful copies as failures and ignore real ones.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush device filesystem buffers regardless of the copy outcome.
    doRemoteCMD("sync")
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt package found under SCRIPT_DIR and remove the
    pushed resource tree; returns True only if all steps succeeded."""
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if not file.endswith(".wgt"):
                continue
            pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
            if not pkg_id:
                action_status = False
                continue
            (return_code, output) = doRemoteCMD(
                "pkgcmd -u -t wgt -q -n %s" % pkg_id)
            for line in output:
                if "Failure" in line:
                    action_status = False
                    break
    # Clean up the resources copied over at install time.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Install every .wgt under SCRIPT_DIR on the device and push the
    remaining test resources; returns True only if all steps succeeded."""
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if not file.endswith(".wgt"):
                continue
            if not doRemoteCopy(
                    os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                action_status = False
            (return_code, output) = doRemoteCMD(
                "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
            doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
            for line in output:
                if "Failure" in line:
                    action_status = False
                    break
    # Push everything that is neither a widget nor this install script.
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        if item.endswith(".wgt") or item.endswith("inst.py"):
            continue
        item_name = os.path.basename(item)
        if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
            action_status = False
    return action_status
def main():
    """Parse the command line, locate the target device, resolve the test
    user's uid, then install (-i) or uninstall (-u) the widget packages.

    Python 2 only (print statements).
    """
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # Default test user and the device-side paths derived from it.
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # No device given: pick the first one reported by `sdb devices`.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # Resolve the uid so pkgcmd can be run in that user's session bus.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0:
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
            userid)
    else:
        print "[Error] cmd commands error : %s" % str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    main()
    sys.exit(0)
|
bsd-3-clause
|
cubledesarrollo/cubledotes
|
cuble/static/vendor/bootstrap/test-infra/s3_cache.py
|
1700
|
3523
|
#!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
# Marker file dropped when a cache download misses, so a later `upload`
# run knows it must (re)populate the S3 cache.
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
    BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
    raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
    """Delete *filename*, silently ignoring a missing or undeletable file."""
    try:
        _delete_file(filename)
    except (OSError, IOError):
        pass
def _tarball_size(directory):
    """Return the cache tarball's size as a human-readable string, e.g. "5 MiB"."""
    # The local was misleadingly named 'kib' although the divisor is
    # BYTES_PER_MB and the formatted unit is MiB.
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    """Pack *directory* into ./<basename>.tar.gz (relative to its parent)."""
    print("Creating tarball of {}...".format(directory))
    tar_args = ['tar', '-czf', _tarball_filename_for(directory),
                '-C', dirname(directory), basename(directory)]
    run(tar_args)
def _extract_tarball(directory):
    """Unpack ./<basename>.tar.gz into *directory*'s parent directory."""
    print("Extracting tarball of {}...".format(directory))
    tar_args = ['tar', '-xzf', _tarball_filename_for(directory),
                '-C', dirname(directory)]
    run(tar_args)
def download(directory):
    """Fetch the cached tarball from S3 and unpack it over *directory*.

    On an S3 error, leaves NEED_TO_UPLOAD_MARKER behind (so a later upload
    run repopulates the cache) and aborts the process.
    """
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
    tarball = _tarball_filename_for(directory)
    try:
        print("Downloading {} tarball from S3...".format(friendly_name))
        key.get_contents_to_filename(tarball)
    except S3ResponseError as err:
        open(NEED_TO_UPLOAD_MARKER, 'a').close()
        print(err)
        raise SystemExit("Cached {} download failed!".format(friendly_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
    """Tar up *directory*, push it to S3, then clear the upload marker."""
    _create_tarball(directory)
    size = _tarball_size(directory)
    print("Uploading {} tarball to S3... ({})".format(friendly_name, size))
    key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(friendly_name))
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
    # Uses environment variables:
    # AWS_ACCESS_KEY_ID -- AWS Access Key ID
    # AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 4:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
    mode, friendly_name, dependencies_file, directory = argv
    conn = S3Connection()
    bucket = conn.lookup(BUCKET_NAME, validate=False)
    if bucket is None:
        raise SystemExit("Could not access bucket!")
    # The S3 key is the hash of the dependencies file, so any change to the
    # dependencies automatically invalidates the cache.
    dependencies_file_hash = _sha256_of_file(dependencies_file)
    key = Key(bucket, dependencies_file_hash)
    key.storage_class = 'REDUCED_REDUNDANCY'
    if mode == 'download':
        download(directory)
    elif mode == 'upload':
        # Only upload when a previous download run flagged a cache miss.
        if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
            upload(directory)
        else:
            print("No need to upload anything.")
    else:
        raise SystemExit("Unrecognized mode {!r}".format(mode))
|
mit
|
ProfessorKaos64/openlierox
|
tools/DedicatedServerVideo/gdata/books/__init__.py
|
124
|
18532
|
#!/usr/bin/python
"""
Data Models for books.service
All classes can be instantiated from an xml string using their FromString
class method.
Notes:
* Book.title displays the first dc:title because the returned XML
repeats that datum as atom:title.
There is an undocumented gbs:openAccess element that is not parsed.
"""
__author__ = "James Sams <[email protected]>"
__copyright__ = "Apache License v2.0"
import atom
import gdata
BOOK_SEARCH_NAMESPACE = 'http://schemas.google.com/books/2008'
DC_NAMESPACE = 'http://purl.org/dc/terms'
# atom:link @rel values used to pick specific links out of an entry.
ANNOTATION_REL = "http://schemas.google.com/books/2008/annotation"
INFO_REL = "http://schemas.google.com/books/2008/info"
# atom:category @scheme used for user labels (see Book.get_label).
LABEL_SCHEME = "http://schemas.google.com/books/2008/labels"
PREVIEW_REL = "http://schemas.google.com/books/2008/preview"
THUMBNAIL_REL = "http://schemas.google.com/books/2008/thumbnail"
# Possible Viewability.value URIs.
FULL_VIEW = "http://schemas.google.com/books/2008#view_all_pages"
PARTIAL_VIEW = "http://schemas.google.com/books/2008#view_partial"
NO_VIEW = "http://schemas.google.com/books/2008#view_no_pages"
UNKNOWN_VIEW = "http://schemas.google.com/books/2008#view_unknown"
# Possible Embeddability.value URIs.
EMBEDDABLE = "http://schemas.google.com/books/2008#embeddable"
NOT_EMBEDDABLE = "http://schemas.google.com/books/2008#not_embeddable"
class _AtomFromString(atom.AtomBase):
    # Mixin giving every model class below a FromString(xml_string)
    # alternate constructor.
    #@classmethod
    def FromString(cls, s):
        """Instantiate this class from the XML string *s*."""
        return atom.CreateClassFromXMLString(cls, s)
    # classmethod() call kept instead of the decorator for old-Python compat.
    FromString = classmethod(FromString)
class Creator(_AtomFromString):
    """Data model for the <dc:creator> element.

    Identifies an entity — person, organization, or service — responsible
    for creating the volume; for anthologies or edited works this may be
    an editor or collector. A volume <entry> carries one <dc:creator>
    child per creator or contributor.
    """
    _tag = 'creator'
    _namespace = DC_NAMESPACE
class Date(_AtomFromString): #iso 8601 / W3CDTF profile
    """Data model for the <dc:date> element.

    Publication date of this specific volume (the reprint date for
    reprints), encoded per ISO-8601 / the W3CDTF profile:
    YYYY-MM-DDThh:mm:ssTZD with TZD = -hh:mm or +hh:mm. Usually only the
    year, or year and month, are given. Child of <entry> only.
    """
    _tag = 'date'
    _namespace = DC_NAMESPACE
class Description(_AtomFromString):
    """Data model for the <dc:description> element.

    Text describing a book: in a search-result feed this may be a snippet
    around the user's search terms; in a single-volume feed it may be a
    synopsis. Child of <entry> only.
    """
    _tag = 'description'
    _namespace = DC_NAMESPACE
class Format(_AtomFromString):
    """Data model for the <dc:format> element.

    Physical properties of the volume — currently the page count, though
    more information may be added in the future. Child of <entry> only.
    """
    _tag = 'format'
    _namespace = DC_NAMESPACE
class Identifier(_AtomFromString):
    """Data model for the <dc:identifier> element.

    Unambiguous reference to a particular book. Every <entry> has at least
    one; the first is always the unique Book Search volume ID (e.g.
    s1gVAAAAYAAJ) that appears in the book's URLs, while any further ones
    carry external identifiers (ISBNs, ISSNs, LCCNs, OCLC numbers)
    prefixed with their namespace, such as "ISBN:". Any of them can be
    passed to the Dynamic Links API, used to instantiate an Embedded
    Viewer, or used to build static Book Search links.
    """
    _tag = 'identifier'
    _namespace = DC_NAMESPACE
class Publisher(_AtomFromString):
    """Data model for the <dc:publisher> element.

    Name of the entity — person, organization, or service — that produced
    and distributed this edition of the book. May appear multiple times
    when there is more than one publisher. Child of <entry> only.
    """
    _tag = 'publisher'
    _namespace = DC_NAMESPACE
class Subject(_AtomFromString):
    """Data model for the <dc:subject> element.

    Topic of the book, usually a Library of Congress Subject Heading
    (LCSH) or a BISAC subject heading; an <entry> may carry several.
    """
    _tag = 'subject'
    _namespace = DC_NAMESPACE
class Title(_AtomFromString):
    """Data model for the <dc:title> element.

    Published title of the book; a subtitle, when present, arrives as a
    second <dc:title> element in the same result <entry>.
    """
    _tag = 'title'
    _namespace = DC_NAMESPACE
class Viewability(_AtomFromString):
    """Data model for the <gbs:viewability> element.

    Local copyright restrictions mean a book may be fully viewable,
    previewable, or limited to "about the book" information. The ``value``
    attribute holds one of the FULL_VIEW / PARTIAL_VIEW / NO_VIEW /
    UNKNOWN_VIEW URIs — the same modes the Dynamic Links API returns.
    Child of <entry> only.
    """
    _tag = 'viewability'
    _namespace = BOOK_SEARCH_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None, text=None,
                 extension_elements=None, extension_attributes=None):
        # 'value' is the viewability URI; everything else goes to the base.
        self.value = value
        _AtomFromString.__init__(
            self, text=text, extension_elements=extension_elements,
            extension_attributes=extension_attributes)
class Embeddability(_AtomFromString):
    """Data model for the <gbs:embeddability> element.

    Says whether the volume may be embedded on third-party sites using the
    Embedded Viewer; by definition a book that cannot be previewed cannot
    be embedded. ``value`` is either the EMBEDDABLE or NOT_EMBEDDABLE URI.
    Child of <entry> only.
    """
    _tag = 'embeddability'
    _namespace = BOOK_SEARCH_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None, text=None, extension_elements=None,
                 extension_attributes=None):
        self.value = value
        _AtomFromString.__init__(
            self, text=text, extension_elements=extension_elements,
            extension_attributes=extension_attributes)
class Review(_AtomFromString):
    """Data model for the <gbs:review> element: a user-written review.

    Currently seen only in user library and user annotation feeds, as a
    child of <entry>. ``type`` is one of text/html/xhtml and ``lang`` an
    xml:lang language id.
    """
    _tag = 'review'
    _namespace = BOOK_SEARCH_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['type'] = 'type'
    _attributes['{http://www.w3.org/XML/1998/namespace}lang'] = 'lang'

    def __init__(self, type=None, lang=None, text=None,
                 extension_elements=None, extension_attributes=None):
        self.type = type
        self.lang = lang
        _AtomFromString.__init__(
            self, text=text, extension_elements=extension_elements,
            extension_attributes=extension_attributes)
class Rating(_AtomFromString):
    """Data model for the rating element.

    All attributes are integral strings between 1 and 5. ``min``, ``max``
    and ``average`` describe 'community' ratings; ``value`` is the rating
    given by the owner of the feed the entry came from (not necessarily
    the authenticated user).
    """
    _tag = 'rating'
    _namespace = gdata.GDATA_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['min'] = 'min'
    _attributes['max'] = 'max'
    _attributes['average'] = 'average'
    _attributes['value'] = 'value'

    def __init__(self, min=None, max=None, average=None, value=None, text=None,
                 extension_elements=None, extension_attributes=None):
        self.min = min
        self.max = max
        self.average = average
        self.value = value
        _AtomFromString.__init__(
            self, text=text, extension_elements=extension_elements,
            extension_attributes=extension_attributes)
class Book(_AtomFromString, gdata.GDataEntry):
    """
    Represents an <entry> from either a search, annotation, library, or single
    item feed. Note that dc_title attribute is the proper title of the volume,
    title is an atom element and may not represent the full title.
    """
    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    # Repeatable children are registered with a list type ([i]); the ones
    # below that may appear at most once are registered as bare classes.
    for i in (Creator, Identifier, Publisher, Subject,):
        _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, [i])
    for i in (Date, Description, Format, Viewability, Embeddability,
            Review, Rating): # Review, Rating maybe only in anno/lib entrys
        _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, i)
    # there is an atom title as well, should we clobber that?
    del(i)
    # dc:title maps to the 'dc_title' attribute to avoid clashing with the
    # inherited atom:title.
    _children['{%s}%s' % (Title._namespace, Title._tag)] = ('dc_title', [Title])
    def to_dict(self):
        """Returns a dictionary of the book's available metadata. If the data
        cannot be discovered, it is not included as a key in the returned dict.
        The possible keys are: authors, embeddability, date, description,
        format, identifiers, publishers, rating, review, subjects, title, and
        viewability.
        Notes:
          * Plural keys will be lists
          * Singular keys will be strings
          * Title, despite usually being a list, joins the title and subtitle
            with a space as a single string.
          * embeddability and viewability only return the portion of the URI
            after #
          * identifiers is a list of tuples, where the first item of each tuple
            is the type of identifier and the second item is the identifying
            string. Note that while doing dict() on this tuple may be possible,
            some items may have multiple of the same identifier and converting
            to a dict may resulted in collisions/dropped data.
          * Rating returns only the user's rating. See Rating class for precise
            definition.
        """
        d = {}
        if self.GetAnnotationLink():
            d['annotation'] = self.GetAnnotationLink().href
        if self.creator:
            d['authors'] = [x.text for x in self.creator]
        if self.embeddability:
            d['embeddability'] = self.embeddability.value.split('#')[-1]
        if self.date:
            d['date'] = self.date.text
        if self.description:
            d['description'] = self.description.text
        if self.format:
            d['format'] = self.format.text
        if self.identifier:
            # First identifier is always the Book Search volume id; the rest
            # are external ids of the form "TYPE:value".
            d['identifiers'] = [('google_id', self.identifier[0].text)]
            for x in self.identifier[1:]:
                l = x.text.split(':') # should we lower the case of the ids?
                d['identifiers'].append((l[0], ':'.join(l[1:])))
        if self.GetInfoLink():
            d['info'] = self.GetInfoLink().href
        if self.GetPreviewLink():
            d['preview'] = self.GetPreviewLink().href
        if self.publisher:
            d['publishers'] = [x.text for x in self.publisher]
        if self.rating:
            d['rating'] = self.rating.value
        if self.review:
            d['review'] = self.review.text
        if self.subject:
            d['subjects'] = [x.text for x in self.subject]
        if self.GetThumbnailLink():
            d['thumbnail'] = self.GetThumbnailLink().href
        if self.dc_title:
            d['title'] = ' '.join([x.text for x in self.dc_title])
        if self.viewability:
            d['viewability'] = self.viewability.value.split('#')[-1]
        return d
    def __init__(self, creator=None, date=None,
            description=None, format=None, author=None, identifier=None,
            publisher=None, subject=None, dc_title=None, viewability=None,
            embeddability=None, review=None, rating=None, category=None,
            content=None, contributor=None, atom_id=None, link=None,
            published=None, rights=None, source=None, summary=None,
            title=None, control=None, updated=None, text=None,
            extension_elements=None, extension_attributes=None):
        # Book-specific children; everything else is forwarded to GDataEntry.
        self.creator = creator
        self.date = date
        self.description = description
        self.format = format
        self.identifier = identifier
        self.publisher = publisher
        self.subject = subject
        # dc_title is always a list; it may hold a title plus a subtitle.
        self.dc_title = dc_title or []
        self.viewability = viewability
        self.embeddability = embeddability
        self.review = review
        self.rating = rating
        gdata.GDataEntry.__init__(self, author=author, category=category,
            content=content, contributor=contributor, atom_id=atom_id,
            link=link, published=published, rights=rights, source=source,
            summary=summary, title=title, control=control, updated=updated,
            text=text, extension_elements=extension_elements,
            extension_attributes=extension_attributes)
    def GetThumbnailLink(self):
        """Returns the atom.Link object representing the thumbnail URI."""
        for i in self.link:
            if i.rel == THUMBNAIL_REL:
                return i
    def GetInfoLink(self):
        """
        Returns the atom.Link object representing the human-readable info URI.
        """
        for i in self.link:
            if i.rel == INFO_REL:
                return i
    def GetPreviewLink(self):
        """Returns the atom.Link object representing the preview URI."""
        for i in self.link:
            if i.rel == PREVIEW_REL:
                return i
    def GetAnnotationLink(self):
        """
        Returns the atom.Link object representing the Annotation URI.
        Note that the use of www.books in the href of this link seems to make
        this information useless. Using books.service.ANNOTATION_FEED and
        BOOK_SERVER to construct your URI seems to work better.
        """
        for i in self.link:
            if i.rel == ANNOTATION_REL:
                return i
    def set_rating(self, value):
        """Set user's rating. Must be an integral string between 1 nad 5"""
        assert (value in ('1','2','3','4','5'))
        if not isinstance(self.rating, Rating):
            self.rating = Rating()
        self.rating.value = value
    def set_review(self, text, type='text', lang='en'):
        """Set user's review text"""
        self.review = Review(text=text, type=type, lang=lang)
    def get_label(self):
        """Get users label for the item as a string"""
        for i in self.category:
            if i.scheme == LABEL_SCHEME:
                return i.term
    def set_label(self, term):
        """Clear pre-existing label for the item and set term as the label."""
        self.remove_label()
        self.category.append(atom.Category(term=term, scheme=LABEL_SCHEME))
    def remove_label(self):
        """Clear the user's label for the item"""
        # Iterate a reversed copy so deletions don't disturb the traversal.
        ln = len(self.category)
        for i, j in enumerate(self.category[::-1]):
            if j.scheme == LABEL_SCHEME:
                del(self.category[ln-1-i])
    def clean_annotations(self):
        """Clear all annotations from an item. Useful for taking an item from
        another user's library/annotation feed and adding it to the
        authenticated user's library without adopting annotations."""
        self.remove_label()
        self.review = None
        self.rating = None
    def get_google_id(self):
        """Get Google's ID of the item."""
        return self.id.text.split('/')[-1]
class BookFeed(_AtomFromString, gdata.GDataFeed):
    """An atom feed whose entries are Book instances (e.g. search results)."""
    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _children['{%s}%s' % (Book._namespace, Book._tag)] = (Book._tag, [Book])
if __name__ == '__main__':
    # Run the module's doctest suite from the companion datamodels.txt file.
    import doctest
    doctest.testfile('datamodels.txt')
|
lgpl-2.1
|
hgl888/chromium-crosswalk-efl
|
net/tools/testserver/minica.py
|
78
|
10487
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import asn1
import hashlib
import os
# This file implements very minimal certificate and OCSP generation. It's
# designed to test revocation checking.
def RandomNumber(length_in_bytes):
  '''RandomNumber returns a random number of length 8*|length_in_bytes| bits'''
  # Fold os.urandom's bytes big-endian into a single integer.
  # (Python 2 only: iterating a str yields 1-char strings for ord().)
  rand = os.urandom(length_in_bytes)
  n = 0
  for x in rand:
    n <<= 8
    n |= ord(x)
  return n
def ModExp(n, e, p):
  '''ModExp returns n^e mod p (e must be non-negative).'''
  # Delegate to the built-in three-argument pow(), which performs the same
  # square-and-multiply modular exponentiation in C instead of a Python
  # loop. (The original loop also never terminated for negative e.)
  return pow(n, e, p)
# PKCS1v15_SHA256_PREFIX is the ASN.1 prefix for a SHA256 signature.
# (The DigestInfo header bytes; note the 'hex' str codec is Python 2 only.)
PKCS1v15_SHA256_PREFIX = '3031300d060960864801650304020105000420'.decode('hex')
class RSA(object):
  # Minimal RSA: raw modular exponentiation plus PKCS#1 v1.5 SHA-256
  # signing. Python 2 only (byte strings are iterated as 1-char strings).
  def __init__(self, modulus, e, d):
    '''Store a raw key: modulus, public exponent e, private exponent d.'''
    self.m = modulus
    self.e = e
    self.d = d
    # modlen counts the modulus length in bytes; it sizes the signature
    # padding and the final zero-padded output.
    self.modlen = 0
    m = modulus
    while m != 0:
      self.modlen += 1
      m >>= 8
  def Sign(self, message):
    '''Return the PKCS#1 v1.5 signature of SHA-256(message).'''
    digest = hashlib.sha256(message).digest()
    prefix = PKCS1v15_SHA256_PREFIX
    # Build the padded block: 00 01 FF..FF 00 || DigestInfo || digest.
    em = ['\xff'] * (self.modlen - 1 - len(prefix) - len(digest))
    em[0] = '\x00'
    em[1] = '\x01'
    em += "\x00" + prefix + digest
    # Convert the block to a big-endian integer...
    n = 0
    for x in em:
      n <<= 8
      n |= ord(x)
    # ...apply the private exponent...
    s = ModExp(n, self.d, self.m)
    # ...and serialize back to bytes, left-padded with zeros to modlen.
    out = []
    while s != 0:
      out.append(s & 0xff)
      s >>= 8
    out.reverse()
    return '\x00' * (self.modlen - len(out)) + asn1.ToBytes(out)
  def ToDER(self):
    '''Return the DER encoding of the public key: SEQUENCE { n, e }.'''
    return asn1.ToDER(asn1.SEQUENCE([self.m, self.e]))
def Name(cn = None, c = None, o = None):
  """Build an X.501 Name from the given attribute values.

  Each non-None argument contributes one RDN, in the order:
  cn -> commonName, c -> countryName, o -> organizationName.
  """
  names = asn1.SEQUENCE([])
  for attr_type, value in ((COMMON_NAME, cn), (COUNTRY, c), (ORGANIZATION, o)):
    if value is not None:
      names.children.append(
        asn1.SET([
          asn1.SEQUENCE([
            attr_type, value,
          ])
        ])
      )
  return names
# The private key and root certificate name are hard coded here:
# This is the private key
KEY = RSA(0x00a71998f2930bfe73d031a87f133d2f378eeeeed52a77e44d0fc9ff6f07ff32cbf3da999de4ed65832afcb0807f98787506539d258a0ce3c2c77967653099a9034a9b115a876c39a8c4e4ed4acd0c64095946fb39eeeb47a0704dbb018acf48c3a1c4b895fc409fb4a340a986b1afc45519ab9eca47c30185c771c64aa5ecf07d,
3,
0x6f6665f70cb2a9a28acbc5aa0cd374cfb49f49e371a542de0a86aa4a0554cc87f7e71113edf399021ca875aaffbafaf8aee268c3b15ded2c84fb9a4375bbc6011d841e57833bc6f998d25daf6fa7f166b233e3e54a4bae7a5aaaba21431324967d5ff3e1d4f413827994262115ca54396e7068d0afa7af787a5782bc7040e6d3)
# And the same thing in PEM format
KEY_PEM = '''-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQCnGZjykwv+c9AxqH8TPS83ju7u1Sp35E0Pyf9vB/8yy/PamZ3k
7WWDKvywgH+YeHUGU50ligzjwsd5Z2UwmakDSpsRWodsOajE5O1KzQxkCVlG+znu
60egcE27AYrPSMOhxLiV/ECftKNAqYaxr8RVGaueykfDAYXHccZKpezwfQIBAwKB
gG9mZfcMsqmiisvFqgzTdM+0n0njcaVC3gqGqkoFVMyH9+cRE+3zmQIcqHWq/7r6
+K7iaMOxXe0shPuaQ3W7xgEdhB5XgzvG+ZjSXa9vp/FmsjPj5UpLrnpaqrohQxMk
ln1f8+HU9BOCeZQmIRXKVDlucGjQr6eveHpXgrxwQObTAkEA2wBAfuduw5G0/VfN
Wx66D5fbPccfYFqLM5LuTimLmNqzK2gIKXckB2sm44gJZ6wVlumaB1CSNug2LNYx
3cAjUwJBAMNUo1hbI8ugqqwI9kpxv9+2Heea4BlnXbS6tYF8pvkHMoliuxNbXmmB
u4zNB5iZ6V0ZZ4nvtUNo2cGr/h/Lcu8CQQCSACr/RPSCYSNTj948vya1D+d+hL+V
kbIiYfQ0G7Jl5yIc8AVw+hgE8hntBVuacrkPRmaviwwkms7IjsvpKsI3AkEAgjhs
5ZIX3RXHHVtO3EvVP86+mmdAEO+TzdHOVlMZ+1ohsOx8t5I+8QEnszNaZbvw6Lua
W/UjgkXmgR1UFTJMnwJBAKErmAw21/g3SST0a4wlyaGT/MbXL8Ouwnb5IOKQVe55
CZdeVeSh6cJ4hAcQKfr2s1JaZTJFIBPGKAif5HqpydA=
-----END RSA PRIVATE KEY-----
'''
# Root certificate CN
ISSUER_CN = "Testing CA"
# All certificates are issued under this policy OID, in the Google arc:
CERT_POLICY_OID = asn1.OID([1, 3, 6, 1, 4, 1, 11129, 2, 4, 1])
# These result in the following root certificate:
# -----BEGIN CERTIFICATE-----
# MIIB0TCCATqgAwIBAgIBATANBgkqhkiG9w0BAQUFADAVMRMwEQYDVQQDEwpUZXN0aW5nIENBMB4X
# DTEwMDEwMTA2MDAwMFoXDTMyMTIwMTA2MDAwMFowFTETMBEGA1UEAxMKVGVzdGluZyBDQTCBnTAN
# BgkqhkiG9w0BAQEFAAOBiwAwgYcCgYEApxmY8pML/nPQMah/Ez0vN47u7tUqd+RND8n/bwf/Msvz
# 2pmd5O1lgyr8sIB/mHh1BlOdJYoM48LHeWdlMJmpA0qbEVqHbDmoxOTtSs0MZAlZRvs57utHoHBN
# uwGKz0jDocS4lfxAn7SjQKmGsa/EVRmrnspHwwGFx3HGSqXs8H0CAQOjMzAxMBIGA1UdEwEB/wQI
# MAYBAf8CAQAwGwYDVR0gAQEABBEwDzANBgsrBgEEAdZ5AgHODzANBgkqhkiG9w0BAQUFAAOBgQA/
# STb40A6D+93jMfLGQzXc997IsaJZdoPt7tYa8PqGJBL62EiTj+erd/H5pDZx/2/bcpOG4m9J56yg
# wOohbllw2TM+oeEd8syzV6X+1SIPnGI56JRrm3UXcHYx1Rq5loM9WKAiz/WmIWmskljsEQ7+542p
# q0pkHjs8nuXovSkUYA==
# -----END CERTIFICATE-----
# If you update any of the above, you can generate a new root with the
# following line:
# print DERToPEM(MakeCertificate(ISSUER_CN, ISSUER_CN, 1, KEY, KEY, None))
# Various OIDs
AIA_OCSP = asn1.OID([1, 3, 6, 1, 5, 5, 7, 48, 1])
AUTHORITY_INFORMATION_ACCESS = asn1.OID([1, 3, 6, 1, 5, 5, 7, 1, 1])
BASIC_CONSTRAINTS = asn1.OID([2, 5, 29, 19])
CERT_POLICIES = asn1.OID([2, 5, 29, 32])
COMMON_NAME = asn1.OID([2, 5, 4, 3])
COUNTRY = asn1.OID([2, 5, 4, 6])
HASH_SHA1 = asn1.OID([1, 3, 14, 3, 2, 26])
OCSP_TYPE_BASIC = asn1.OID([1, 3, 6, 1, 5, 5, 7, 48, 1, 1])
ORGANIZATION = asn1.OID([2, 5, 4, 10])
PUBLIC_KEY_RSA = asn1.OID([1, 2, 840, 113549, 1, 1, 1])
SHA256_WITH_RSA_ENCRYPTION = asn1.OID([1, 2, 840, 113549, 1, 1, 11])
def MakeCertificate(
    issuer_cn, subject_cn, serial, pubkey, privkey, ocsp_url = None):
  '''MakeCertificate returns a DER encoded certificate, signed by privkey.

  When issuer_cn == subject_cn a self-signed root (CA) certificate is
  produced; otherwise a leaf with default country/org fields.  If ocsp_url
  is not None an AuthorityInformationAccess extension pointing at that OCSP
  responder is included.
  '''
  extensions = asn1.SEQUENCE([])
  # Default subject name fields for leaf certificates.
  c = "XX"
  o = "Testing Org"
  if issuer_cn == subject_cn:
    # Root certificate: drop country/org and mark it as a CA via a critical
    # basicConstraints extension with path length 0.
    # BUG FIX: this previously referenced the undefined lowercase name
    # `basic_constraints`, raising NameError whenever a root was generated.
    c = None
    o = None
    extensions.children.append(
      asn1.SEQUENCE([
        BASIC_CONSTRAINTS,
        True,  # critical
        asn1.OCTETSTRING(asn1.ToDER(asn1.SEQUENCE([
          True, # IsCA
          0, # Path len
        ]))),
      ]))
  if ocsp_url is not None:
    # authorityInfoAccess with a single id-ad-ocsp accessDescription.
    extensions.children.append(
      asn1.SEQUENCE([
        AUTHORITY_INFORMATION_ACCESS,
        False,  # not critical
        asn1.OCTETSTRING(asn1.ToDER(asn1.SEQUENCE([
          asn1.SEQUENCE([
            AIA_OCSP,
            # 0x86: context tag 6 = uniformResourceIdentifier GeneralName.
            asn1.Raw(asn1.TagAndLength(0x86, len(ocsp_url)) + ocsp_url),
          ]),
        ]))),
      ]))
  # Every certificate carries the test policy OID.
  extensions.children.append(
    asn1.SEQUENCE([
      CERT_POLICIES,
      False,  # not critical
      asn1.OCTETSTRING(asn1.ToDER(asn1.SEQUENCE([
        asn1.SEQUENCE([ # PolicyInformation
          CERT_POLICY_OID,
        ]),
      ]))),
    ])
  )
  tbsCert = asn1.ToDER(asn1.SEQUENCE([
    asn1.Explicit(0, 2), # Version (2 == v3)
    serial,
    asn1.SEQUENCE([SHA256_WITH_RSA_ENCRYPTION, None]), # SignatureAlgorithm
    Name(cn = issuer_cn), # Issuer
    asn1.SEQUENCE([ # Validity
      asn1.UTCTime("100101060000Z"), # NotBefore
      asn1.UTCTime("321201060000Z"), # NotAfter
    ]),
    Name(cn = subject_cn, c = c, o = o), # Subject
    asn1.SEQUENCE([ # SubjectPublicKeyInfo
      asn1.SEQUENCE([ # Algorithm
        PUBLIC_KEY_RSA,
        None,
      ]),
      asn1.BitString(asn1.ToDER(pubkey)),
    ]),
    asn1.Explicit(3, extensions),
  ]))
  return asn1.ToDER(asn1.SEQUENCE([
    asn1.Raw(tbsCert),
    asn1.SEQUENCE([
      SHA256_WITH_RSA_ENCRYPTION,
      None,
    ]),
    asn1.BitString(privkey.Sign(tbsCert)),
  ]))
def MakeOCSPResponse(issuer_cn, issuer_key, serial, ocsp_state):
  '''Return a DER encoded, signed OCSPResponse reporting |ocsp_state| for
  certificate |serial|, issued and signed by |issuer_key|.'''
  # https://tools.ietf.org/html/rfc2560
  issuer_name_hash = asn1.OCTETSTRING(
      hashlib.sha1(asn1.ToDER(Name(cn = issuer_cn))).digest())
  issuer_key_hash = asn1.OCTETSTRING(
      hashlib.sha1(asn1.ToDER(issuer_key)).digest())
  # CertStatus is a CHOICE: [0] good / [1] revoked / [2] unknown.
  cert_status = None
  if ocsp_state == OCSP_STATE_REVOKED:
    cert_status = asn1.Explicit(1, asn1.GeneralizedTime("20100101060000Z"))
  elif ocsp_state == OCSP_STATE_UNKNOWN:
    # Empty value with context tag 2 (unknown).
    cert_status = asn1.Raw(asn1.TagAndLength(0x80 | 2, 0))
  elif ocsp_state == OCSP_STATE_GOOD:
    # Empty value with context tag 0 (good).
    cert_status = asn1.Raw(asn1.TagAndLength(0x80 | 0, 0))
  else:
    raise ValueError('Bad OCSP state: ' + str(ocsp_state))
  basic_resp_data_der = asn1.ToDER(asn1.SEQUENCE([
    asn1.Explicit(2, issuer_key_hash),
    asn1.GeneralizedTime("20100101060000Z"), # producedAt
    asn1.SEQUENCE([
      asn1.SEQUENCE([ # SingleResponse
        asn1.SEQUENCE([ # CertID
          asn1.SEQUENCE([ # hashAlgorithm
            HASH_SHA1,
            None,
          ]),
          issuer_name_hash,
          issuer_key_hash,
          serial,
        ]),
        cert_status,
        asn1.GeneralizedTime("20100101060000Z"), # thisUpdate
        asn1.Explicit(0, asn1.GeneralizedTime("20300101060000Z")), # nextUpdate
      ]),
    ]),
  ]))
  # BasicOCSPResponse: the response data plus a signature by the issuer key.
  basic_resp = asn1.SEQUENCE([
    asn1.Raw(basic_resp_data_der),
    asn1.SEQUENCE([
      SHA256_WITH_RSA_ENCRYPTION,
      None,
    ]),
    asn1.BitString(issuer_key.Sign(basic_resp_data_der)),
  ])
  # Outer OCSPResponse: responseStatus successful(0), wrapping basic_resp.
  resp = asn1.SEQUENCE([
    asn1.ENUMERATED(0),
    asn1.Explicit(0, asn1.SEQUENCE([
      OCSP_TYPE_BASIC,
      asn1.OCTETSTRING(asn1.ToDER(basic_resp)),
    ]))
  ])
  return asn1.ToDER(resp)
def DERToPEM(der):
  '''DERToPEM returns |der| wrapped in a PEM CERTIFICATE block.'''
  # The previous implementation used der.encode('base64'), a codec that only
  # exists on Python 2.  base64.encodestring/encodebytes produce the same
  # output (base64 with a newline every 76 characters plus a trailing
  # newline), and one of the two is available on every Python version.
  import base64
  b64encode = getattr(base64, 'encodebytes', None) or base64.encodestring
  body = b64encode(der)
  if not isinstance(body, str):
    body = body.decode('ascii')
  pem = '-----BEGIN CERTIFICATE-----\n'
  pem += body
  pem += '-----END CERTIFICATE-----\n'
  return pem
OCSP_STATE_GOOD = 1
OCSP_STATE_REVOKED = 2
OCSP_STATE_INVALID = 3
OCSP_STATE_UNAUTHORIZED = 4
OCSP_STATE_UNKNOWN = 5
# unauthorizedDER is an OCSPResponse with a status of 6:
# SEQUENCE { ENUM(6) }
unauthorizedDER = '30030a0106'.decode('hex')
def GenerateCertKeyAndOCSP(subject = "127.0.0.1",
                           ocsp_url = "http://127.0.0.1",
                           ocsp_state = OCSP_STATE_GOOD,
                           serial = 0):
  '''GenerateCertKeyAndOCSP returns a (cert_and_key_pem, ocsp_der) where:
       * cert_and_key_pem contains a certificate and private key in PEM format
         with the given subject common name and OCSP URL.
       * ocsp_der contains a DER encoded OCSP response or None if ocsp_url is
         None'''
  if serial == 0:
    # 128-bit random serial when the caller does not pin one.
    serial = RandomNumber(16)
  # NOTE(review): bytes(...) is a plain str() on Python 2; on Python 3 it
  # would raise for str arguments -- this helper is Python 2 only.  Also,
  # bytes(None) yields the string 'None', so a None ocsp_url still puts an
  # AIA URL of "None" into the certificate -- confirm this is intended.
  cert_der = MakeCertificate(ISSUER_CN, bytes(subject), serial, KEY, KEY,
                             bytes(ocsp_url))
  cert_pem = DERToPEM(cert_der)
  ocsp_der = None
  if ocsp_url is not None:
    if ocsp_state == OCSP_STATE_UNAUTHORIZED:
      # Canned "unauthorized" OCSPResponse.
      ocsp_der = unauthorizedDER
    elif ocsp_state == OCSP_STATE_INVALID:
      # Deliberately malformed response body for parser-error testing.
      ocsp_der = '3'
    else:
      ocsp_der = MakeOCSPResponse(ISSUER_CN, KEY, serial, ocsp_state)
  return (cert_pem + KEY_PEM, ocsp_der)
|
bsd-3-clause
|
hernandito/SickRage
|
lib/hachoir_core/field/fake_array.py
|
95
|
2294
|
import itertools
from hachoir_core.field import MissingField
class FakeArray:
    """
    Simulate an array for GenericFieldSet.array(): fielset.array("item")[0] is
    equivalent to fielset.array("item[0]").
    It's possible to iterate over the items using::
        for element in fieldset.array("item"):
            ...
    And to get array size using len(fieldset.array("item")).
    """
    def __init__(self, fieldset, name):
        # A name like "a/b/item" addresses array "item" of sub-fieldset "a/b".
        pos = name.rfind("/")
        if pos != -1:
            self.fieldset = fieldset[name[:pos]]
            self.name = name[pos+1:]
        else:
            self.fieldset = fieldset
            self.name = name
        self._format = "%s[%%u]" % self.name   # e.g. "item[%u]"
        self._cache = {}            # index -> field, filled lazily
        self._known_size = False    # True once a missing index has been seen
        self._max_index = -1        # highest index successfully fetched
    def __nonzero__(self):
        "Is the array empty or not?"
        if self._cache:
            return True
        else:
            return (0 in self)
    def __len__(self):
        "Number of fields in the array"
        total = self._max_index + 1
        if not self._known_size:
            # Probe successive indexes until one is missing.
            for index in itertools.count(total):
                try:
                    self[index]
                    total += 1
                except MissingField:
                    break
        return total
    def __contains__(self, index):
        try:
            self[index]
            return True
        except MissingField:
            return False
    def __getitem__(self, index):
        """
        Get a field of the array. Returns a field, or raise MissingField
        exception if the field doesn't exist.
        """
        try:
            value = self._cache[index]
        except KeyError:
            try:
                value = self.fieldset[self._format % index]
            except MissingField:
                # A hole proves the array ends; remember so __len__ can
                # avoid re-probing.
                self._known_size = True
                raise
            self._cache[index] = value
            self._max_index = max(index, self._max_index)
        return value
    def __iter__(self):
        """
        Iterate in the fields in their index order: field[0], field[1], ...
        """
        for index in itertools.count(0):
            try:
                yield self[index]
            except MissingField:
                # Bug fix (PEP 479): raising StopIteration inside a generator
                # becomes RuntimeError on Python 3.7+; a plain return ends
                # the generator cleanly on every Python version.
                return
|
gpl-3.0
|
ubc/edx-platform
|
common/djangoapps/util/migrations/0002_default_rate_limit_config.py
|
102
|
4097
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: create the default RateLimitConfiguration row."""
    def forwards(self, orm):
        """Ensure that rate limiting is enabled by default. """
        orm['util.RateLimitConfiguration'].objects.create(enabled=True)
    def backwards(self, orm):
        # Intentionally a no-op: removing the row would disable rate
        # limiting, which is not a safe rollback.
        pass
    # Frozen ORM snapshot used by South at migration time -- auto-generated,
    # do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'util.ratelimitconfiguration': {
            'Meta': {'object_name': 'RateLimitConfiguration'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }
    complete_apps = ['util']
    symmetrical = True
|
agpl-3.0
|
undoware/neutron-drive
|
google_appengine/lib/django_1_2/django/core/files/utils.py
|
901
|
1230
|
class FileProxyMixin(object):
    """
    A mixin class used to forward file methods to an underlaying file
    object. The internal file object has to be called "file"::
        class FileProxy(FileProxyMixin):
            def __init__(self, file):
                self.file = file
    """
    # Each property resolves the bound method/attribute of self.file at
    # access time, so the proxy keeps working if self.file is replaced.
    # softspace and xreadlines are Python 2 file-object members.
    encoding = property(lambda self: self.file.encoding)
    fileno = property(lambda self: self.file.fileno)
    flush = property(lambda self: self.file.flush)
    isatty = property(lambda self: self.file.isatty)
    newlines = property(lambda self: self.file.newlines)
    read = property(lambda self: self.file.read)
    readinto = property(lambda self: self.file.readinto)
    readline = property(lambda self: self.file.readline)
    readlines = property(lambda self: self.file.readlines)
    seek = property(lambda self: self.file.seek)
    softspace = property(lambda self: self.file.softspace)
    tell = property(lambda self: self.file.tell)
    truncate = property(lambda self: self.file.truncate)
    write = property(lambda self: self.file.write)
    writelines = property(lambda self: self.file.writelines)
    xreadlines = property(lambda self: self.file.xreadlines)
    def __iter__(self):
        # Delegate iteration (e.g. line-by-line reads) to the wrapped file.
        return iter(self.file)
|
bsd-3-clause
|
PennartLoettring/Poettrix
|
rootfs/usr/lib/python3.4/distutils/tests/test_bdist_rpm.py
|
71
|
4868
|
"""Tests for distutils.command.bdist_rpm."""
import unittest
import sys
import os
import tempfile
import shutil
from test.support import run_unittest
from distutils.core import Distribution
from distutils.command.bdist_rpm import bdist_rpm
from distutils.tests import support
from distutils.spawn import find_executable
from distutils import spawn
from distutils.errors import DistutilsExecError
SETUP_PY = """\
from distutils.core import setup
import foo
setup(name='foo', version='0.1', py_modules=['foo'],
url='xxx', author='xxx', author_email='xxx')
"""
class BuildRpmTestCase(support.TempdirManager,
                       support.LoggingSilencer,
                       unittest.TestCase):
    """Functional tests for bdist_rpm; require rpm/rpmbuild and Linux."""
    def setUp(self):
        # The RPM spec embeds sys.executable, so the path must be UTF-8
        # encodable for the build to work at all.
        try:
            sys.executable.encode("UTF-8")
        except UnicodeEncodeError:
            raise unittest.SkipTest("sys.executable is not encodable to UTF-8")
        super(BuildRpmTestCase, self).setUp()
        # Saved so tearDown can restore cwd and both the argv binding and
        # its original contents.
        self.old_location = os.getcwd()
        self.old_sys_argv = sys.argv, sys.argv[:]
    def tearDown(self):
        os.chdir(self.old_location)
        sys.argv = self.old_sys_argv[0]
        sys.argv[:] = self.old_sys_argv[1]
        super(BuildRpmTestCase, self).tearDown()
    # XXX I am unable yet to make this test work without
    # spurious sdtout/stderr output under Mac OS X
    @unittest.skipUnless(sys.platform.startswith('linux'),
                         'spurious sdtout/stderr output under Mac OS X')
    @unittest.skipIf(find_executable('rpm') is None,
                     'the rpm command is not found')
    @unittest.skipIf(find_executable('rpmbuild') is None,
                     'the rpmbuild command is not found')
    def test_quiet(self):
        """Build an RPM in quiet mode and check the produced artifacts."""
        # let's create a package
        tmp_dir = self.mkdtemp()
        pkg_dir = os.path.join(tmp_dir, 'foo')
        os.mkdir(pkg_dir)
        self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
        self.write_file((pkg_dir, 'foo.py'), '#')
        self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
        self.write_file((pkg_dir, 'README'), '')
        dist = Distribution({'name': 'foo', 'version': '0.1',
                             'py_modules': ['foo'],
                             'url': 'xxx', 'author': 'xxx',
                             'author_email': 'xxx'})
        dist.script_name = 'setup.py'
        os.chdir(pkg_dir)
        sys.argv = ['setup.py']
        cmd = bdist_rpm(dist)
        cmd.fix_python = True
        # running in quiet mode
        cmd.quiet = 1
        cmd.ensure_finalized()
        cmd.run()
        dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
        self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
        # bug #2945: upload ignores bdist_rpm files
        self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
        self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
    # XXX I am unable yet to make this test work without
    # spurious sdtout/stderr output under Mac OS X
    @unittest.skipUnless(sys.platform.startswith('linux'),
                         'spurious sdtout/stderr output under Mac OS X')
    # http://bugs.python.org/issue1533164
    @unittest.skipIf(find_executable('rpm') is None,
                     'the rpm command is not found')
    @unittest.skipIf(find_executable('rpmbuild') is None,
                     'the rpmbuild command is not found')
    def test_no_optimize_flag(self):
        """Build with fix_python and verify the noarch RPM is produced."""
        # let's create a package that brakes bdist_rpm
        tmp_dir = self.mkdtemp()
        pkg_dir = os.path.join(tmp_dir, 'foo')
        os.mkdir(pkg_dir)
        self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
        self.write_file((pkg_dir, 'foo.py'), '#')
        self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
        self.write_file((pkg_dir, 'README'), '')
        dist = Distribution({'name': 'foo', 'version': '0.1',
                             'py_modules': ['foo'],
                             'url': 'xxx', 'author': 'xxx',
                             'author_email': 'xxx'})
        dist.script_name = 'setup.py'
        os.chdir(pkg_dir)
        sys.argv = ['setup.py']
        cmd = bdist_rpm(dist)
        cmd.fix_python = True
        cmd.quiet = 1
        cmd.ensure_finalized()
        cmd.run()
        dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
        self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
        # bug #2945: upload ignores bdist_rpm files
        self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
        self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
        os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm'))
def test_suite():
    """Return this module's tests as a suite for the distutils test runner."""
    # unittest.makeSuite is deprecated (and removed in Python 3.13); the
    # TestLoader API is available on every supported version.
    return unittest.TestLoader().loadTestsFromTestCase(BuildRpmTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
|
gpl-2.0
|
noroutine/ansible
|
lib/ansible/modules/remote_management/oneview/oneview_ethernet_network_facts.py
|
125
|
4863
|
#!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_ethernet_network_facts
short_description: Retrieve the facts about one or more of the OneView Ethernet Networks
description:
- Retrieve the facts about one or more of the Ethernet Networks from OneView.
version_added: "2.4"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Ethernet Network name.
options:
description:
- "List with options to gather additional facts about an Ethernet Network and related resources.
Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather paginated and filtered facts about Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
params:
start: 1
count: 3
sort: 'name:descending'
filter: 'purpose=General'
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: Ethernet network name
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name with options
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: eth1
options:
- associatedProfiles
- associatedUplinkGroups
delegate_to: localhost
- debug: var=enet_associated_profiles
- debug: var=enet_associated_uplink_groups
'''
RETURN = '''
ethernet_networks:
description: Has all the OneView facts about the Ethernet Networks.
returned: Always, but can be null.
type: dict
enet_associated_profiles:
description: Has all the OneView facts about the profiles which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
enet_associated_uplink_groups:
description: Has all the OneView facts about the uplink sets which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class EthernetNetworkFactsModule(OneViewModuleBase):
    """Ansible facts module: gather OneView Ethernet Network information."""
    argument_spec = dict(
        name=dict(type='str'),
        options=dict(type='list'),
        params=dict(type='dict')
    )
    def __init__(self):
        super(EthernetNetworkFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
        # All lookups go through the ethernet-networks resource client.
        self.resource_client = self.oneview_client.ethernet_networks
    def execute_module(self):
        """Collect facts; returns dict(changed=False, ansible_facts=...)."""
        ansible_facts = {}
        if self.module.params['name']:
            # A name was given: fetch matching networks and, when requested,
            # the optional associated facts for the first match.
            ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
            if self.module.params.get('options') and ethernet_networks:
                ansible_facts = self.__gather_optional_facts(ethernet_networks[0])
        else:
            # No name: return all networks, honoring pagination/filter params.
            ethernet_networks = self.resource_client.get_all(**self.facts_params)
        ansible_facts['ethernet_networks'] = ethernet_networks
        return dict(changed=False, ansible_facts=ansible_facts)
    def __gather_optional_facts(self, ethernet_network):
        # Only the options the user asked for are resolved.
        ansible_facts = {}
        if self.options.get('associatedProfiles'):
            ansible_facts['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
        if self.options.get('associatedUplinkGroups'):
            ansible_facts['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
        return ansible_facts
    def __get_associated_profiles(self, ethernet_network):
        # Resolve each associated profile reference (presumably a URI) to
        # the full server-profile object.
        associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
        return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
    def __get_associated_uplink_groups(self, ethernet_network):
        # Resolve each associated uplink-set reference to the full object.
        uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
        return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
def main():
    """Module entry point used by Ansible."""
    EthernetNetworkFactsModule().run()
if __name__ == '__main__':
    main()
|
gpl-3.0
|
manthansharma/kivy
|
examples/canvas/mesh_manipulation.py
|
21
|
3123
|
'''
Mesh Manipulation Example
=========================
This demonstrates creating a mesh and using it to deform the texture (the
kivy log). You should see the kivy logo with a five sliders to right.
The sliders change the mesh points' x and y offsets, radius, and a
'wobble' deformation's magnitude and speed.
This example is developed in gabriel's blog post at
http://kivy.org/planet/2014/01/kivy-image-manipulations-with-mesh-and-textures/
'''
from kivy.app import App
from kivy.lang import Builder
from kivy.core.image import Image as CoreImage
from kivy.properties import ListProperty, ObjectProperty, NumericProperty
from kivy.clock import Clock
from kivy.core.window import Window
from math import sin, cos, pi
kv = '''
BoxLayout:
Widget:
canvas:
Color:
rgba: 1, 1, 1, 1
Mesh:
vertices: app.mesh_points
indices: range(len(app.mesh_points) // 4)
texture: app.mesh_texture
mode: 'triangle_fan'
BoxLayout:
orientation: 'vertical'
size_hint_x: None
width: 100
Slider:
value: app.offset_x
on_value: app.offset_x = args[1]
min: -1
max: 1
Slider:
value: app.offset_y
on_value: app.offset_y = args[1]
min: -1
max: 1
Slider:
value: app.radius
on_value: app.radius = args[1]
min: 10
max: 1000
Slider:
value: app.sin_wobble
on_value: app.sin_wobble = args[1]
min: -50
max: 50
Slider:
value: app.sin_wobble_speed
on_value: app.sin_wobble_speed = args[1]
min: 0
max: 50
step: 1
'''
class MeshBallApp(App):
    """Kivy demo app: a textured Mesh deformed by slider-controlled params."""
    # Properties bound to the sliders declared in the kv string above.
    mesh_points = ListProperty([])
    mesh_texture = ObjectProperty(None)
    radius = NumericProperty(500)
    offset_x = NumericProperty(.5)
    offset_y = NumericProperty(.5)
    sin_wobble = NumericProperty(0)
    sin_wobble_speed = NumericProperty(0)
    def build(self):
        self.mesh_texture = CoreImage('data/logo/kivy-icon-512.png').texture
        # Interval 0 == recompute the mesh on every frame.
        Clock.schedule_interval(self.update_points, 0)
        return Builder.load_string(kv)
    def update_points(self, *args):
        """ replace self.mesh_points based on current slider positions.
        Called continuously by a timer because this only sample code.
        """
        # First vertex (x, y, u, v): the window center with texture
        # coordinate (.5, .5); rendered as a triangle fan around it.
        points = [Window.width / 2, Window.height / 2, .5, .5]
        i = 0
        # Walk the circle in steps of 0.01*pi; each vertex radius is
        # modulated by the sine "wobble" term.
        while i < 2 * pi:
            i += 0.01 * pi
            points.extend([
                Window.width / 2 + cos(i) * (self.radius + self.sin_wobble *
                                             sin(i * self.sin_wobble_speed)),
                Window.height / 2 + sin(i) * (self.radius + self.sin_wobble *
                                              sin(i * self.sin_wobble_speed)),
                self.offset_x + sin(i),
                self.offset_y + cos(i)])
        self.mesh_points = points
if __name__ == '__main__':
MeshBallApp().run()
|
mit
|
rnikiforova/GuruTubeProject
|
GuruTube/libraries/django/contrib/gis/tests/geo3d/models.py
|
222
|
2064
|
from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
# Test models for GeoDjango's 3D geometry support.  Each 2D/3D pair differs
# only by dim=3; srid=4269 is a geographic CRS, srid=32140 a projected one.
@python_2_unicode_compatible
class City3D(models.Model):
    name = models.CharField(max_length=30)
    point = models.PointField(dim=3)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Interstate2D(models.Model):
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=4269)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Interstate3D(models.Model):
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=4269)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class InterstateProj2D(models.Model):
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=32140)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class InterstateProj3D(models.Model):
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=32140)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Polygon2D(models.Model):
    name = models.CharField(max_length=30)
    poly = models.PolygonField(srid=32140)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Polygon3D(models.Model):
    name = models.CharField(max_length=30)
    poly = models.PolygonField(dim=3, srid=32140)
    objects = models.GeoManager()
    def __str__(self):
        return self.name
# The point/multipoint models below have no name field and no __str__.
class Point2D(models.Model):
    point = models.PointField()
    objects = models.GeoManager()
class Point3D(models.Model):
    point = models.PointField(dim=3)
    objects = models.GeoManager()
class MultiPoint3D(models.Model):
    mpoint = models.MultiPointField(dim=3)
    objects = models.GeoManager()
|
bsd-3-clause
|
amrdraz/kodr
|
app/brython/www/src/Lib/unittest/test/_test_warnings.py
|
858
|
2304
|
# helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and check that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
    """Emit a RuntimeWarning ('rw') from a single fixed code location.

    Called several times by TestWarnings.test_function so the runner can
    check per-location warning de-duplication.
    """
    warnings.warn('rw', category=RuntimeWarning)
class TestWarnings(unittest.TestCase):
    # unittest warnings will be printed at most once per type (max one message
    # for the fail* methods, and one for the assert* methods)
    def test_assert(self):
        # Deliberately uses the deprecated assertEquals alias -- this module
        # exists to *generate* deprecation warnings; do not "modernize".
        self.assertEquals(2+2, 4)
        self.assertEquals(2*2, 4)
        self.assertEquals(2**2, 4)
    def test_fail(self):
        # failUnless is the deprecated alias of assertTrue -- intentional.
        self.failUnless(1)
        self.failUnless(True)
    def test_other_unittest(self):
        # Non-deprecated asserts: should produce no unittest warnings.
        self.assertAlmostEqual(2+2, 4)
        self.assertNotAlmostEqual(4+4, 2)
    # these warnings are normally silenced, but they are printed in unittest
    def test_deprecation(self):
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)
    def test_import(self):
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)
    # user warnings should always be printed
    def test_warning(self):
        warnings.warn('uw')
        warnings.warn('uw')
        warnings.warn('uw')
    # these warnings come from the same place; they will be printed
    # only once by default or three times if the 'always' filter is used
    def test_function(self):
        warnfun()
        warnfun()
        warnfun()
if __name__ == '__main__':
    # Capture every warning raised while the tests run so they can be
    # printed afterwards and compared by the parent test.
    with warnings.catch_warnings(record=True) as ws:
        # if an arg is provided pass it to unittest.main as 'warnings'
        if len(sys.argv) == 2:
            # pop() also removes the arg so unittest doesn't try to parse it.
            unittest.main(exit=False, warnings=sys.argv.pop())
        else:
            unittest.main(exit=False)
    # print all the warning messages collected
    for w in ws:
        print(w.message)
|
mit
|
boxed/WCS-Hub
|
wcs-hub/models.py
|
1
|
1177
|
from google.appengine.ext import db
from google.appengine.api import users
from json import JSONDecoder
class DictModel(db.Model):
    """Base model that can serialize its properties to a JSON-friendly dict."""
    def to_dict(self):
        """Return all properties as a dict, decoding *_json text fields.

        Properties whose name ends in '_json' hold JSON text: the value is
        decoded and the '_json' suffix is stripped from the key.  The
        datastore key id is added under 'id' (Python 2 `unicode`).
        """
        decoder = JSONDecoder()
        result = dict(
            [
                (p[:-len('_json')], decoder.decode(getattr(self, p))) if p.endswith('_json') else (p, getattr(self, p))
                for p in self.properties()
            ]
            +[('id', unicode(self.key().id()))])
        return result
class Event(DictModel):
    """An event users can register for, with a registration window."""
    # Creator, captured automatically on first save.
    user = db.UserProperty(auto_current_user_add=True)
    name = db.StringProperty(required=True)
    description = db.TextProperty()
    date = db.DateProperty()
    registration_opens = db.DateProperty()
    registration_closes = db.DateProperty()
    # JSON-encoded list; exposed as 'competitions' by DictModel.to_dict.
    competitions_json = db.TextProperty()
class Registration(DictModel):
    """A user's registration for an Event."""
    # NOTE: the parent object must be an Event instance
    user = db.UserProperty(auto_current_user_add=True)
    first_name = db.StringProperty(required=True)
    last_name = db.StringProperty(required=True)
    # Free-form string; presumably 'lead' or 'follow' -- confirm with callers.
    lead_follow = db.StringProperty(required=True)
    # JSON-encoded text; decoded by DictModel.to_dict via the '_json' suffix.
    competitions_json = db.TextProperty(required=True)
    # Optional; presumably a World Swing Dance Council id -- unverified.
    wsdc_number = db.StringProperty()
|
mit
|
jmighion/ansible
|
lib/ansible/modules/windows/win_user.py
|
40
|
6163
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_user
version_added: "1.7"
short_description: Manages local Windows user accounts
description:
- Manages local Windows user accounts
- For non-Windows targets, use the M(user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
required: true
fullname:
description:
- Full name of the user
required: false
default: null
version_added: "1.9"
description:
description:
- Description of the user
required: false
default: null
version_added: "1.9"
password:
description:
- Optionally set the user's password to this (plain text) value.
required: false
default: null
update_password:
description:
- C(always) will update passwords if they differ. C(on_create) will
only set the password for newly created users.
required: false
choices: [ 'always', 'on_create' ]
default: always
version_added: "1.9"
password_expired:
description:
- C(yes) will require the user to change their password at next login.
C(no) will clear the expired password flag.
required: false
choices: [ 'yes', 'no' ]
default: null
version_added: "1.9"
password_never_expires:
description:
- C(yes) will set the password to never expire. C(no) will allow the
password to expire.
required: false
choices: [ 'yes', 'no' ]
default: null
version_added: "1.9"
user_cannot_change_password:
description:
- C(yes) will prevent the user from changing their password. C(no) will
allow the user to change their password.
required: false
choices: [ 'yes', 'no' ]
default: null
version_added: "1.9"
account_disabled:
description:
- C(yes) will disable the user account. C(no) will clear the disabled
flag.
required: false
choices: [ 'yes', 'no' ]
default: null
version_added: "1.9"
account_locked:
description:
- C(no) will unlock the user account if locked.
required: false
choices: [ 'no' ]
default: null
version_added: "1.9"
groups:
description:
    - Adds or removes the user from this comma-separated list of groups,
depending on the value of I(groups_action). When I(groups_action) is
C(replace) and I(groups) is set to the empty string ('groups='), the
user is removed from all groups.
required: false
version_added: "1.9"
groups_action:
description:
- If C(replace), the user is added as a member of each group in
I(groups) and removed from any other groups. If C(add), the user is
added to each group in I(groups) where not already a member. If
C(remove), the user is removed from each group in I(groups).
required: false
choices: [ "replace", "add", "remove" ]
default: "replace"
version_added: "1.9"
state:
description:
- When C(present), creates or updates the user account. When C(absent),
removes the user account if it exists. When C(query) (new in 1.9),
retrieves the user account details without making any changes.
required: false
choices:
- present
- absent
- query
default: present
aliases: []
notes:
- For non-Windows targets, use the M(user) module instead.
author:
- "Paul Durivage (@angstwad)"
- "Chris Church (@cchurch)"
'''
EXAMPLES = r'''
- name: Ensure user bob is present
win_user:
name: bob
password: B0bP4ssw0rd
state: present
groups:
- Users
- name: Ensure user bob is absent
win_user:
name: bob
state: absent
'''
RETURN = r'''
account_disabled:
description: Whether the user is disabled.
returned: user exists
type: bool
sample: false
account_locked:
description: Whether the user is locked.
returned: user exists
type: bool
sample: false
description:
description: The description set for the user.
returned: user exists
type: str
sample: Username for test
fullname:
description: The full name set for the user.
returned: user exists
type: str
sample: Test Username
groups:
description: A list of groups and their ADSI path the user is a member of.
returned: user exists
type: list
sample: [
{
"name": "Administrators",
"path": "WinNT://WORKGROUP/USER-PC/Administrators"
}
]
name:
description: The name of the user
returned: always
type: str
sample: username
password_expired:
description: Whether the password is expired.
returned: user exists
type: bool
sample: false
password_never_expires:
description: Whether the password is set to never expire.
returned: user exists
type: bool
sample: true
path:
description: The ADSI path for the user.
returned: user exists
type: str
sample: "WinNT://WORKGROUP/USER-PC/username"
sid:
description: The SID for the user.
returned: user exists
type: str
sample: S-1-5-21-3322259488-2828151810-3939402796-1001
user_cannot_change_password:
description: Whether the user can change their own password.
returned: user exists
type: bool
sample: false
'''
|
gpl-3.0
|
nxppru/zydiy
|
scripts/dl_cleanup.py
|
223
|
6094
|
#!/usr/bin/env python3
"""
# OpenWrt download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (C) 2010-2015 Michael Buesch <[email protected]>
# Copyright (C) 2013-2015 OpenWrt.org
"""
from __future__ import print_function
import sys
import os
import re
import getopt
# Commandline options
opt_dryrun = False
def parseVer_1234(match, filepath):
    """Parse an a.b.c.d version match into (progname, sortable int)."""
    name = match.group(1)
    version = 0
    # Pack the four numeric fields into fixed bit positions.
    for shift, idx in zip((64, 48, 32, 16), (2, 3, 4, 5)):
        version |= int(match.group(idx)) << shift
    return (name, version)
def parseVer_123(match, filepath):
    """Parse an a.b.c[X] version match into (progname, sortable int).

    An optional trailing letter (group 5) is folded in as its ordinal.
    """
    progname = match.group(1)
    try:
        suffix = match.group(5)
    except IndexError:
        suffix = None
    patchlevel = ord(suffix[0]) if suffix else 0
    progversion = ((int(match.group(2)) << 64) |
                   (int(match.group(3)) << 48) |
                   (int(match.group(4)) << 32) |
                   patchlevel)
    return (progname, progversion)
def parseVer_12(match, filepath):
    """Parse an a.b[X] version match into (progname, sortable int).

    An optional trailing letter (group 4) is folded in as its ordinal.
    """
    progname = match.group(1)
    try:
        suffix = match.group(4)
    except IndexError:
        suffix = None
    patchlevel = ord(suffix[0]) if suffix else 0
    progversion = ((int(match.group(2)) << 64) |
                   (int(match.group(3)) << 48) |
                   patchlevel)
    return (progname, progversion)
def parseVer_r(match, filepath):
    """Parse an rNNNN revision match into (progname, sortable int)."""
    return (match.group(1), int(match.group(2)) << 64)
def parseVer_ymd(match, filepath):
    """Parse a YYYY-MM-DD date match into (progname, sortable int)."""
    year, month, day = (int(match.group(i)) for i in (2, 3, 4))
    version = (year << 64) | (month << 48) | (day << 32)
    return (match.group(1), version)
def parseVer_GIT(match, filepath):
    """Version a git-hash tarball by its file modification time."""
    mtime = int(os.stat(filepath).st_mtime)
    return (match.group(1), mtime << 64)
# Archive extensions recognized on download files; anything else is rejected.
extensions = (
    ".tar.gz",
    ".tar.bz2",
    ".tar.xz",
    ".orig.tar.gz",
    ".orig.tar.bz2",
    ".orig.tar.xz",
    ".zip",
    ".tgz",
    ".tbz",
    ".txz",
)
# (compiled pattern, parser) pairs tried in order; the first match wins,
# so the more specific patterns must come before the generic ones.
versionRegex = (
    (re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234),      # xxx-1.2.3.4
    (re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd),       # xxx-YYYY-MM-DD
    (re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT),             # xxx-GIT_SHASUM
    (re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123),         # xxx-1.2.3a
    (re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123),                # xxx-1_2_3
    (re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12),                 # xxx-1.2a
    (re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r),                            # xxx-r1111
)
# Files whose names match one of these patterns are never deleted.
blacklist = [
    ("linux", re.compile(r"linux-\d.*")),
    ("gcc", re.compile(r"gcc-.*")),
    ("wl_apsta", re.compile(r"wl_apsta.*")),
    (".fw", re.compile(r".*\.fw")),
    (".arm", re.compile(r".*\.arm")),
    (".bin", re.compile(r".*\.bin")),
    ("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")),
]
# Raised when a file name cannot be split into name/version/extension.
class EntryParseError(Exception): pass
class Entry:
    """One downloaded file, split into program name, version and extension.

    Raises EntryParseError when the file name does not end in a known
    extension or does not match any known version pattern.
    """
    def __init__(self, directory, filename):
        self.directory = directory
        self.filename = filename
        self.progname = ""
        self.fileext = ""
        # Strip a recognized archive extension from the name.
        for ext in extensions:
            if filename.endswith(ext):
                filename = filename[0:0-len(ext)]
                self.fileext = ext
                break
        else:
            print(self.filename, "has an unknown file-extension")
            raise EntryParseError("ext")
        # Try each version pattern in order; the first match wins.
        for (regex, parseVersion) in versionRegex:
            match = regex.match(filename)
            if match:
                (self.progname, self.version) = parseVersion(
                    match, directory + "/" + filename + self.fileext)
                break
        else:
            print(self.filename, "has an unknown version pattern")
            raise EntryParseError("ver")

    def getPath(self):
        # Collapse accidental double slashes from naive path joining.
        return (self.directory + "/" + self.filename).replace("//", "/")

    def deleteFile(self):
        # Honors the module-level opt_dryrun flag: print only, don't unlink.
        path = self.getPath()
        print("Deleting", path)
        if not opt_dryrun:
            os.unlink(path)

    def __ge__(self, y):
        # Only >= is needed: main() keeps the maximum version per program.
        return self.version >= y.version
def usage():
    """Print the command-line help text to stdout."""
    lines = (
        "OpenWrt download directory cleanup utility",
        "Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>",
        "",
        " -d|--dry-run Do a dry-run. Don't delete any files",
        " -B|--show-blacklist Show the blacklist and exit",
        " -w|--whitelist ITEM Remove ITEM from blacklist",
    )
    for line in lines:
        print(line)
def main(argv):
    """Delete all but the newest version of each tarball in a dl directory.

    Returns a process exit code: 0 on success, 1 on usage or option errors.
    """
    global opt_dryrun
    try:
        (opts, args) = getopt.getopt(argv[1:],
            "hdBw:",
            [ "help", "dry-run", "show-blacklist", "whitelist=", ])
        if len(args) != 1:
            usage()
            return 1
    except getopt.GetoptError as e:
        usage()
        return 1
    directory = args[0]
    for (o, v) in opts:
        if o in ("-h", "--help"):
            usage()
            return 0
        if o in ("-d", "--dry-run"):
            opt_dryrun = True
        if o in ("-w", "--whitelist"):
            # Remove the named item from the module-level blacklist.
            for i in range(0, len(blacklist)):
                (name, regex) = blacklist[i]
                if name == v:
                    del blacklist[i]
                    break
            else:
                # for/else: the item was not found in the blacklist.
                print("Whitelist error: Item", v,\
                    "is not in blacklist")
                return 1
        if o in ("-B", "--show-blacklist"):
            for (name, regex) in blacklist:
                # Use one tab for long names to keep columns aligned.
                sep = "\t\t"
                if len(name) >= 8:
                    sep = "\t"
                print("%s%s(%s)" % (name, sep, regex.pattern))
            return 0
    # Create a directory listing and parse the file names.
    entries = []
    for filename in os.listdir(directory):
        if filename == "." or filename == "..":
            continue
        for (name, regex) in blacklist:
            if regex.match(filename):
                if opt_dryrun:
                    print(filename, "is blacklisted")
                break
        else:
            # for/else: not blacklisted; unparseable names are skipped.
            try:
                entries.append(Entry(directory, filename))
            except EntryParseError as e:
                pass
    # Create a map of programs
    progmap = {}
    for entry in entries:
        if entry.progname in progmap.keys():
            progmap[entry.progname].append(entry)
        else:
            progmap[entry.progname] = [entry,]
    # Traverse the program map and delete everything but the last version
    for prog in progmap:
        lastVersion = None
        versions = progmap[prog]
        for version in versions:
            # Entry.__ge__ compares the packed version integers.
            if lastVersion is None or version >= lastVersion:
                lastVersion = version
        if lastVersion:
            for version in versions:
                if version is not lastVersion:
                    version.deleteFile()
            if opt_dryrun:
                print("Keeping", lastVersion.getPath())
    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
gpl-2.0
|
mindworker/so-bro
|
watcher.py
|
1
|
1650
|
import os
import sys
from select import select
from subprocess import Popen, PIPE
import rpyc
err = ""
def handleInterpreter(conn, fd, data):
    """Accumulate interactive-interpreter stderr and ship complete errors.

    Relies on module globals: ``err`` buffers stderr text across calls and
    ``p`` is the child interpreter process started in __main__.
    """
    global err
    if fd == p.stderr.fileno():
        datastr = str(data, 'utf8')
        # Skip the bare prompt and the interpreter startup banner line.
        if datastr == '>>> ':
            return
        if 'Type "help", "copyright", "credits" or "license" for more information.' in datastr:
            return
        err += datastr
        # errors seem to always end with >>>
        if '>>> ' in datastr:
            conn.root.add_err(err)
            err = ""
def handleScript(conn, fd, data):
    """Forward any stderr output of the child script to the debug service."""
    if fd != p.stderr.fileno():
        return
    # send to local debug service
    conn.root.add_err(str(data, 'utf8'))
def handle(conn, fd, data, mode):
    """Route child output to the handler matching the run mode."""
    handler = handleInterpreter if mode == 'interpreter' else handleScript
    handler(conn, fd, data)
if __name__ == "__main__":
    # Connect to the local rpyc debug service that collects errors.
    conn = rpyc.connect("localhost", 18861)
    # Default: wrap an interactive interpreter; with args, run a script.
    command = ['python']
    mode = 'interpreter'
    if len(sys.argv) > 1:
        command = ['python'] + sys.argv[1:]
        mode = 'script'
    with Popen(command, stdout=PIPE, stderr=PIPE) as p:
        # Map the child's pipe fds to our matching binary output streams.
        readable = {
            p.stdout.fileno(): sys.stdout.buffer,
            p.stderr.fileno(): sys.stderr.buffer,
        }
        while readable:
            for fd in select(readable, [], [])[0]:
                data = os.read(fd, 1024) # read available
                if not data: # EOF
                    del readable[fd]
                    continue
                # Mirror the child's output, then pass it to the handler.
                readable[fd].write(data)
                readable[fd].flush()
                handle(conn, fd, data, mode)
|
mit
|
hanlind/nova
|
nova/api/openstack/compute/schemas/fixed_ips.py
|
79
|
1027
|
# Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
# JSON-Schema bodies for the os-fixed-ips API actions: each request body
# must be exactly one key mapping to a null-ish value.
# NOTE(review): parameter_types.none presumably matches JSON null/empty --
# confirm against nova.api.validation.parameter_types.
reserve = {
    'type': 'object',
    'properties': {
        'reserve': parameter_types.none,
    },
    'required': ['reserve'],
    'additionalProperties': False,
}
unreserve = {
    'type': 'object',
    'properties': {
        'unreserve': parameter_types.none,
    },
    'required': ['unreserve'],
    'additionalProperties': False,
}
|
apache-2.0
|
pducks32/intergrala
|
python/sympy/sympy/utilities/tests/test_pytest.py
|
105
|
1601
|
from sympy.utilities.pytest import raises, USE_PYTEST
# When the real py.test is in use, skip this whole module: it tests
# sympy's own raises() wrapper, which is bypassed under py.test.
if USE_PYTEST:
    import py.test
    pytestmark = py.test.mark.skipif(USE_PYTEST,
                                     reason=("using py.test"))
# Test callables
def test_expected_exception_is_silent_callable():
    """raises() must swallow the exception type it was told to expect."""
    def boom():
        raise ValueError()
    raises(ValueError, boom)
def test_lack_of_exception_triggers_AssertionError_callable():
    """raises() must fail with "DID NOT RAISE" when nothing is raised."""
    try:
        raises(Exception, lambda: 1 + 1)
    except AssertionError as e:
        assert str(e) == "DID NOT RAISE"
    else:
        assert False
def test_unexpected_exception_is_passed_through_callable():
    """An exception of the wrong type must propagate out of raises()."""
    def boom():
        raise ValueError("some error message")
    try:
        raises(TypeError, boom)
    except ValueError as e:
        assert str(e) == "some error message"
    else:
        assert False
# Test with statement
def test_expected_exception_is_silent_with():
    # the raises() context manager must swallow the expected exception
    with raises(ValueError):
        raise ValueError()
def test_lack_of_exception_triggers_AssertionError_with():
    """The context manager must fail with "DID NOT RAISE" on no exception."""
    try:
        with raises(Exception):
            1 + 1
    except AssertionError as e:
        assert str(e) == "DID NOT RAISE"
    else:
        assert False
def test_unexpected_exception_is_passed_through_with():
    """A wrong-typed exception must propagate out of the context manager."""
    try:
        with raises(TypeError):
            raise ValueError("some error message")
    except ValueError as e:
        assert str(e) == "some error message"
    else:
        assert False
# Now we can use raises() instead of try/catch
# to test that a specific exception class is raised
def test_second_argument_should_be_callable_or_string():
    # a non-callable, non-string code argument must raise TypeError
    raises(TypeError, lambda: raises("irrelevant", 42))
|
mit
|
pidah/st2contrib
|
packs/smartthings/sensors/smartthings_sensor.py
|
10
|
2071
|
import eventlet
import json
from flask import request, json, Flask, Response # noqa
from st2reactor.sensor.base import Sensor
# Green the relevant stdlib modules so the embedded Flask server and the
# sensor machinery cooperate under eventlet's cooperative scheduling.
eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=True,
    time=True)
class SmartThingsSensor(Sensor):
    """st2 sensor that receives SmartThings webhook events over HTTP.

    Runs a small Flask app; every authenticated PUT to '/' is dispatched
    into st2 as a 'smartthings.event' trigger.
    """
    def __init__(self, sensor_service, config=None):
        super(SmartThingsSensor, self).__init__(sensor_service=sensor_service,
                                                config=config)
        self._trigger = 'smartthings.event'
        self._logger = self._sensor_service.get_logger(__name__)
        self._listen_ip = self._config.get('listen_ip', '0.0.0.0')
        # NOTE(review): default port is a string; Flask accepts it, but
        # confirm the pack's config schema uses the same type.
        self._listen_port = self._config.get('listen_port', '12000')
        self._api_key = self._config.get('api_key', None)
        self._app = Flask(__name__)

    def setup(self):
        pass

    def run(self):
        """Validate config and start the blocking Flask server."""
        if not self._api_key:
            raise Exception('[smartthings_sensor]: api_key config option not set')
        # Routes
        @self._app.route('/', methods=['PUT'])
        def process_incoming():
            # Authenticate via the X-Api-Key header before dispatching.
            response = None
            if request.headers['X-Api-Key'] == self._api_key:
                status = self._process_request(request)
                response = Response(status[0], status=status[1])
            else:
                response = Response('fail', status=401)
            return response
        # Start the Flask App
        self._app.run(host=self._listen_ip, port=self._listen_port)

    def cleanup(self):
        pass

    def add_trigger(self, trigger):
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _process_request(self, request):
        """Dispatch a JSON request body as an st2 trigger.

        Returns a (body, http_status) tuple: ('ok', 200) on success, or
        ('fail', 415) when the Content-Type is not application/json.
        """
        if request.headers['Content-Type'] == 'application/json':
            payload = request.json
            self._logger.debug('[smartthings_sensor]: processing request {}'.format(payload))
            self._sensor_service.dispatch(trigger=self._trigger, payload=payload)
            return ('ok', 200)
        else:
            return ('fail', 415)
|
apache-2.0
|
jhamman/xray
|
xarray/tests/test_variable.py
|
1
|
54048
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import pytest
from distutils.version import LooseVersion
import numpy as np
import pytz
import pandas as pd
from xarray import Variable, IndexVariable, Coordinate, Dataset
from xarray.core import indexing
from xarray.core.variable import as_variable, as_compatible_data
from xarray.core.indexing import PandasIndexAdapter, LazilyIndexedArray
from xarray.core.pycompat import PY3, OrderedDict
from xarray.core.common import full_like, zeros_like, ones_like
from . import TestCase, source_ndarray, requires_dask
class VariableSubclassTestCases(object):
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(['time'], data, {'foo': 'bar'})
self.assertEqual(v.dims, ('time',))
self.assertArrayEqual(v.values, data)
self.assertEqual(v.dtype, float)
self.assertEqual(v.shape, (10,))
self.assertEqual(v.size, 10)
self.assertEqual(v.sizes, {'time': 10})
self.assertEqual(v.nbytes, 80)
self.assertEqual(v.ndim, 1)
self.assertEqual(len(v), 10)
self.assertEqual(v.attrs, {'foo': u'bar'})
def test_attrs(self):
v = self.cls(['time'], 0.5 * np.arange(10))
self.assertEqual(v.attrs, {})
attrs = {'foo': 'bar'}
v.attrs = attrs
self.assertEqual(v.attrs, attrs)
self.assertIsInstance(v.attrs, OrderedDict)
v.attrs['foo'] = 'baz'
self.assertEqual(v.attrs['foo'], 'baz')
def test_getitem_dict(self):
v = self.cls(['x'], np.random.randn(5))
actual = v[{'x': 0}]
expected = v[0]
self.assertVariableIdentical(expected, actual)
def _assertIndexedLikeNDArray(self, variable, expected_value0,
expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
self.assertEqual(variable[0].shape, ())
self.assertEqual(variable[0].ndim, 0)
self.assertEqual(variable[0].size, 1)
# test identity
self.assertTrue(variable.equals(variable.copy()))
self.assertTrue(variable.identical(variable.copy()))
# check value is equal for both ndarray and Variable
self.assertEqual(variable.values[0], expected_value0)
self.assertEqual(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
self.assertEqual(type(variable.values[0]), type(expected_value0))
self.assertEqual(type(variable[0].values), type(expected_value0))
elif expected_dtype is not False:
self.assertEqual(variable.values[0].dtype, expected_dtype)
self.assertEqual(variable[0].values.dtype, expected_dtype)
def test_index_0d_int(self):
for value, dtype in [(0, np.int_),
(np.int32(0), np.int32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_),
(np.float32(0.5), np.float32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')),
(u'foo', np.dtype('U3'))]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(['x'], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
x = self.cls(['x'], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(['x'], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
x = self.cls(['x'], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
def test_index_0d_not_a_time(self):
d = np.datetime64('NaT', 'ns')
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper(object):
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return '%s(item=%r)' % (type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls('x', [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls('x', listarray)
assert x.data == listarray
assert x[0].data == listarray.squeeze()
assert x.squeeze().data == listarray.squeeze()
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range('2011-09-01', periods=10)
for dates in [date_range, date_range.values,
date_range.to_pydatetime()]:
expected = self.cls('t', dates)
for times in [[expected[i] for i in range(10)],
[expected[i:(i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)]]:
actual = Variable.concat(times, 't')
self.assertEqual(expected.dtype, actual.dtype)
self.assertArrayEqual(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls('time', pd.date_range('2000-01-01', periods=5))
expected = np.datetime64('2000-01-01T00Z', 'ns')
self.assertEqual(x[0].values, expected)
def test_datetime64_conversion(self):
times = pd.date_range('2000-01-01', periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('datetime64[s]'), False),
(times.to_pydatetime(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('timedelta64[s]'), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls('x', data)
self.assertEqual(actual.dtype, data.dtype)
def test_pandas_data(self):
v = self.cls(['x'], pd.Series([0, 1, 2], index=[3, 2, 1]))
self.assertVariableIdentical(v, v[[0, 1, 2]])
v = self.cls(['x'], pd.Index([0, 1, 2]))
self.assertEqual(v[0].values, v.values[0])
def test_pandas_period_index(self):
v = self.cls(['x'], pd.period_range(start='2000', periods=20, freq='B'))
self.assertEqual(v[0], pd.Period('2000', freq='B'))
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
# should we need `.to_base_variable()`?
# probably a break that `+v` changes type?
v = self.cls(['x'], x)
base_v = v.to_base_variable()
# unary ops
self.assertVariableIdentical(base_v, +v)
self.assertVariableIdentical(base_v, abs(v))
self.assertArrayEqual((-v).values, -x)
# binary ops with numbers
self.assertVariableIdentical(base_v, v + 0)
self.assertVariableIdentical(base_v, 0 + v)
self.assertVariableIdentical(base_v, v * 1)
self.assertArrayEqual((v > 2).values, x > 2)
self.assertArrayEqual((0 == v).values, 0 == x)
self.assertArrayEqual((v - 1).values, x - 1)
self.assertArrayEqual((1 - v).values, 1 - x)
# binary ops with numpy arrays
self.assertArrayEqual((v * x).values, x ** 2)
self.assertArrayEqual((x * v).values, x ** 2)
self.assertArrayEqual(v - y, v - 1)
self.assertArrayEqual(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(['x'], x, {'units': 'meters'})
self.assertVariableIdentical(base_v, +v2)
# binary ops with all variables
self.assertArrayEqual(v + v, 2 * v)
w = self.cls(['x'], y, {'foo': 'bar'})
self.assertVariableIdentical(v + w, self.cls(['x'], x + y).to_base_variable())
self.assertArrayEqual((v * w).values, x * y)
# something complicated
self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
self.assertEqual(float, (+v).dtype)
self.assertEqual(float, (+v).values.dtype)
self.assertEqual(float, (0 + v).dtype)
self.assertEqual(float, (0 + v).values.dtype)
# check types of returned data
self.assertIsInstance(+v, Variable)
self.assertNotIsInstance(+v, IndexVariable)
self.assertIsInstance(0 + v, Variable)
self.assertNotIsInstance(0 + v, IndexVariable)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(['x'], x)
actual = v.sum()
expected = Variable((), 10)
self.assertVariableIdentical(expected, actual)
self.assertIs(type(actual), Variable)
def test_array_interface(self):
x = np.arange(5)
v = self.cls(['x'], x)
self.assertArrayEqual(np.asarray(v), x)
# test patched in methods
self.assertArrayEqual(v.astype(float), x.astype(float))
# think this is a break, that argsort changes the type
self.assertVariableIdentical(v.argsort(), v.to_base_variable())
self.assertVariableIdentical(v.clip(2, 3),
self.cls('x', x.clip(2, 3)).to_base_variable())
# test ufuncs
self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)).to_base_variable())
self.assertIsInstance(np.sin(v), Variable)
self.assertNotIsInstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range('2000-01-01', periods=3),
np.array(['a', 'b', 'c'], dtype=object)]:
yield (self.cls('x', data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
self.assertArrayEqual(v.values, np.asarray(data))
self.assertArrayEqual(np.asarray(v), np.asarray(data))
self.assertEqual(v[0].values, np.asarray(data)[0])
self.assertEqual(np.asarray(v[0]), np.asarray(data)[0])
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
self.assertTrue(v.equals(v2))
self.assertTrue(v.identical(v2))
self.assertTrue(v.no_conflicts(v2))
self.assertTrue(v[0].equals(v2[0]))
self.assertTrue(v[0].identical(v2[0]))
self.assertTrue(v[0].no_conflicts(v2[0]))
self.assertTrue(v[:2].equals(v2[:2]))
self.assertTrue(v[:2].identical(v2[:2]))
self.assertTrue(v[:2].no_conflicts(v2[:2]))
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = Variable('x', 3 * [False])
for v, _ in self.example_1d_objects():
actual = 'z' == v
self.assertVariableIdentical(expected, actual)
actual = ~('z' != v)
self.assertVariableIdentical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls('x', range(3), {'foo': 1}, {'bar': 2})
for actual in [expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.set_dims({'x': 3}),
expected.copy(deep=True),
expected.copy(deep=False)]:
self.assertVariableIdentical(expected.to_base_variable(),
actual.to_base_variable())
self.assertEqual(expected.encoding, actual.encoding)
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(['a'], x)
w = self.cls(['a'], y)
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat([v, w], 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
with self.assertRaisesRegexp(ValueError, 'inconsistent dimensions'):
Variable.concat([v, Variable(['c'], y)], 'b')
# test indexers
actual = Variable.concat(
[v, w],
positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)],
dim='a')
expected = Variable('a', np.array([x, y]).ravel(order='F'))
self.assertVariableIdentical(expected, actual)
# test concatenating along a dimension
v = Variable(['time', 'x'], np.random.random((10, 8)))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:6], v[6:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:1], v[1:]], 'time'))
# test dimension order
self.assertVariableIdentical(v, Variable.concat([v[:, :5], v[:, 5:]], 'x'))
with self.assertRaisesRegexp(ValueError, 'all input arrays must have'):
Variable.concat([v[:, 0], v[:, 1:]], 'x')
def test_concat_attrs(self):
# different or conflicting attributes should be removed
v = self.cls('a', np.arange(5), {'foo': 'bar'})
w = self.cls('a', np.ones(5))
expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)])).to_base_variable()
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 2
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 'bar'
expected.attrs['foo'] = 'bar'
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ['S', 'U']:
x = self.cls('animal', np.array(['horse'], dtype=kind))
y = self.cls('animal', np.array(['aardvark'], dtype=kind))
actual = Variable.concat([x, y], 'animal')
expected = Variable(
'animal', np.array(['horse', 'aardvark'], dtype=kind))
self.assertVariableEqual(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls('x', ['0', '1', '2'])
b = self.cls('x', ['3', '4'])
actual = Variable.concat([a, b], dim='x')
expected = Variable('x', np.arange(5).astype(str).astype(object))
self.assertVariableIdentical(expected, actual)
self.assertEqual(expected.dtype, object)
self.assertEqual(type(expected.values[0]), str)
def test_copy(self):
v = self.cls('x', 0.5 * np.arange(10), {'foo': 'bar'})
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIs(type(v), type(w))
self.assertVariableIdentical(v, w)
self.assertEqual(v.dtype, w.dtype)
if self.cls is Variable:
if deep:
self.assertIsNot(source_ndarray(v.values),
source_ndarray(w.values))
else:
self.assertIs(source_ndarray(v.values),
source_ndarray(w.values))
self.assertVariableIdentical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2], [-1, -2]],
names=('one', 'two', 'three'))
v = self.cls('x', midx)
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIsInstance(w._data, PandasIndexAdapter)
self.assertIsInstance(w.to_index(), pd.MultiIndex)
self.assertArrayEqual(v._data.array, w._data.array)
def test_real_and_imag(self):
v = self.cls('x', np.arange(3) - 1j * np.arange(3), {'foo': 'bar'})
expected_re = self.cls('x', np.arange(3), {'foo': 'bar'})
self.assertVariableIdentical(v.real, expected_re)
expected_im = self.cls('x', -np.arange(3), {'foo': 'bar'})
self.assertVariableIdentical(v.imag, expected_im)
expected_abs = self.cls('x', np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
self.assertVariableAllClose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls('x', [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
self.assertVariableAllClose(v.mean(), expected)
    def test_pandas_cateogrical_dtype(self):
        # NOTE(review): method name misspells "categorical"; left unchanged
        # because renaming would alter the test's public identifier.
        # Construction from pd.Categorical must not error and the resulting
        # dtype is the underlying values' int64 dtype.
        data = pd.Categorical(np.arange(10, dtype='int64'))
        v = self.cls('x', data)
        print(v) # should not error
        assert v.dtype == 'int64'
    def test_pandas_datetime64_with_tz(self):
        # Timezone-aware datetime ranges: construction must not error. On
        # pandas versions that expose a tz-aware datetime64 dtype, the
        # variable falls back to object dtype.
        data = pd.date_range(start='2000-01-01',
                             tz=pytz.timezone('America/New_York'),
                             periods=10, freq='1h')
        v = self.cls('x', data)
        print(v) # should not error
        if 'America/New_York' in str(data.dtype):
            # pandas is new enough that it has datetime64 with timezone dtype
            assert v.dtype == 'object'
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list('abc'), [0, 1]])
v = self.cls('x', idx)
self.assertVariableIdentical(Variable((), ('a', 0)), v[0])
self.assertVariableIdentical(v, v[:])
    def test_load(self):
        # load() must leave the in-memory data type unchanged and must not
        # affect previously made copies.
        array = self.cls('x', np.arange(5))
        orig_data = array._data
        copied = array.copy(deep=True)
        array.load()
        assert type(array._data) is type(orig_data)
        assert type(copied._data) is type(orig_data)
        self.assertVariableIdentical(array, copied)
class TestVariable(TestCase, VariableSubclassTestCases):
    """Runs the shared VariableSubclassTestCases suite against the base
    Variable class, plus Variable-specific behavior tests."""
    cls = staticmethod(Variable)
    def setUp(self):
        # 10x3 float64 fixture shared by several tests below
        self.d = np.random.random((10, 3)).astype(np.float64)
    def test_data_and_values(self):
        v = Variable(['time', 'x'], self.d)
        self.assertArrayEqual(v.data, self.d)
        self.assertArrayEqual(v.values, self.d)
        self.assertIs(source_ndarray(v.values), self.d)
        with self.assertRaises(ValueError):
            # wrong size
            v.values = np.random.random(5)
        d2 = np.random.random((10, 3))
        v.values = d2
        self.assertIs(source_ndarray(v.values), d2)
        d3 = np.random.random((10, 3))
        v.data = d3
        self.assertIs(source_ndarray(v.data), d3)
    def test_numpy_same_methods(self):
        v = Variable([], np.float32(0.0))
        self.assertEqual(v.item(), 0)
        self.assertIs(type(v.item()), float)
        v = IndexVariable('x', np.arange(5))
        self.assertEqual(2, v.searchsorted(2))
    def test_datetime64_conversion_scalar(self):
        # all scalar datetime flavors normalize to datetime64[ns]
        expected = np.datetime64('2000-01-01T00:00:00Z', 'ns')
        for values in [
            np.datetime64('2000-01-01T00Z'),
            pd.Timestamp('2000-01-01T00'),
            datetime(2000, 1, 1),
        ]:
            v = Variable([], values)
            self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
            self.assertEqual(v.values, expected)
            self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
    def test_timedelta64_conversion_scalar(self):
        # all scalar timedelta flavors normalize to timedelta64[ns]
        expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns')
        for values in [
            np.timedelta64(1, 'D'),
            pd.Timedelta('1 day'),
            timedelta(days=1),
        ]:
            v = Variable([], values)
            self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
            self.assertEqual(v.values, expected)
            self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
    def test_0d_str(self):
        v = Variable([], u'foo')
        self.assertEqual(v.dtype, np.dtype('U3'))
        self.assertEqual(v.values, 'foo')
        v = Variable([], np.string_('foo'))
        self.assertEqual(v.dtype, np.dtype('S3'))
        self.assertEqual(v.values, bytes('foo', 'ascii') if PY3 else 'foo')
    def test_0d_datetime(self):
        v = Variable([], pd.Timestamp('2000-01-01'))
        self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
        self.assertEqual(v.values, np.datetime64('2000-01-01T00Z', 'ns'))
    def test_0d_timedelta(self):
        for td in [pd.to_timedelta('1s'), np.timedelta64(1, 's')]:
            v = Variable([], td)
            self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
            self.assertEqual(v.values, np.timedelta64(10 ** 9, 'ns'))
    def test_equals_and_identical(self):
        # equals ignores attrs/names differences that identical rejects
        d = np.random.rand(10, 3)
        d[0, 0] = np.nan
        v1 = Variable(('dim1', 'dim2'), data=d,
                      attrs={'att1': 3, 'att2': [1, 2, 3]})
        v2 = Variable(('dim1', 'dim2'), data=d,
                      attrs={'att1': 3, 'att2': [1, 2, 3]})
        self.assertTrue(v1.equals(v2))
        self.assertTrue(v1.identical(v2))
        v3 = Variable(('dim1', 'dim3'), data=d)
        self.assertFalse(v1.equals(v3))
        v4 = Variable(('dim1', 'dim2'), data=d)
        self.assertTrue(v1.equals(v4))
        self.assertFalse(v1.identical(v4))
        v5 = deepcopy(v1)
        v5.values[:] = np.random.rand(10, 3)
        self.assertFalse(v1.equals(v5))
        self.assertFalse(v1.equals(None))
        self.assertFalse(v1.equals(d))
        self.assertFalse(v1.identical(None))
        self.assertFalse(v1.identical(d))
    def test_broadcast_equals(self):
        v1 = Variable((), np.nan)
        v2 = Variable(('x'), [np.nan, np.nan])
        self.assertTrue(v1.broadcast_equals(v2))
        self.assertFalse(v1.equals(v2))
        self.assertFalse(v1.identical(v2))
        v3 = Variable(('x'), [np.nan])
        self.assertTrue(v1.broadcast_equals(v3))
        self.assertFalse(v1.equals(v3))
        self.assertFalse(v1.identical(v3))
        self.assertFalse(v1.broadcast_equals(None))
        v4 = Variable(('x'), [np.nan] * 3)
        self.assertFalse(v2.broadcast_equals(v4))
    def test_no_conflicts(self):
        v1 = Variable(('x'), [1, 2, np.nan, np.nan])
        v2 = Variable(('x'), [np.nan, 2, 3, np.nan])
        self.assertTrue(v1.no_conflicts(v2))
        self.assertFalse(v1.equals(v2))
        self.assertFalse(v1.broadcast_equals(v2))
        self.assertFalse(v1.identical(v2))
        self.assertFalse(v1.no_conflicts(None))
        v3 = Variable(('y'), [np.nan, 2, 3, np.nan])
        self.assertFalse(v3.no_conflicts(v1))
        d = np.array([1, 2, np.nan, np.nan])
        self.assertFalse(v1.no_conflicts(d))
        self.assertFalse(v2.no_conflicts(d))
        v4 = Variable(('w', 'x'), [d])
        self.assertTrue(v1.no_conflicts(v4))
    def test_as_variable(self):
        # as_variable accepts Variables, DataArrays, duck-typed objects,
        # (dims, values, ...) tuples and scalars
        data = np.arange(10)
        expected = Variable('x', data)
        expected_extra = Variable('x', data, attrs={'myattr': 'val'},
                                  encoding={'scale_factor': 1})
        self.assertVariableIdentical(expected, as_variable(expected))
        ds = Dataset({'x': expected})
        var = as_variable(ds['x']).to_base_variable()
        self.assertVariableIdentical(expected, var)
        self.assertNotIsInstance(ds['x'], Variable)
        self.assertIsInstance(as_variable(ds['x']), Variable)
        FakeVariable = namedtuple('FakeVariable', 'values dims')
        fake_xarray = FakeVariable(expected.values, expected.dims)
        self.assertVariableIdentical(expected, as_variable(fake_xarray))
        FakeVariable = namedtuple('FakeVariable', 'data dims')
        fake_xarray = FakeVariable(expected.data, expected.dims)
        self.assertVariableIdentical(expected, as_variable(fake_xarray))
        FakeVariable = namedtuple('FakeVariable',
                                  'data values dims attrs encoding')
        fake_xarray = FakeVariable(expected_extra.data, expected_extra.values,
                                   expected_extra.dims, expected_extra.attrs,
                                   expected_extra.encoding)
        self.assertVariableIdentical(expected_extra, as_variable(fake_xarray))
        xarray_tuple = (expected_extra.dims, expected_extra.values,
                        expected_extra.attrs, expected_extra.encoding)
        self.assertVariableIdentical(expected_extra, as_variable(xarray_tuple))
        with self.assertRaisesRegexp(TypeError, 'tuples to convert'):
            as_variable(tuple(data))
        with self.assertRaisesRegexp(
                TypeError, 'without an explicit list of dimensions'):
            as_variable(data)
        actual = as_variable(data, name='x')
        self.assertVariableIdentical(expected.to_index_variable(), actual)
        actual = as_variable(0)
        expected = Variable([], 0)
        self.assertVariableIdentical(expected, actual)
        data = np.arange(9).reshape((3, 3))
        expected = Variable(('x', 'y'), data)
        with self.assertRaisesRegexp(
                ValueError, 'without explicit dimension names'):
            as_variable(data, name='x')
        with self.assertRaisesRegexp(
                ValueError, 'has more than 1-dimension'):
            as_variable(expected, name='x')
    def test_repr(self):
        v = Variable(['time', 'x'], [[1, 2, 3], [4, 5, 6]], {'foo': 'bar'})
        expected = dedent("""
        <xarray.Variable (time: 2, x: 3)>
        array([[1, 2, 3],
               [4, 5, 6]])
        Attributes:
            foo: bar
        """).strip()
        self.assertEqual(expected, repr(v))
    def test_repr_lazy_data(self):
        # repr must not trigger loading of lazily indexed data
        v = Variable('x', LazilyIndexedArray(np.arange(2e5)))
        self.assertIn('200000 values with dtype', repr(v))
        self.assertIsInstance(v._data, LazilyIndexedArray)
    def test_items(self):
        data = np.random.random((10, 11))
        v = Variable(['x', 'y'], data)
        # test slicing
        self.assertVariableIdentical(v, v[:])
        self.assertVariableIdentical(v, v[...])
        self.assertVariableIdentical(Variable(['y'], data[0]), v[0])
        self.assertVariableIdentical(Variable(['x'], data[:, 0]), v[:, 0])
        self.assertVariableIdentical(Variable(['x', 'y'], data[:3, :2]),
                                     v[:3, :2])
        # test array indexing
        x = Variable(['x'], np.arange(10))
        y = Variable(['y'], np.arange(11))
        self.assertVariableIdentical(v, v[x.values])
        self.assertVariableIdentical(v, v[x])
        self.assertVariableIdentical(v[:3], v[x < 3])
        self.assertVariableIdentical(v[:, 3:], v[:, y >= 3])
        self.assertVariableIdentical(v[:3, 3:], v[x < 3, y >= 3])
        self.assertVariableIdentical(v[:3, :2], v[x[:3], y[:2]])
        self.assertVariableIdentical(v[:3, :2], v[range(3), range(2)])
        # test iteration
        for n, item in enumerate(v):
            self.assertVariableIdentical(Variable(['y'], data[n]), item)
        with self.assertRaisesRegexp(TypeError, 'iteration over a 0-d'):
            iter(Variable([], 0))
        # test setting
        v.values[:] = 0
        self.assertTrue(np.all(v.values == 0))
        # test orthogonal setting
        v[range(10), range(11)] = 1
        self.assertArrayEqual(v.values, np.ones((10, 11)))
    def test_isel(self):
        v = Variable(['time', 'x'], self.d)
        self.assertVariableIdentical(v.isel(time=slice(None)), v)
        self.assertVariableIdentical(v.isel(time=0), v[0])
        self.assertVariableIdentical(v.isel(time=slice(0, 3)), v[:3])
        self.assertVariableIdentical(v.isel(x=0), v[:, 0])
        with self.assertRaisesRegexp(ValueError, 'do not exist'):
            v.isel(not_a_dim=0)
    def test_index_0d_numpy_string(self):
        # regression test to verify our work around for indexing 0d strings
        v = Variable([], np.string_('asdf'))
        self.assertVariableIdentical(v[()], v)
        v = Variable([], np.unicode_(u'asdf'))
        self.assertVariableIdentical(v[()], v)
    def test_indexing_0d_unicode(self):
        # regression test for GH568
        actual = Variable(('x'), [u'tmax'])[0][()]
        expected = Variable((), u'tmax')
        self.assertVariableIdentical(actual, expected)
    def test_shift(self):
        # shift pads with NaN and never operates in place
        v = Variable('x', [1, 2, 3, 4, 5])
        self.assertVariableIdentical(v, v.shift(x=0))
        self.assertIsNot(v, v.shift(x=0))
        expected = Variable('x', [np.nan, 1, 2, 3, 4])
        self.assertVariableIdentical(expected, v.shift(x=1))
        expected = Variable('x', [np.nan, np.nan, 1, 2, 3])
        self.assertVariableIdentical(expected, v.shift(x=2))
        expected = Variable('x', [2, 3, 4, 5, np.nan])
        self.assertVariableIdentical(expected, v.shift(x=-1))
        expected = Variable('x', [np.nan] * 5)
        self.assertVariableIdentical(expected, v.shift(x=5))
        self.assertVariableIdentical(expected, v.shift(x=6))
        with self.assertRaisesRegexp(ValueError, 'dimension'):
            v.shift(z=0)
        v = Variable('x', [1, 2, 3, 4, 5], {'foo': 'bar'})
        self.assertVariableIdentical(v, v.shift(x=0))
        expected = Variable('x', [np.nan, 1, 2, 3, 4], {'foo': 'bar'})
        self.assertVariableIdentical(expected, v.shift(x=1))
    def test_shift2d(self):
        v = Variable(('x', 'y'), [[1, 2], [3, 4]])
        expected = Variable(('x', 'y'), [[np.nan, np.nan], [np.nan, 1]])
        self.assertVariableIdentical(expected, v.shift(x=1, y=1))
    def test_roll(self):
        # roll wraps values around (no NaN padding) and never mutates
        v = Variable('x', [1, 2, 3, 4, 5])
        self.assertVariableIdentical(v, v.roll(x=0))
        self.assertIsNot(v, v.roll(x=0))
        expected = Variable('x', [5, 1, 2, 3, 4])
        self.assertVariableIdentical(expected, v.roll(x=1))
        self.assertVariableIdentical(expected, v.roll(x=-4))
        self.assertVariableIdentical(expected, v.roll(x=6))
        expected = Variable('x', [4, 5, 1, 2, 3])
        self.assertVariableIdentical(expected, v.roll(x=2))
        self.assertVariableIdentical(expected, v.roll(x=-3))
        with self.assertRaisesRegexp(ValueError, 'dimension'):
            v.roll(z=0)
    def test_roll_consistency(self):
        # Variable.roll must agree with np.roll for every axis/shift combo
        v = Variable(('x', 'y'), np.random.randn(5, 6))
        for axis, dim in [(0, 'x'), (1, 'y')]:
            for shift in [-3, 0, 1, 7, 11]:
                expected = np.roll(v.values, shift, axis=axis)
                actual = v.roll(**{dim: shift}).values
                self.assertArrayEqual(expected, actual)
    def test_transpose(self):
        v = Variable(['time', 'x'], self.d)
        v2 = Variable(['x', 'time'], self.d.T)
        self.assertVariableIdentical(v, v2.transpose())
        self.assertVariableIdentical(v.transpose(), v.T)
        x = np.random.randn(2, 3, 4, 5)
        w = Variable(['a', 'b', 'c', 'd'], x)
        w2 = Variable(['d', 'b', 'c', 'a'], np.einsum('abcd->dbca', x))
        self.assertEqual(w2.shape, (5, 3, 4, 2))
        self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a'))
        self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd'))
        w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x))
        self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd'))
    def test_transpose_0d(self):
        # transpose of a 0-d variable is a no-op for any payload type
        for value in [
            3.5,
            ('a', 1),
            np.datetime64('2000-01-01'),
            np.timedelta64(1, 'h'),
            None,
            object(),
        ]:
            variable = Variable([], value)
            actual = variable.transpose()
            assert actual.identical(variable)
    def test_squeeze(self):
        v = Variable(['x', 'y'], [[1]])
        self.assertVariableIdentical(Variable([], 1), v.squeeze())
        self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze('x'))
        self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze(['x']))
        self.assertVariableIdentical(Variable(['x'], [1]), v.squeeze('y'))
        self.assertVariableIdentical(Variable([], 1), v.squeeze(['x', 'y']))
        v = Variable(['x', 'y'], [[1, 2]])
        self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze())
        self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze('x'))
        with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
            v.squeeze('y')
    def test_get_axis_num(self):
        v = Variable(['x', 'y', 'z'], np.random.randn(2, 3, 4))
        self.assertEqual(v.get_axis_num('x'), 0)
        self.assertEqual(v.get_axis_num(['x']), (0,))
        self.assertEqual(v.get_axis_num(['x', 'y']), (0, 1))
        self.assertEqual(v.get_axis_num(['z', 'y', 'x']), (2, 1, 0))
        with self.assertRaisesRegexp(ValueError, 'not found in array dim'):
            v.get_axis_num('foobar')
    def test_set_dims(self):
        v = Variable(['x'], [0, 1])
        actual = v.set_dims(['x', 'y'])
        expected = Variable(['x', 'y'], [[0], [1]])
        self.assertVariableIdentical(actual, expected)
        actual = v.set_dims(['y', 'x'])
        self.assertVariableIdentical(actual, expected.T)
        actual = v.set_dims(OrderedDict([('x', 2), ('y', 2)]))
        expected = Variable(['x', 'y'], [[0, 0], [1, 1]])
        self.assertVariableIdentical(actual, expected)
        v = Variable(['foo'], [0, 1])
        actual = v.set_dims('foo')
        expected = v
        self.assertVariableIdentical(actual, expected)
        with self.assertRaisesRegexp(ValueError, 'must be a superset'):
            v.set_dims(['z'])
    def test_set_dims_object_dtype(self):
        v = Variable([], ('a', 1))
        actual = v.set_dims(('x',), (3,))
        exp_values = np.empty((3,), dtype=object)
        for i in range(3):
            exp_values[i] = ('a', 1)
        expected = Variable(['x'], exp_values)
        assert actual.identical(expected)
    def test_stack(self):
        v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
        actual = v.stack(z=('x', 'y'))
        expected = Variable('z', [0, 1, 2, 3], v.attrs)
        self.assertVariableIdentical(actual, expected)
        actual = v.stack(z=('x',))
        expected = Variable(('y', 'z'), v.data.T, v.attrs)
        self.assertVariableIdentical(actual, expected)
        actual = v.stack(z=(),)
        self.assertVariableIdentical(actual, v)
        actual = v.stack(X=('x',), Y=('y',)).transpose('X', 'Y')
        expected = Variable(('X', 'Y'), v.data, v.attrs)
        self.assertVariableIdentical(actual, expected)
    def test_stack_errors(self):
        v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
        with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
            v.stack(z=('x1',))
        with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
            v.stack(x=('x',))
    def test_unstack(self):
        v = Variable('z', [0, 1, 2, 3], {'foo': 'bar'})
        actual = v.unstack(z=OrderedDict([('x', 2), ('y', 2)]))
        expected = Variable(('x', 'y'), [[0, 1], [2, 3]], v.attrs)
        self.assertVariableIdentical(actual, expected)
        actual = v.unstack(z=OrderedDict([('x', 4), ('y', 1)]))
        expected = Variable(('x', 'y'), [[0], [1], [2], [3]], v.attrs)
        self.assertVariableIdentical(actual, expected)
        actual = v.unstack(z=OrderedDict([('x', 4)]))
        expected = Variable('x', [0, 1, 2, 3], v.attrs)
        self.assertVariableIdentical(actual, expected)
    def test_unstack_errors(self):
        v = Variable('z', [0, 1, 2, 3])
        with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
            v.unstack(foo={'x': 4})
        with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
            v.stack(z=('z',))
        with self.assertRaisesRegexp(ValueError, 'the product of the new dim'):
            v.unstack(z={'x': 5})
    def test_unstack_2d(self):
        v = Variable(['x', 'y'], [[0, 1], [2, 3]])
        actual = v.unstack(y={'z': 2})
        expected = Variable(['x', 'z'], v.data)
        self.assertVariableIdentical(actual, expected)
        actual = v.unstack(x={'z': 2})
        expected = Variable(['y', 'z'], v.data.T)
        self.assertVariableIdentical(actual, expected)
    def test_stack_unstack_consistency(self):
        # stack followed by the inverse unstack must round-trip exactly
        v = Variable(['x', 'y'], [[0, 1], [2, 3]])
        actual = (v.stack(z=('x', 'y'))
                  .unstack(z=OrderedDict([('x', 2), ('y', 2)])))
        self.assertVariableIdentical(actual, v)
    def test_broadcasting_math(self):
        x = np.random.randn(2, 3)
        v = Variable(['a', 'b'], x)
        # 1d to 2d broadcasting
        self.assertVariableIdentical(
            v * v,
            Variable(['a', 'b'], np.einsum('ab,ab->ab', x, x)))
        self.assertVariableIdentical(
            v * v[0],
            Variable(['a', 'b'], np.einsum('ab,b->ab', x, x[0])))
        self.assertVariableIdentical(
            v[0] * v,
            Variable(['b', 'a'], np.einsum('b,ab->ba', x[0], x)))
        self.assertVariableIdentical(
            v[0] * v[:, 0],
            Variable(['b', 'a'], np.einsum('b,a->ba', x[0], x[:, 0])))
        # higher dim broadcasting
        y = np.random.randn(3, 4, 5)
        w = Variable(['b', 'c', 'd'], y)
        self.assertVariableIdentical(
            v * w, Variable(['a', 'b', 'c', 'd'],
                            np.einsum('ab,bcd->abcd', x, y)))
        self.assertVariableIdentical(
            w * v, Variable(['b', 'c', 'd', 'a'],
                            np.einsum('bcd,ab->bcda', y, x)))
        self.assertVariableIdentical(
            v * w[0], Variable(['a', 'b', 'c', 'd'],
                               np.einsum('ab,cd->abcd', x, y[0])))
    def test_broadcasting_failures(self):
        a = Variable(['x'], np.arange(10))
        b = Variable(['x'], np.arange(5))
        c = Variable(['x', 'x'], np.arange(100).reshape(10, 10))
        with self.assertRaisesRegexp(ValueError, 'mismatched lengths'):
            a + b
        with self.assertRaisesRegexp(ValueError, 'duplicate dimensions'):
            a + c
    def test_inplace_math(self):
        x = np.arange(5)
        v = Variable(['x'], x)
        v2 = v
        v2 += 1
        self.assertIs(v, v2)
        # since we provided an ndarray for data, it is also modified in-place
        self.assertIs(source_ndarray(v.values), x)
        self.assertArrayEqual(v.values, np.arange(5) + 1)
        with self.assertRaisesRegexp(ValueError, 'dimensions cannot change'):
            v += Variable('y', np.arange(5))
    def test_reduce(self):
        v = Variable(['x', 'y'], self.d, {'ignored': 'attributes'})
        self.assertVariableIdentical(v.reduce(np.std, 'x'),
                                     Variable(['y'], self.d.std(axis=0)))
        self.assertVariableIdentical(v.reduce(np.std, axis=0),
                                     v.reduce(np.std, dim='x'))
        self.assertVariableIdentical(v.reduce(np.std, ['y', 'x']),
                                     Variable([], self.d.std(axis=(0, 1))))
        self.assertVariableIdentical(v.reduce(np.std),
                                     Variable([], self.d.std()))
        self.assertVariableIdentical(
            v.reduce(np.mean, 'x').reduce(np.std, 'y'),
            Variable([], self.d.mean(axis=0).std()))
        self.assertVariableAllClose(v.mean('x'), v.reduce(np.mean, 'x'))
        with self.assertRaisesRegexp(ValueError, 'cannot supply both'):
            v.mean(dim='x', axis=0)
    @pytest.mark.skipif(LooseVersion(np.__version__) < LooseVersion('1.10.0'),
                        reason='requires numpy version 1.10.0 or later')
    def test_quantile(self):
        # quantile must match np.nanpercentile for scalar and list q
        v = Variable(['x', 'y'], self.d)
        for q in [0.25, [0.50], [0.25, 0.75]]:
            for axis, dim in zip([None, 0, [0], [0, 1]],
                                 [None, 'x', ['x'], ['x', 'y']]):
                actual = v.quantile(q, dim=dim)
                expected = np.nanpercentile(self.d, np.array(q) * 100,
                                            axis=axis)
                np.testing.assert_allclose(actual.values, expected)
    @requires_dask
    def test_quantile_dask_raises(self):
        # regression for GH1524
        v = Variable(['x', 'y'], self.d).chunk(2)
        with self.assertRaisesRegexp(TypeError, 'arrays stored as dask'):
            v.quantile(0.5, dim='x')
    def test_big_endian_reduce(self):
        # regression test for GH489
        data = np.ones(5, dtype='>f4')
        v = Variable(['x'], data)
        expected = Variable([], 5)
        self.assertVariableIdentical(expected, v.sum())
    def test_reduce_funcs(self):
        v = Variable('x', np.array([1, np.nan, 2, 3]))
        self.assertVariableIdentical(v.mean(), Variable([], 2))
        self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2))
        self.assertVariableIdentical(v.mean(skipna=False), Variable([], np.nan))
        self.assertVariableIdentical(np.mean(v), Variable([], 2))
        self.assertVariableIdentical(v.prod(), Variable([], 6))
        self.assertVariableIdentical(v.cumsum(axis=0),
                                     Variable('x', np.array([1, 1, 3, 6])))
        self.assertVariableIdentical(v.cumprod(axis=0),
                                     Variable('x', np.array([1, 1, 2, 6])))
        self.assertVariableIdentical(v.var(), Variable([], 2.0 / 3))
        if LooseVersion(np.__version__) < '1.9':
            with self.assertRaises(NotImplementedError):
                v.median()
        else:
            self.assertVariableIdentical(v.median(), Variable([], 2))
        v = Variable('x', [True, False, False])
        self.assertVariableIdentical(v.any(), Variable([], True))
        self.assertVariableIdentical(v.all(dim='x'), Variable([], False))
        v = Variable('t', pd.date_range('2000-01-01', periods=3))
        with self.assertRaises(NotImplementedError):
            v.max(skipna=True)
        self.assertVariableIdentical(
            v.max(), Variable([], pd.Timestamp('2000-01-03')))
    def test_reduce_keep_attrs(self):
        _attrs = {'units': 'test', 'long_name': 'testing'}
        v = Variable(['x', 'y'], self.d, _attrs)
        # Test dropped attrs
        vm = v.mean()
        self.assertEqual(len(vm.attrs), 0)
        self.assertEqual(vm.attrs, OrderedDict())
        # Test kept attrs
        vm = v.mean(keep_attrs=True)
        self.assertEqual(len(vm.attrs), len(_attrs))
        self.assertEqual(vm.attrs, _attrs)
    def test_count(self):
        # count() tallies non-NaN entries, for numeric, object and bool data
        expected = Variable([], 3)
        actual = Variable(['x'], [1, 2, 3, np.nan]).count()
        self.assertVariableIdentical(expected, actual)
        v = Variable(['x'], np.array(['1', '2', '3', np.nan], dtype=object))
        actual = v.count()
        self.assertVariableIdentical(expected, actual)
        actual = Variable(['x'], [True, False, True]).count()
        self.assertVariableIdentical(expected, actual)
        self.assertEqual(actual.dtype, int)
        expected = Variable(['x'], [2, 3])
        actual = Variable(['x', 'y'], [[1, 0, np.nan], [1, 1, 1]]).count('y')
        self.assertVariableIdentical(expected, actual)
class TestIndexVariable(TestCase, VariableSubclassTestCases):
    """Runs the shared suite against IndexVariable (1-d, pandas-index-backed),
    plus tests of its index-specific behavior."""
    cls = staticmethod(IndexVariable)
    def test_init(self):
        with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
            IndexVariable((), 0)
    def test_to_index(self):
        data = 0.5 * np.arange(10)
        v = IndexVariable(['time'], data, {'foo': 'bar'})
        self.assertTrue(pd.Index(data, name='time').identical(v.to_index()))
    def test_multiindex_default_level_names(self):
        # unnamed MultiIndex levels get '<dim>_level_<i>' default names
        midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
        v = IndexVariable(['x'], midx, {'foo': 'bar'})
        self.assertEqual(v.to_index().names, ('x_level_0', 'x_level_1'))
    def test_data(self):
        # index data is wrapped in PandasIndexAdapter and is read-only
        x = IndexVariable('x', np.arange(3.0))
        self.assertIsInstance(x._data, PandasIndexAdapter)
        self.assertIsInstance(x.data, np.ndarray)
        self.assertEqual(float, x.dtype)
        self.assertArrayEqual(np.arange(3), x)
        self.assertEqual(float, x.values.dtype)
        with self.assertRaisesRegexp(TypeError, 'cannot be modified'):
            x[:] = 0
    def test_name(self):
        coord = IndexVariable('x', [10.0])
        self.assertEqual(coord.name, 'x')
        with self.assertRaises(AttributeError):
            coord.name = 'y'
    def test_level_names(self):
        midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
                                          names=['level_1', 'level_2'])
        x = IndexVariable('x', midx)
        self.assertEqual(x.level_names, midx.names)
        self.assertIsNone(IndexVariable('y', [10.0]).level_names)
    def test_get_level_variable(self):
        midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
                                          names=['level_1', 'level_2'])
        x = IndexVariable('x', midx)
        level_1 = IndexVariable('x', midx.get_level_values('level_1'))
        self.assertVariableIdentical(x.get_level_variable('level_1'), level_1)
        with self.assertRaisesRegexp(ValueError, 'has no MultiIndex'):
            IndexVariable('y', [10.0]).get_level_variable('level')
    def test_concat_periods(self):
        # concat must preserve the PeriodIndex type, with and without
        # explicit positions
        periods = pd.period_range('2000-01-01', periods=10)
        coords = [IndexVariable('t', periods[:5]), IndexVariable('t', periods[5:])]
        expected = IndexVariable('t', periods)
        actual = IndexVariable.concat(coords, dim='t')
        assert actual.identical(expected)
        assert isinstance(actual.to_index(), pd.PeriodIndex)
        positions = [list(range(5)), list(range(5, 10))]
        actual = IndexVariable.concat(coords, dim='t', positions=positions)
        assert actual.identical(expected)
        assert isinstance(actual.to_index(), pd.PeriodIndex)
    def test_concat_multiindex(self):
        # concat must preserve the MultiIndex type
        idx = pd.MultiIndex.from_product([[0, 1, 2], ['a', 'b']])
        coords = [IndexVariable('x', idx[:2]), IndexVariable('x', idx[2:])]
        expected = IndexVariable('x', idx)
        actual = IndexVariable.concat(coords, dim='x')
        assert actual.identical(expected)
        assert isinstance(actual.to_index(), pd.MultiIndex)
    def test_coordinate_alias(self):
        # the legacy Coordinate name warns but still builds an IndexVariable
        with self.assertWarns('deprecated'):
            x = Coordinate('x', [1, 2, 3])
        self.assertIsInstance(x, IndexVariable)
class TestAsCompatibleData(TestCase):
    """
    Tests for as_compatible_data() coercion rules and the full_like /
    zeros_like / ones_like helpers.
    """
    def test_unchanged_types(self):
        # Wrappers that are already "compatible" must pass through without
        # copying the underlying buffer.
        types = (np.asarray, PandasIndexAdapter, indexing.LazilyIndexedArray)
        for t in types:
            for data in [np.arange(3),
                         pd.date_range('2000-01-01', periods=3),
                         pd.date_range('2000-01-01', periods=3).values]:
                x = t(data)
                self.assertIs(source_ndarray(x),
                              source_ndarray(as_compatible_data(x)))
    def test_converted_types(self):
        # Nested lists and DataFrames are coerced to plain ndarrays.
        for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
            actual = as_compatible_data(input_array)
            self.assertArrayEqual(np.asarray(input_array), actual)
            self.assertEqual(np.ndarray, type(actual))
            self.assertEqual(np.asarray(input_array).dtype, actual.dtype)
    def test_masked_array(self):
        # An unmasked MaskedArray keeps its integer dtype...
        original = np.ma.MaskedArray(np.arange(5))
        expected = np.arange(5)
        actual = as_compatible_data(original)
        self.assertArrayEqual(expected, actual)
        self.assertEqual(np.dtype(int), actual.dtype)
        # ...but masked entries are filled with NaN, promoting to float.
        original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
        expected = np.arange(5.0)
        expected[-1] = np.nan
        actual = as_compatible_data(original)
        self.assertArrayEqual(expected, actual)
        self.assertEqual(np.dtype(float), actual.dtype)
    def test_datetime(self):
        # Scalars and arrays of datetimes normalize to datetime64[ns].
        expected = np.datetime64('2000-01-01T00Z')
        actual = as_compatible_data(expected)
        self.assertEqual(expected, actual)
        self.assertEqual(np.ndarray, type(actual))
        self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
        expected = np.array([np.datetime64('2000-01-01T00Z')])
        actual = as_compatible_data(expected)
        self.assertEqual(np.asarray(expected), actual)
        self.assertEqual(np.ndarray, type(actual))
        self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
        expected = np.array([np.datetime64('2000-01-01T00Z', 'ns')])
        actual = as_compatible_data(expected)
        self.assertEqual(np.asarray(expected), actual)
        self.assertEqual(np.ndarray, type(actual))
        self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
        # already ns-precision: passed through without a copy
        self.assertIs(expected, source_ndarray(np.asarray(actual)))
        expected = np.datetime64('2000-01-01T00Z', 'ns')
        actual = as_compatible_data(datetime(2000, 1, 1))
        self.assertEqual(np.asarray(expected), actual)
        self.assertEqual(np.ndarray, type(actual))
        self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
    def test_full_like(self):
        # For more thorough tests, see test_variable.py
        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                        attrs={'foo': 'bar'})
        expect = orig.copy(deep=True)
        expect.values = [[2.0, 2.0], [2.0, 2.0]]
        self.assertVariableIdentical(expect, full_like(orig, 2))
        # override dtype
        expect.values = [[True, True], [True, True]]
        # fixed: assertEquals is a deprecated unittest alias (removed in 3.12)
        self.assertEqual(expect.dtype, bool)
        self.assertVariableIdentical(expect, full_like(orig, True, dtype=bool))
    @requires_dask
    def test_full_like_dask(self):
        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                        attrs={'foo': 'bar'}).chunk(((1, 1), (2,)))
        def check(actual, expect_dtype, expect_values):
            # chunked results must preserve shape, dims, attrs and chunking
            self.assertEqual(actual.dtype, expect_dtype)
            self.assertEqual(actual.shape, orig.shape)
            self.assertEqual(actual.dims, orig.dims)
            self.assertEqual(actual.attrs, orig.attrs)
            self.assertEqual(actual.chunks, orig.chunks)
            self.assertArrayEqual(actual.values, expect_values)
        check(full_like(orig, 2),
              orig.dtype, np.full_like(orig.values, 2))
        # override dtype
        check(full_like(orig, True, dtype=bool),
              bool, np.full_like(orig.values, True, dtype=bool))
        # Check that there's no array stored inside dask
        # (e.g. we didn't create a numpy array and then we chunked it!)
        dsk = full_like(orig, 1).data.dask
        for v in dsk.values():
            if isinstance(v, tuple):
                for vi in v:
                    assert not isinstance(vi, np.ndarray)
            else:
                assert not isinstance(v, np.ndarray)
    def test_zeros_like(self):
        # zeros_like must be equivalent to full_like with fill value 0
        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                        attrs={'foo': 'bar'})
        self.assertVariableIdentical(zeros_like(orig),
                                     full_like(orig, 0))
        self.assertVariableIdentical(zeros_like(orig, dtype=int),
                                     full_like(orig, 0, dtype=int))
    def test_ones_like(self):
        # ones_like must be equivalent to full_like with fill value 1
        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                        attrs={'foo': 'bar'})
        self.assertVariableIdentical(ones_like(orig),
                                     full_like(orig, 1))
        self.assertVariableIdentical(ones_like(orig, dtype=int),
                                     full_like(orig, 1, dtype=int))
|
apache-2.0
|
maxamillion/ansible-modules-extras
|
cloud/amazon/lambda_alias.py
|
25
|
12180
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# boto3 is an optional dependency: record its availability so the module can
# fail with a clear message at runtime instead of crashing at import time.
try:
    import boto3
    from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
DOCUMENTATION = '''
---
module: lambda_alias
short_description: Creates, updates or deletes AWS Lambda function aliases.
description:
- This module allows the management of AWS Lambda functions aliases via the Ansible
framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
itself and M(lambda_event) to manage event source mappings.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
options:
function_name:
description:
- The name of the function alias.
required: true
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
name:
description:
- Name of the function alias.
required: true
aliases: ['alias_name']
description:
description:
- A short, user-defined function alias description.
required: false
version:
description:
- Version associated with the Lambda function alias.
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
required: false
aliases: ['function_version']
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
production_version: 5
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
- name: show results
debug: var=lambda_facts
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Dev
description: Development is $LATEST version
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: QA
version: "{{ lambda_facts.Version }}"
description: "QA is version {{ lambda_facts.Version }}"
when: lambda_facts.Version != "$LATEST"
# The Prod alias will have a fixed version based on a variable
- name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Prod
version: "{{ production_version }}"
description: "Production is version {{ production_version }}"
'''
RETURN = '''
---
alias_arn:
description: Full ARN of the function, including the alias
returned: success
type: string
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
description:
description: A short description of the alias
returned: success
type: string
sample: The development stage for my hot new app
function_version:
description: The qualifier that the alias refers to
returned: success
type: string
sample: $LATEST
name:
description: The name of the alias assigned
returned: success
type: string
sample: dev
'''
class AWSConnection:
    """
    Builds and caches boto3 client objects for the requested AWS services.

    On construction a client is created for every requested service (plus
    'iam', which is always needed to resolve the account id), and the
    effective region is determined.  Clients are retrieved via client().
    """
    def __init__(self, ansible_obj, resources, boto3=True):
        try:
            self.region, self.endpoint, connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)

            self.resource_client = {}
            if not resources:
                resources = ['lambda']

            # the IAM client is always required to look up the account id below
            resources.append('iam')

            for service in resources:
                connect_kwargs.update(region=self.region,
                                      endpoint=self.endpoint,
                                      conn_type='client',
                                      resource=service)
                self.resource_client[service] = boto3_conn(ansible_obj, **connect_kwargs)

            # if region was not provided, fall back to the default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # best effort: an empty account id is acceptable when IAM lookup fails
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        """Return the cached boto3 client for *resource*."""
        return self.resource_client[resource]
def pc(key):
    """Convert a snake_case *key* into its PascalCase equivalent.

    For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case parameter name
    :return: PascalCase string
    """
    return "".join(part.capitalize() for part in key.split('_'))
def set_api_params(module, module_params):
    """Map Ansible module parameters onto their boto3 API (PascalCase) names.

    Only parameters with truthy values are included in the result.

    :param module: Ansible module reference
    :param module_params: iterable of snake_case parameter names to map
    :return: dict of PascalCase name -> value
    """
    return {pc(name): module.params[name]
            for name in module_params
            if module.params.get(name, None)}
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    :param module: Ansible module reference
    :param aws: AWS client connection (unused; kept for a consistent signature)
    :return: None. Invalid parameters abort the module via fail_json.
    """
    function_name = module.params['function_name']

    # validate function name: word characters (letters, digits, underscore),
    # hyphens, and colons (colons allow qualified/partial-ARN style names).
    # Raw string avoids the invalid '\-' escape warning of the original.
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # version 0 is this module's convention for "latest"; otherwise the boto3
    # API expects the qualifier as a string
    if module.params['function_version'] == 0:
        module.params['function_version'] = '$LATEST'
    else:
        module.params['function_version'] = str(module.params['function_version'])

    return
def get_lambda_alias(module, aws):
    """
    Returns the lambda function alias facts if the alias exists, else None.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return: dict of alias facts from get_alias, or None when not found
    """
    client = aws.client('lambda')

    # set API parameters
    api_params = set_api_params(module, ('function_name', 'name'))

    # check if alias exists and get facts
    try:
        results = client.get_alias(**api_params)
    except ClientError as e:
        # a missing alias is not an error -- it simply doesn't exist yet
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            results = None
        else:
            module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
    except (ParamValidationError, MissingParametersError) as e:
        # these exception types carry no .response attribute (the original
        # code crashed with AttributeError here), so they are always fatal
        module.fail_json(msg='Error retrieving function alias: {0}'.format(e))

    return results
def lambda_alias(module, aws):
    """
    Adds, updates or deletes lambda function aliases.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return dict: 'changed' flag plus the API results (or existing facts)
    """
    client = aws.client('lambda')
    results = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    facts = get_lambda_alias(module, aws)
    if facts:
        current_state = 'present'

    if state == 'present':
        if current_state == 'present':

            # check if alias has changed -- only version and description can change
            alias_params = ('function_version', 'description')
            for param in alias_params:
                if module.params.get(param) != facts.get(pc(param)):
                    changed = True
                    break

            if changed:
                api_params = set_api_params(module, ('function_name', 'name'))
                api_params.update(set_api_params(module, alias_params))

                if not module.check_mode:
                    try:
                        results = client.update_alias(**api_params)
                    except (ClientError, ParamValidationError, MissingParametersError) as e:
                        module.fail_json(msg='Error updating function alias: {0}'.format(e))

        else:
            # create new function alias
            api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))

            try:
                if not module.check_mode:
                    results = client.create_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating function alias: {0}'.format(e))

    else:  # state = 'absent'
        if current_state == 'present':
            # delete the function
            api_params = set_api_params(module, ('function_name', 'name'))

            try:
                if not module.check_mode:
                    results = client.delete_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error deleting function alias: {0}'.format(e))

    # Both 'results' and 'facts' may be empty/None (e.g. deleting an alias
    # that does not exist, or check mode on create).  The original
    # dict(results or facts) raised TypeError on dict(None); fall back to {}.
    return dict(changed=changed, **dict(results or facts or {}))
def main():
    """
    Main entry point: build the argument spec, validate parameters and apply
    the requested alias state.

    :return dict: ansible facts via module.exit_json
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=False, default='present', choices=['present', 'absent']),
        function_name=dict(required=True, default=None),
        name=dict(required=True, default=None, aliases=['alias_name']),
        function_version=dict(type='int', required=False, default=0, aliases=['version']),
        description=dict(required=False, default=None),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # boto3 is a hard requirement for every AWS call this module makes
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    results = lambda_alias(module, aws)
    module.exit_json(**camel_dict_to_snake_dict(results))
# ansible import module(s) kept at ~eof as recommended
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
westinedu/similarinterest
|
django/conf/locale/nl/formats.py
|
329
|
3056
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Locale formats for Dutch (nl).
DATE_FORMAT = 'j F Y'                   # '20 januari 2009'
TIME_FORMAT = 'H:i'                     # '15:23'
DATETIME_FORMAT = 'j F Y H:i'           # '20 januari 2009 15:23'
YEAR_MONTH_FORMAT = 'F Y'               # 'januari 2009'
MONTH_DAY_FORMAT = 'j F'                # '20 januari'
SHORT_DATE_FORMAT = 'j-n-Y'             # '20-1-2009'
SHORT_DATETIME_FORMAT = 'j-n-Y H:i'     # '20-1-2009 15:23'
FIRST_DAY_OF_WEEK = 1                   # Monday (in Dutch 'maandag')

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20'
    # '%d %b %Y', '%d %b %y',           # '20 jan 2009', '20 jan 09'
    # '%d %B %Y', '%d %B %y',           # '20 januari 2009', '20 januari 09'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',                         # '15:23:35'
    '%H.%M:%S',                         # '15.23:35'
    '%H.%M',                            # '15.23'
    '%H:%M',                            # '15:23'
)
DATETIME_INPUT_FORMATS = (
    # With time in %H:%M:%S :
    '%d-%m-%Y %H:%M:%S', '%d-%m-%y %H:%M:%S', '%Y-%m-%d %H:%M:%S', # '20-01-2009 15:23:35', '20-01-09 15:23:35', '2009-01-20 15:23:35'
    # '%d %b %Y %H:%M:%S', '%d %b %y %H:%M:%S',                    # '20 jan 2009 15:23:35', '20 jan 09 15:23:35'
    # '%d %B %Y %H:%M:%S', '%d %B %y %H:%M:%S',                    # '20 januari 2009 15:23:35', '20 januari 2009 15:23:35'
    # With time in %H.%M:%S :
    '%d-%m-%Y %H.%M:%S', '%d-%m-%y %H.%M:%S',                      # '20-01-2009 15.23:35', '20-01-09 15.23:35'
    # '%d %b %Y %H.%M:%S', '%d %b %y %H.%M:%S',                    # '20 jan 2009 15.23:35', '20 jan 09 15.23:35'
    # '%d %B %Y %H.%M:%S', '%d %B %y %H.%M:%S',                    # '20 januari 2009 15.23:35', '20 januari 2009 15.23:35'
    # With time in %H:%M :
    '%d-%m-%Y %H:%M', '%d-%m-%y %H:%M', '%Y-%m-%d %H:%M',          # '20-01-2009 15:23', '20-01-09 15:23', '2009-01-20 15:23'
    # '%d %b %Y %H:%M', '%d %b %y %H:%M',                          # '20 jan 2009 15:23', '20 jan 09 15:23'
    # '%d %B %Y %H:%M', '%d %B %y %H:%M',                          # '20 januari 2009 15:23', '20 januari 2009 15:23'
    # With time in %H.%M :
    '%d-%m-%Y %H.%M', '%d-%m-%y %H.%M',                            # '20-01-2009 15.23', '20-01-09 15.23'
    # '%d %b %Y %H.%M', '%d %b %y %H.%M',                          # '20 jan 2009 15.23', '20 jan 09 15.23'
    # '%d %B %Y %H.%M', '%d %B %y %H.%M',                          # '20 januari 2009 15.23', '20 januari 2009 15.23'
    # Without time :
    '%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d',                            # '20-01-2009', '20-01-09', '2009-01-20'
    # '%d %b %Y', '%d %b %y',                                      # '20 jan 2009', '20 jan 09'
    # '%d %B %Y', '%d %B %y',                                      # '20 januari 2009', '20 januari 2009'
)
# Dutch convention: comma as decimal mark, dot as thousands separator.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3                     # digits per thousands group
|
bsd-3-clause
|
adammaikai/OmicsPipe2.0
|
omics_pipe/modules/bwa.py
|
2
|
6757
|
#!/usr/bin/env python
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
p = Bunch(default_parameters)
def bwa1(sample, bwa1_flag):
    '''BWA aligner for read1 of paired_end reads.

    input: .fastq
    output: .sam
    citation: Li H. and Durbin R. (2009) Fast and accurate short read
        alignment with Burrows-Wheeler transform. Bioinformatics, 25,
        1754-1760. [PMID: 19451168]
    link: http://bio-bwa.sourceforge.net/bwa.shtml
    parameters from parameters file: BWA_RESULTS, TEMP_DIR,
        SAMTOOLS_VERSION, BWA_VERSION, BWA_INDEX, RAW_DATA_DIR,
        GATK_READ_GROUP_INFO, COMPRESSION
    '''
    read1 = sample + "_1"
    spawn_job(jobname='bwa1',
              SAMPLE=read1,
              LOG_PATH=p.LOG_PATH,
              RESULTS_EMAIL=p.RESULTS_EMAIL,
              SCHEDULER=p.SCHEDULER,
              walltime="240:00:00",
              queue=p.QUEUE,
              nodes=1,
              ppn=30,
              memory="30gb",
              script="/bwa_drmaa_RNA.sh",
              args_list=[p.BWA_RESULTS, p.TEMP_DIR, p.SAMTOOLS_VERSION,
                         p.BWA_VERSION, p.BWA_INDEX, read1, p.RAW_DATA_DIR,
                         p.GATK_READ_GROUP_INFO, p.COMPRESSION])
    job_status(jobname='bwa1',
               resultspath=p.BWA_RESULTS,
               SAMPLE=sample,
               outputfilename=read1 + "/" + read1 + ".sam",
               FLAG_PATH=p.FLAG_PATH)
    return
def bwa2(sample, bwa2_flag):
    '''BWA aligner for read2 of paired_end reads.

    input: .fastq
    output: .sam
    citation: Li H. and Durbin R. (2009) Fast and accurate short read
        alignment with Burrows-Wheeler transform. Bioinformatics, 25,
        1754-1760. [PMID: 19451168]
    link: http://bio-bwa.sourceforge.net/bwa.shtml
    parameters from parameters file: BWA_RESULTS, TEMP_DIR,
        SAMTOOLS_VERSION, BWA_VERSION, BWA_INDEX, RAW_DATA_DIR,
        GATK_READ_GROUP_INFO, COMPRESSION
    '''
    read2 = sample + "_2"
    spawn_job(jobname='bwa2',
              SAMPLE=read2,
              LOG_PATH=p.LOG_PATH,
              RESULTS_EMAIL=p.RESULTS_EMAIL,
              SCHEDULER=p.SCHEDULER,
              walltime="240:00:00",
              queue=p.QUEUE,
              nodes=1,
              ppn=30,
              memory="30gb",
              script="/bwa_drmaa_RNA.sh",
              args_list=[p.BWA_RESULTS, p.TEMP_DIR, p.SAMTOOLS_VERSION,
                         p.BWA_VERSION, p.BWA_INDEX, read2, p.RAW_DATA_DIR,
                         p.GATK_READ_GROUP_INFO, p.COMPRESSION])
    job_status(jobname='bwa2',
               resultspath=p.BWA_RESULTS,
               SAMPLE=sample,
               outputfilename=read2 + "/" + read2 + ".sam",
               FLAG_PATH=p.FLAG_PATH)
    return
def bwa_RNA(sample, bwa_flag):
    '''BWA aligner for single end reads.

    input: .fastq
    output: .sam
    citation: Li H. and Durbin R. (2009) Fast and accurate short read
        alignment with Burrows-Wheeler transform. Bioinformatics, 25,
        1754-1760. [PMID: 19451168]
    link: http://bio-bwa.sourceforge.net/bwa.shtml
    parameters from parameters file: BWA_RESULTS, TEMP_DIR,
        SAMTOOLS_VERSION, BWA_VERSION, BWA_INDEX, RAW_DATA_DIR,
        GATK_READ_GROUP_INFO, COMPRESSION
    '''
    spawn_job(jobname='bwa',
              SAMPLE=sample,
              LOG_PATH=p.LOG_PATH,
              RESULTS_EMAIL=p.RESULTS_EMAIL,
              SCHEDULER=p.SCHEDULER,
              walltime="240:00:00",
              queue=p.QUEUE,
              nodes=1,
              ppn=30,
              memory="30gb",
              script="/bwa_drmaa_RNA.sh",
              args_list=[p.BWA_RESULTS, p.TEMP_DIR, p.SAMTOOLS_VERSION,
                         p.BWA_VERSION, p.BWA_INDEX, sample, p.RAW_DATA_DIR,
                         p.GATK_READ_GROUP_INFO, p.COMPRESSION])
    job_status(jobname='bwa',
               resultspath=p.BWA_RESULTS,
               SAMPLE=sample,
               outputfilename=sample + "/" + sample + ".sam",
               FLAG_PATH=p.FLAG_PATH)
    return
def bwa_mem(sample, bwa_mem_flag):
    '''BWA aligner with BWA-MEM algorithm.

    input: .fastq
    output: .sam
    citation: Li H. and Durbin R. (2009) Fast and accurate short read
        alignment with Burrows-Wheeler transform. Bioinformatics, 25,
        1754-1760. [PMID: 19451168]
    link: http://bio-bwa.sourceforge.net/bwa.shtml
    parameters from parameters file: BWA_RESULTS, TEMP_DIR,
        SAMTOOLS_VERSION, BWA_VERSION, GENOME, RAW_DATA_DIR, BWA_OPTIONS,
        COMPRESSION
    '''
    # the wrapper script is chosen per sequencing layout (p.ENDS)
    spawn_job(jobname='bwa_mem',
              SAMPLE=sample,
              LOG_PATH=p.LOG_PATH,
              RESULTS_EMAIL=p.RESULTS_EMAIL,
              SCHEDULER=p.SCHEDULER,
              walltime="240:00:00",
              queue=p.QUEUE,
              nodes=1,
              ppn=30,
              memory="30gb",
              script="/bwa_drmaa_" + p.ENDS + "_DNA.sh",
              args_list=[p.BWA_RESULTS, p.TEMP_DIR, p.SAMTOOLS_VERSION,
                         p.BWA_VERSION, p.BWA_INDEX, sample, p.RAW_DATA_DIR,
                         p.BWA_OPTIONS, p.COMPRESSION])
    job_status(jobname='bwa_mem',
               resultspath=p.BWA_RESULTS,
               SAMPLE=sample,
               outputfilename=sample + "/" + sample + "_sorted.bam",
               FLAG_PATH=p.FLAG_PATH)
    return
def bwa_mem_pipe(sample, bwa_mem_pipe_flag):
    '''BWA aligner with BWA-MEM algorithm (piped through samblaster/sambamba).

    input: .fastq
    output: .sam
    citation: Li H. and Durbin R. (2009) Fast and accurate short read
        alignment with Burrows-Wheeler transform. Bioinformatics, 25,
        1754-1760. [PMID: 19451168]
    link: http://bio-bwa.sourceforge.net/bwa.shtml
    parameters from parameters file: BWA_RESULTS, TEMP_DIR,
        SAMTOOLS_VERSION, BWA_VERSION, GENOME, RAW_DATA_DIR, BWA_OPTIONS,
        COMPRESSION, SAMBAMBA_VERSION, SAMBLASTER_VERSION, SAMBAMBA_OPTIONS
    '''
    # the wrapper script is chosen per sequencing layout (p.ENDS)
    spawn_job(jobname='bwa_mem_pipe',
              SAMPLE=sample,
              LOG_PATH=p.LOG_PATH,
              RESULTS_EMAIL=p.RESULTS_EMAIL,
              SCHEDULER=p.SCHEDULER,
              walltime="240:00:00",
              queue=p.QUEUE,
              nodes=1,
              ppn=30,
              memory="30gb",
              script="/bwa_drmaa_" + p.ENDS + "_DNA_piped.sh",
              args_list=[p.BWA_RESULTS, p.TEMP_DIR, p.SAMTOOLS_VERSION,
                         p.BWA_VERSION, p.BWA_INDEX, sample, p.RAW_DATA_DIR,
                         p.BWA_OPTIONS, p.COMPRESSION, p.SAMBAMBA_VERSION,
                         p.SAMBLASTER_VERSION, p.SAMBAMBA_OPTIONS])
    job_status(jobname='bwa_mem_pipe',
               resultspath=p.BWA_RESULTS,
               SAMPLE=sample,
               outputfilename=sample + "/" + sample + "_sorted.bam",
               FLAG_PATH=p.FLAG_PATH)
    return
#(resultspath + "/" + outputfilename)
if __name__ == '__main__':
    # NOTE(review): 'sample' and the '*_flag' variables are never defined at
    # module scope in this file, so running it directly raises NameError --
    # presumably these names are injected by the pipeline runner; confirm
    # before relying on direct execution.
    bwa1(sample, bwa1_flag)
    bwa2(sample, bwa2_flag)
    bwa_RNA(sample, bwa_flag)
    bwa_mem(sample,bwa_mem_flag)
    bwa_mem_pipe(sample, bwa_mem_pipe_flag)
    sys.exit(0)
|
mit
|
Rafiot/botchallenge
|
client/google/protobuf/internal/cpp_message.py
|
2
|
23568
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
__author__ = '[email protected] (Petar Petrov)'
import collections
import collections.abc
import copyreg
import functools
import operator

from google.protobuf import message
from google.protobuf.internal import _net_proto2___python
from google.protobuf.internal import enum_type_wrapper
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
def GetDescriptorPool():
  """Return a freshly created C++ DescriptorPool object."""
  pool = _net_proto2___python.NewCDescriptorPool()
  return pool
# Shared descriptor pool used by the module-level lookup helpers.
_pool = GetDescriptorPool()


def GetFieldDescriptor(full_field_name):
  """Look up a field descriptor by its fully-qualified name."""
  return _pool.FindFieldByName(full_field_name)
def BuildFile(content):
  """Register a serialized proto file with the underlying C++ descriptor pool."""
  _net_proto2___python.BuildFile(content)
def GetExtensionDescriptor(full_extension_name):
  """Look up an extension field descriptor by its fully-qualified name."""
  return _pool.FindExtensionByName(full_extension_name)
def NewCMessage(full_message_name):
  """Create a new C++ protocol message instance by its fully-qualified name."""
  return _net_proto2___python.NewCMessage(full_message_name)
def ScalarProperty(cdescriptor):
  """Build a read/write property for a scalar field backed by the C++ message."""
  def _get(self):
    return self._cmsg.GetScalar(cdescriptor)

  def _set(self, value):
    self._cmsg.SetScalar(cdescriptor, value)

  return property(_get, _set)
def CompositeProperty(cdescriptor, message_type):
  """Build a read-only property for a singular composite (message) field.

  The Python wrapper around the C++ sub-message is created lazily on first
  access and cached in the owner's _composite_fields dict.
  """
  def _get(self):
    cached = self._composite_fields.get(cdescriptor.name, None)
    if cached is None:
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      cached = message_type._concrete_class(__cmessage=cmessage)
      self._composite_fields[cdescriptor.name] = cached
    return cached

  return property(_get)
class RepeatedScalarContainer(object):
  """Container for repeated scalar fields.

  All storage lives in the C++ message (self._cmsg); this object is a thin
  list-like adapter keyed by the C field descriptor.
  """
  __slots__ = ['_message', '_cfield_descriptor', '_cmsg']
  def __init__(self, msg, cfield_descriptor):
    self._message = msg
    self._cmsg = msg._cmsg
    self._cfield_descriptor = cfield_descriptor
  def append(self, value):
    self._cmsg.AddRepeatedScalar(
        self._cfield_descriptor, value)
  def extend(self, sequence):
    for element in sequence:
      self.append(element)
  def insert(self, key, value):
    # read-modify-write: the C++ API exposes no positional insert
    values = self[slice(None, None, None)]
    values.insert(key, value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
  def remove(self, value):
    values = self[slice(None, None, None)]
    values.remove(value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
  def __setitem__(self, key, value):
    values = self[slice(None, None, None)]
    values[key] = value
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
  def __getitem__(self, key):
    return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)
  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)
  def __len__(self):
    return len(self[slice(None, None, None)])
  def __eq__(self, other):
    if self is other:
      return True
    # collections.Sequence was removed in Python 3.10; the ABC lives in
    # collections.abc
    if not isinstance(other, collections.abc.Sequence):
      raise TypeError(
          'Can only compare repeated scalar fields against sequences.')
    # We are presumably comparing against some other sequence type.
    return other == self[slice(None, None, None)]
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    raise TypeError('unhashable object')
  def sort(self, *args, **kwargs):
    # Maintain compatibility with the previous interface.
    if 'sort_function' in kwargs:
      kwargs['cmp'] = kwargs.pop('sort_function')
    # list.sort()/sorted() lost the 'cmp' argument in Python 3; adapt any
    # comparator into a key function.
    if kwargs.get('cmp') is not None:
      kwargs['key'] = functools.cmp_to_key(kwargs.pop('cmp'))
    else:
      kwargs.pop('cmp', None)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor,
                                    sorted(self, *args, **kwargs))
def RepeatedScalarProperty(cdescriptor):
  """Build a property for a repeated scalar field.

  The container is created lazily and cached in _composite_fields; direct
  assignment to the field is rejected.
  """
  def _get(self):
    cached = self._composite_fields.get(cdescriptor.name, None)
    if cached is None:
      cached = RepeatedScalarContainer(self, cdescriptor)
      self._composite_fields[cdescriptor.name] = cached
    return cached

  def _set(self, new_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(_get, _set, doc=doc)
class RepeatedCompositeContainer(object):
  """Container for repeated composite (message-typed) fields.

  Storage lives in the C++ message (self._cmsg); elements are wrapped on
  demand in instances of self._subclass, owned by self._message.
  """
  __slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']
  def __init__(self, msg, cfield_descriptor, subclass):
    self._message = msg
    self._cmsg = msg._cmsg
    self._subclass = subclass
    self._cfield_descriptor = cfield_descriptor
  def add(self, **kwargs):
    # create a new C++ sub-message and wrap it; the owner keeps it alive
    cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
    return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)
  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    for message in elem_seq:
      self.add().MergeFrom(message)
  def remove(self, value):
    # TODO(protocol-devel): This is inefficient as it needs to generate a
    # message pointer for each message only to do index(). Move this to a C++
    # extension function.
    self.__delitem__(self[slice(None, None, None)].index(value))
  def MergeFrom(self, other):
    # append a merged copy of every element of 'other'
    for message in other[:]:
      self.add().MergeFrom(message)
  def __getitem__(self, key):
    # an integer key yields one wrapped message; a slice yields a list
    cmessages = self._cmsg.GetRepeatedMessage(
        self._cfield_descriptor, key)
    subclass = self._subclass
    if not isinstance(cmessages, list):
      return subclass(__cmessage=cmessages, __owner=self._message)
    return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]
  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(
        self._cfield_descriptor, key)
  def __len__(self):
    return self._cmsg.FieldLength(self._cfield_descriptor)
  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    messages = self[slice(None, None, None)]
    other_messages = other[slice(None, None, None)]
    return messages == other_messages
  def __hash__(self):
    raise TypeError('unhashable object')
  def sort(self, cmp=None, key=None, reverse=False, **kwargs):
    # Maintain compatibility with the old interface.
    if cmp is None and 'sort_function' in kwargs:
      cmp = kwargs.pop('sort_function')
    # The cmp function, if provided, is passed the results of the key function,
    # so we only need to wrap one of them.
    # NOTE(review): list.sort(cmp=...) is Python 2 only -- under Python 3 the
    # call below raises TypeError (would need functools.cmp_to_key). Confirm
    # which interpreter this build targets.
    if key is None:
      index_key = self.__getitem__
    else:
      index_key = lambda i: key(self[i])
    # Sort the list of current indexes by the underlying object.
    indexes = list(range(len(self)))
    indexes.sort(cmp=cmp, key=index_key, reverse=reverse)
    # Apply the transposition: swap elements in the C++ message so they end
    # up in sorted order, marking each source slot done as we go.
    for dest, src in enumerate(indexes):
      if dest == src:
        continue
      self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src)
      # Don't swap the same value twice.
      indexes[src] = src
def RepeatedCompositeProperty(cdescriptor, message_type):
  """Build a property for a repeated composite (message) field.

  The container wrapper is created lazily and cached in _composite_fields;
  direct assignment to the field is rejected.
  """
  def _get(self):
    cached = self._composite_fields.get(cdescriptor.name, None)
    if cached is None:
      cached = RepeatedCompositeContainer(
          self, cdescriptor, message_type._concrete_class)
      self._composite_fields[cdescriptor.name] = cached
    return cached

  def _set(self, new_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(_get, _set, doc=doc)
class ExtensionDict(object):
  """Extension dictionary added to each protocol message.

  Maps FieldDescriptor -> value.  Singular scalar extensions are read from
  and written to the C++ message directly; repeated and message-typed
  extensions get a cached Python handle built by _CreateNewHandle.
  """
  def __init__(self, msg):
    self._message = msg
    self._cmsg = msg._cmsg
    # cache of Python-side handles/values keyed by FieldDescriptor
    self._values = {}
  def __setitem__(self, extension, value):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    # only singular scalar extensions may be assigned directly
    if (cdescriptor.label != _LABEL_OPTIONAL or
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      raise TypeError('Extension %r is repeated and/or a composite type.' % (
          extension.full_name,))
    self._cmsg.SetScalar(cdescriptor, value)
    self._values[extension] = value
  def __getitem__(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    # singular scalars are read straight from the C++ message
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
      return self._cmsg.GetScalar(cdescriptor)
    # repeated / message-typed extensions: return the cached handle,
    # creating it on first access
    ext = self._values.get(extension, None)
    if ext is not None:
      return ext
    ext = self._CreateNewHandle(extension)
    self._values[extension] = ext
    return ext
  def ClearExtension(self, extension):
    """Clear the extension in the C++ message and drop any cached handle."""
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
    if extension in self._values:
      del self._values[extension]
  def HasExtension(self, extension):
    """Return True when the extension is set on the underlying message."""
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)
  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor.
    """
    return self._message._extensions_by_name.get(name, None)
  def _CreateNewHandle(self, extension):
    # builds the Python-side wrapper for a non-scalar extension
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      # singular message extension -> wrapped sub-message
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      return extension.message_type._concrete_class(__cmessage=cmessage)
    if cdescriptor.label == _LABEL_REPEATED:
      if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        return RepeatedCompositeContainer(
            self._message, cdescriptor, extension.message_type._concrete_class)
      else:
        return RepeatedScalarContainer(self._message, cdescriptor)
    # This shouldn't happen!
    assert False
    return None
def NewMessage(bases, message_descriptor, dictionary):
  """Creates a new protocol message *class*.

  Populates 'dictionary' (the future class dict) in place with nested
  extensions, enum values and field descriptors, then returns the bases
  unchanged.
  """
  _AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
  _AddEnumValues(message_descriptor, dictionary)
  _AddDescriptors(message_descriptor, dictionary)
  return bases
def InitMessage(message_descriptor, cls):
  """Constructs a new message instance (called before instance's __init__).

  Attaches the __init__ method, message methods and extension properties
  to 'cls', and registers a pickle reducer for it.
  """
  cls._extensions_by_name = {}
  _AddInitMethod(message_descriptor, cls)
  _AddMessageMethods(message_descriptor, cls)
  _AddPropertiesForExtensions(message_descriptor, cls)
  # pickling round-trips instance state through __getstate__
  copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _AddDescriptors(message_descriptor, dictionary):
  """Sets up a new protocol message class dictionary.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  descriptors = {}
  for field in message_descriptor.fields:
    descriptors[field.name] = GetFieldDescriptor(field.full_name)
  dictionary['__descriptors'] = descriptors
  # one slot per field, plus the bookkeeping attributes every message needs
  slots = list(descriptors.keys())
  slots.extend(['_cmsg', '_owner', '_composite_fields', 'Extensions',
                '_HACK_REFCOUNTS'])
  dictionary['__slots__'] = slots
def _AddEnumValues(message_descriptor, dictionary):
  """Sets class-level attributes for all enum fields defined in this message.

  Args:
    message_descriptor: Descriptor object for this message type.
    dictionary: Class dictionary that should be populated.
  """
  for enum_type in message_descriptor.enum_types:
    # expose the enum type wrapper and each value's number as class attributes
    dictionary[enum_type.name] = enum_type_wrapper.EnumTypeWrapper(enum_type)
    for value_descriptor in enum_type.values:
      dictionary[value_descriptor.name] = value_descriptor.number
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
"""Adds class attributes for the nested extensions."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in list(extension_dict.items()):
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls.

  Also attaches one property per field and a <NAME>_FIELD_NUMBER constant
  per field to the class.
  """
  # Create and attach message field properties to the message class.
  # This can be done just once per message class, since property setters and
  # getters are passed the message instance.
  # This makes message instantiation extremely fast, and at the same time it
  # doesn't require the creation of property objects for each message instance,
  # which saves a lot of memory.
  for field in message_descriptor.fields:
    field_cdescriptor = cls.__descriptors[field.name]
    # pick the property flavor from the field's label and C++ type
    if field.label == _LABEL_REPEATED:
      if field.cpp_type == _CPPTYPE_MESSAGE:
        value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
      else:
        value = RepeatedScalarProperty(field_cdescriptor)
    elif field.cpp_type == _CPPTYPE_MESSAGE:
      value = CompositeProperty(field_cdescriptor, field.message_type)
    else:
      value = ScalarProperty(field_cdescriptor)
    setattr(cls, field.name, value)
    # Attach a constant with the field number.
    constant_name = field.name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, field.number)
  def Init(self, **kwargs):
    """Message constructor."""
    # '__cmessage' lets factory code wrap an existing C++ message
    cmessage = kwargs.pop('__cmessage', None)
    if cmessage:
      self._cmsg = cmessage
    else:
      self._cmsg = NewCMessage(message_descriptor.full_name)
    # Keep a reference to the owner, as the owner keeps a reference to the
    # underlying protocol buffer message.
    owner = kwargs.pop('__owner', None)
    if owner:
      self._owner = owner
    if message_descriptor.is_extendable:
      self.Extensions = ExtensionDict(self)
    else:
      # Reference counting in the C++ code is broken and depends on
      # the Extensions reference to keep this object alive during unit
      # tests (see b/4856052). Remove this once b/4945904 is fixed.
      self._HACK_REFCOUNTS = self
    self._composite_fields = {}
    # remaining kwargs are field initializers
    for field_name, field_value in list(kwargs.items()):
      field_cdescriptor = self.__descriptors.get(field_name, None)
      if not field_cdescriptor:
        raise ValueError('Protocol message has no "%s" field.' % field_name)
      if field_cdescriptor.label == _LABEL_REPEATED:
        if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
          # NOTE: rebinding the loop variable is intentional here --
          # 'field_name' becomes the container used for the adds below
          field_name = getattr(self, field_name)
          for val in field_value:
            field_name.add().MergeFrom(val)
        else:
          getattr(self, field_name).extend(field_value)
      elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        getattr(self, field_name).MergeFrom(field_value)
      else:
        setattr(self, field_name, field_value)
  Init.__module__ = None
  Init.__doc__ = None
  cls.__init__ = Init
def _IsMessageSetExtension(field):
  """Checks if a field is a message set extension.

  A message set extension is an optional, message-typed extension whose
  containing type uses the message_set_wire_format option and whose message
  type equals its extension scope.
  """
  if not field.is_extension:
    return False
  container = field.containing_type
  return (container.has_options and
          container.GetOptions().message_set_wire_format and
          field.type == _TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _LABEL_OPTIONAL)
def _AddMessageMethods(message_descriptor, cls):
  """Adds the methods to a protocol message class.

  Every function defined in this scope is attached to *cls* by the
  locals() loop near the bottom, so no helper locals may be introduced
  here without extending the exclusion tuple.
  """
  if message_descriptor.is_extendable:
    def ClearExtension(self, extension):
      """Clears the given extension field (delegates to Extensions map)."""
      self.Extensions.ClearExtension(extension)
    def HasExtension(self, extension):
      """Returns True if the given extension is present."""
      return self.Extensions.HasExtension(extension)
  def HasField(self, field_name):
    """Returns True if the named singular field is set."""
    return self._cmsg.HasField(field_name)
  def ClearField(self, field_name):
    """Clears the named field, detaching any cached composite wrapper."""
    child_cmessage = None
    if field_name in self._composite_fields:
      child_field = self._composite_fields[field_name]
      del self._composite_fields[field_name]
      child_cdescriptor = self.__descriptors[field_name]
      # TODO(anuraag): Support clearing repeated message fields as well.
      if (child_cdescriptor.label != _LABEL_REPEATED and
          child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
        # Detach the Python wrapper so the C message can be released.
        child_field._owner = None
        child_cmessage = child_field._cmsg
    if child_cmessage is not None:
      self._cmsg.ClearField(field_name, child_cmessage)
    else:
      self._cmsg.ClearField(field_name)
  def Clear(self):
    """Clears all fields, releasing cached composite wrappers in one pass."""
    cmessages_to_release = []
    for field_name, child_field in list(self._composite_fields.items()):
      child_cdescriptor = self.__descriptors[field_name]
      # TODO(anuraag): Support clearing repeated message fields as well.
      if (child_cdescriptor.label != _LABEL_REPEATED and
          child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
        child_field._owner = None
        cmessages_to_release.append((child_cdescriptor, child_field._cmsg))
    self._composite_fields.clear()
    self._cmsg.Clear(cmessages_to_release)
  def IsInitialized(self, errors=None):
    """Returns True if all required fields are set; else collects errors.

    If *errors* is a list, the names of the uninitialized fields are
    appended to it when the message is not initialized.
    """
    if self._cmsg.IsInitialized():
      return True
    if errors is not None:
      errors.extend(self.FindInitializationErrors())
    return False
  def SerializeToString(self):
    """Serializes to bytes; raises EncodeError if required fields are unset."""
    if not self.IsInitialized():
      raise message.EncodeError(
          'Message %s is missing required fields: %s' % (
          self._cmsg.full_name, ','.join(self.FindInitializationErrors())))
    return self._cmsg.SerializeToString()
  def SerializePartialToString(self):
    """Serializes to bytes without checking required fields."""
    return self._cmsg.SerializePartialToString()
  def ParseFromString(self, serialized):
    """Clears the message and merges *serialized* into it."""
    self.Clear()
    self.MergeFromString(serialized)
  def MergeFromString(self, serialized):
    """Merges serialized bytes into this message; returns bytes consumed."""
    byte_size = self._cmsg.MergeFromString(serialized)
    if byte_size < 0:
      raise message.DecodeError('Unable to merge from string.')
    return byte_size
  def MergeFrom(self, msg):
    """Merges the fields of another message of the same type into this one."""
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class: "
          "expected %s got %s." % (cls.__name__, type(msg).__name__))
    self._cmsg.MergeFrom(msg._cmsg)
  def CopyFrom(self, msg):
    """Replaces this message's contents with those of *msg*."""
    self._cmsg.CopyFrom(msg._cmsg)
  def ByteSize(self):
    """Returns the serialized size of this message in bytes."""
    return self._cmsg.ByteSize()
  def SetInParent(self):
    """Marks this message as present in its parent."""
    return self._cmsg.SetInParent()
  def ListFields(self):
    """Returns (descriptor, value) pairs for set fields, sorted by number."""
    all_fields = []
    field_list = self._cmsg.ListFields()
    fields_by_name = cls.DESCRIPTOR.fields_by_name
    for is_extension, field_name in field_list:
      if is_extension:
        extension = cls._extensions_by_name[field_name]
        all_fields.append((extension, self.Extensions[extension]))
      else:
        field_descriptor = fields_by_name[field_name]
        all_fields.append(
            (field_descriptor, getattr(self, field_name)))
    all_fields.sort(key=lambda item: item[0].number)
    return all_fields
  def FindInitializationErrors(self):
    """Returns the names of uninitialized required fields."""
    return self._cmsg.FindInitializationErrors()
  def __str__(self):
    return str(self._cmsg)
  def __eq__(self, other):
    # Identity fast path, then type check, then field-by-field comparison.
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      return False
    return self.ListFields() == other.ListFields()
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    raise TypeError('unhashable object')
  def __unicode__(self):
    # Lazy import to prevent circular import when text_format imports this file.
    from google.protobuf import text_format
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
  # Attach the local methods to the message class.
  # NOTE(review): only the names below are excluded, so the
  # 'message_descriptor' and 'cls' parameters are also attached as class
  # attributes -- confirm whether that is intentional before tightening.
  for key, value in list(locals().copy().items()):
    if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
      setattr(cls, key, value)
  # Static methods:
  def RegisterExtension(extension_handle):
    """Registers an extension handle on this message class."""
    extension_handle.containing_type = cls.DESCRIPTOR
    cls._extensions_by_name[extension_handle.full_name] = extension_handle
    if _IsMessageSetExtension(extension_handle):
      # MessageSet extension.  Also register under type name.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle
  cls.RegisterExtension = staticmethod(RegisterExtension)
  def FromString(string):
    """Alternate constructor: parses a new message from serialized bytes."""
    msg = cls()
    msg.MergeFromString(string)
    return msg
  cls.FromString = staticmethod(FromString)
def _AddPropertiesForExtensions(message_descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in list(extension_dict.items()):
constant_name = extension_name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, extension_field.number)
|
mit
|
spatialaudio/jackclient-python
|
examples/midi_chords.py
|
1
|
1118
|
#!/usr/bin/env python3
"""JACK client that creates minor triads from single MIDI notes.
All MIDI events are passed through.
Two additional events are created for each NoteOn and NoteOff event.
"""
import jack
import struct
# First 4 bits of status byte:
NOTEON = 0x9
NOTEOFF = 0x8
# Semitone offsets added above the played root (minor third, perfect fifth).
INTERVALS = 3, 7  # minor triad
client = jack.Client('MIDI-Chord-Generator')
inport = client.midi_inports.register('input')
outport = client.midi_outports.register('output')
@client.set_process_callback
def process(frames):
    # Called once per audio block; forwards every incoming event and, for
    # 3-byte NoteOn/NoteOff messages, emits the two extra chord tones.
    outport.clear_buffer()
    for offset, indata in inport.incoming_midi_events():
        # Note: This may raise an exception:
        outport.write_midi_event(offset, indata)  # pass through
        if len(indata) == 3:
            # Unpack channel voice message: status, note number, velocity.
            status, pitch, vel = struct.unpack('3B', indata)
            if status >> 4 in (NOTEON, NOTEOFF):
                for i in INTERVALS:
                    # NOTE(review): pitch + i can exceed 127 for very high
                    # notes, yielding an out-of-range MIDI note -- confirm
                    # whether clamping is desired.
                    # Note: This may raise an exception:
                    outport.write_midi_event(offset, (status, pitch + i, vel))
with client:
    print('#' * 80)
    print('press Return to quit')
    print('#' * 80)
    input()  # block until the user presses Return; 'with' deactivates client
|
mit
|
yuanzhao/gpdb
|
src/test/tinc/tincrepo/mpp/models/regress/sql_related/regress_sql_test_case/regress_sql_test_case.py
|
12
|
36052
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from tinctest.lib import local_path
from tinctest.runner import TINCTestRunner
from mpp.models import SQLTestCase, SQLTestCaseException
import unittest2 as unittest
import shutil
from contextlib import closing
from datetime import datetime
from StringIO import StringIO
from unittest2.runner import _WritelnDecorator
# we're testing SQLTestCase as it pertains to tinc.py (and only tinc.py)
# as such, any attempts by raw unit2 to discover and load MockSQLTestCase must be averted
# Skipped under direct unit2 discovery; SQLTestCaseTests below clears
# __unittest_skip__ before running it explicitly.
@unittest.skip('mock')
class MockSQLTestCase(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    # Database the generated sql tests run against.
    db_name=os.getenv('USER')
    def test_explicit_test_method(self):
        # Hand-written (non-sql-file) test method; always passes.
        pass
# Mock class exercising generate_ans='yes': missing ans files are generated.
@unittest.skip('mock')
class MockSQLTestCaseGenerateAns(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    # Directory containing sql files without matching ans files.
    sql_dir = 'sql_no_ans/'
    generate_ans = 'yes'
    def test_explicit_test_method(self):
        # Hand-written (non-sql-file) test method; always passes.
        pass
# Mock class exercising generate_ans='force': existing ans files are
# overwritten.
@unittest.skip('mock')
class MockSQLTestCaseForceGenerateAns(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    # Directory containing sql files without matching ans files.
    sql_dir = 'sql_no_ans/'
    generate_ans = 'force'
    def test_explicit_test_method(self):
        # Hand-written (non-sql-file) test method; always passes.
        pass
# Mock class exercising an invalid generate_ans value: loading it should
# produce a TINCTestCaseLoadFailure.
@unittest.skip('mock')
class MockSQLTestCaseIncorrectGenerateAns(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    # Misspelled generate_ans. Invalid value (deliberate, for the loader test).
    generate_ans = 'yess'
    def test_explicit_test_method(self):
        # Hand-written (non-sql-file) test method; always passes.
        pass
# Mock class with gpdiff enabled (default) but an ans-less sql dir: running
# the ans-less test should error.
@unittest.skip('mock')
class MockSQLTestCaseGpdiffNoAnsFile(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    # Directory containing sql files without matching ans files.
    sql_dir = 'sql_no_ans/'
    def test_explicit_test_method(self):
        # Hand-written (non-sql-file) test method; always passes.
        pass
# Mock class with gpdiff disabled (via the @gpdiff metadata tag): ans-less
# sql files still become tests.
@unittest.skip('mock')
class MockSQLTestCaseNoGpdiffNoAnsFile(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    @gpdiff False
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    # Directory containing sql files without matching ans files.
    sql_dir = 'sql_no_ans/'
    def test_explicit_test_method(self):
        # Hand-written (non-sql-file) test method; always passes.
        pass
# Mock class with optimizer_mode on: sqls run once, through orca.
@unittest.skip('mock')
class MockSQLTestCaseWithOptimizerOn(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    @optimizer_mode on
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    db_name=os.getenv('USER')
# Mock class with optimizer_mode off: sqls run once, through the planner.
@unittest.skip('mock')
class MockSQLTestCaseWithOptimizerOff(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    @optimizer_mode off
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    db_name=os.getenv('USER')
# Mock class with optimizer_mode both: each sql expands into a planner run
# and an orca run.
@unittest.skip('mock')
class MockSQLTestCaseWithOptimizerBoth(SQLTestCase):
    """
    @description test case with metadata
    @created 2012-07-05 12:00:00
    @modified 2012-07-05 12:00:02
    @tags orca hashagg
    @optimizer_mode both
    """
    # NOTE: the docstring above is tinc metadata parsed at load time.
    db_name=os.getenv('USER')
class SQLTestCaseTests(unittest.TestCase):
def test_run_sql_test_failure(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCase.test_query02":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02.diff')))
shutil.rmtree(test_case.get_out_dir())
def test_run_sql_test_success(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCase.test_query03":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
shutil.rmtree(test_case.get_out_dir())
def test_run_entire_sql_test_case(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case = None
for test_case in test_suite._tests:
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_suite.run(test_result)
# 3 sql files with ans files and 1 explicit method
self.assertEqual(test_result.testsRun, 4)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02.diff')))
shutil.rmtree(test_case.get_out_dir())
def test_verify_setup_teardown(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
for test_case in test_suite._tests:
test_case.__class__.__unittest_skip__ = False
if os.path.exists(local_path("output/")):
shutil.rmtree(local_path("output/"))
test_result = unittest.TestResult()
test_suite.run(test_result)
self.assertEqual(test_result.testsRun, 4)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
# Verify if setup and teardown sqls were executed
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'setup.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'setup', 'setup1.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'teardown.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'teardown', 'teardown1.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_setup.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_teardown.out')))
def test_run_explicit_test_method(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCase.test_explicit_test_method":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
def test_with_local_init_file(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCase.test_query04":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
def test_run_no_ans_file(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Store all test names in a list
test_case_list = []
for temp in test_suite._tests:
test_case_list.append(temp.name)
# Verify that other sql files with ans files and explicit method is in the list
self.assertTrue('MockSQLTestCase.test_explicit_test_method' in test_case_list)
self.assertTrue('MockSQLTestCase.test_query02' in test_case_list)
# Verify that test_query_no_ans_file is not there, even though the sql file is there without the ans file
self.assertTrue('MockSQLTestCase.test_query_no_ans_file' not in test_case_list)
# Verify the default value of generate_ans is no
self.assertTrue(MockSQLTestCase.generate_ans == 'no')
def test_gpdiff_no_ans_file(self):
"""
Test whether we throw an excpetion when there is no ans file for a sql file and if gpdiff is set to True
"""
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseGpdiffNoAnsFile)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseGpdiffNoAnsFile.test_query_no_ans_file":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 1)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
def test_no_gpdiff_no_ans_file(self):
"""
Test whether we construct a test for sqls with no ans files when gpdiff is turned off
"""
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseNoGpdiffNoAnsFile)
# Store all test names in a list
test_case_list = []
for temp in test_suite._tests:
test_case_list.append(temp.name)
# Verify that other sql files with ans files and explicit method is in the list
self.assertTrue('MockSQLTestCaseNoGpdiffNoAnsFile.test_explicit_test_method' in test_case_list)
self.assertTrue('MockSQLTestCaseNoGpdiffNoAnsFile.test_query02' in test_case_list)
# Verify that test_query_no_ans_file is there, even though the sql file is there without the ans file
self.assertTrue('MockSQLTestCaseNoGpdiffNoAnsFile.test_query_no_ans_file' in test_case_list)
def test_run_generate_ans_file_class_variable(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseGenerateAns)
# Store all test names in a list
test_case_list = []
for temp in test_suite._tests:
test_case_list.append(temp.name)
# Verify that other sql files with ans files and explicit method is in the list
self.assertTrue('MockSQLTestCaseGenerateAns.test_explicit_test_method' in test_case_list)
self.assertTrue('MockSQLTestCaseGenerateAns.test_query02' in test_case_list)
# Verify that test_query_no_ans_file is also there, even though its ans file is not there
self.assertTrue('MockSQLTestCaseGenerateAns.test_query_no_ans_file' in test_case_list)
def test_run_incorrect_generate_ans_file_class_variable(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseIncorrectGenerateAns)
count = 0
for test in test_suite._tests:
if 'TINCTestCaseLoadFailure' in str(test):
count += 1
self.assertEquals(count, 1)
def test_run_sql_generate_ans(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseGenerateAns)
# Ans file that will be generated
ans_file = local_path("query_no_ans_file.ans")
# If ans file is there for some reason, remove it (not testing force here)
if os.path.exists(ans_file):
os.remove(ans_file)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseGenerateAns.test_query_no_ans_file":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
# Verify that ans file is generated
self.assertTrue(os.path.exists(local_path("setup.ans")))
self.assertTrue(os.path.exists(ans_file))
self.assertTrue(os.path.exists(local_path("teardown.ans")))
# Cleanup
os.remove(local_path("setup.ans"))
os.remove(ans_file)
os.remove(local_path("teardown.ans"))
def test_run_sql_force_generate_ans(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForceGenerateAns)
# Ans file that will be generated
ans_file = local_path("query_no_ans_file.ans")
# Create the empty ans file to allow force to overwrite
open(ans_file, 'w').close()
self.assertTrue(os.path.getsize(ans_file) == 0)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseForceGenerateAns.test_query_no_ans_file":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
# Verify that ans file is there
self.assertTrue(os.path.exists(local_path("setup.ans")))
self.assertTrue(os.path.exists(ans_file))
self.assertTrue(os.path.exists(local_path("teardown.ans")))
# Verify that ans file size is greater than 0
self.assertTrue(os.path.getsize(ans_file) > 0)
# Cleanup
os.remove(local_path("setup.ans"))
os.remove(ans_file)
os.remove(local_path("teardown.ans"))
def test_run_sql_force_generate_ans_permission_denied(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForceGenerateAns)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query04 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseForceGenerateAns.test_query04":
# query04.ans wouldn't be checked-out from perforce, so it would have no write operation allowed
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 1)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
def test_run_sql_file(self):
test_case = MockSQLTestCase('test_query03')
if os.path.exists(test_case.get_out_dir()):
shutil.rmtree(test_case.get_out_dir())
# Default mode
test_case.run_sql_file(local_path('query03.sql'))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03.out')))
self.assertFalse(self._check_str_in_file('SET optimizer',
os.path.join(test_case.get_out_dir(), 'query03.sql')))
# Optimizer on mode
test_case.run_sql_file(local_path('query03.sql'), optimizer=True)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
self.assertTrue(self._check_str_in_file('SET optimizer=on;',
os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
# Optimizer off mode
test_case.run_sql_file(local_path('query03.sql'), optimizer=False)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
self.assertTrue(self._check_str_in_file('SET optimizer=off;',
os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
def test_run_sql_test_optimizer_on(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseWithOptimizerOn)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseWithOptimizerOn.test_query03":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
shutil.rmtree(test_case.get_out_dir())
def test_run_sql_test_optimizer_off(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseWithOptimizerOff)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
# would likely have allowed us to insolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseWithOptimizerOff.test_query03":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=off;",
os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=off;",
os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
shutil.rmtree(test_case.get_out_dir())
def test_run_sql_test_optimizer_both(self):
test_loader = tinctest.TINCTestLoader()
# For data provider test cases, we have to use loadTestsFromName, since loadTestsFromTestCase won't filter and expand
test_suite = test_loader.loadTestsFromName("mpp.models.regress.sql_related.regress_sql_test_case.regress_sql_test_case.MockSQLTestCaseWithOptimizerBoth")
# Find our desired test case in test_suite.
test_case = None
new_test_suite = tinctest.TINCTestSuite()
for temp in test_suite._tests:
if "MockSQLTestCaseWithOptimizerBoth.test_query03" in temp.name:
new_test_suite.addTest(temp)
temp.__class__.__unittest_skip__ = False
test_case = temp
self.assertIsNotNone(new_test_suite)
self.assertEquals(new_test_suite.countTestCases(), 2)
test_result = unittest.TestResult()
new_test_suite.run(test_result)
self.assertEqual(test_result.testsRun, 2)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=off;",
os.path.join(temp.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=off;",
os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
shutil.rmtree(test_case.get_out_dir())
def _check_str_in_file(self, check_string, file_path):
with open(file_path, 'r') as f:
for line in f:
if check_string in line:
return True
return False
def test_run_sql_test_optimizer_minidump_on_failure(self):
"""
Test whether we gather minidumps on failures when the test is exeucted with optimizer on.
"""
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.regress.sql_related.regress_sql_test_case.' + \
'regress_sql_test_case.' + \
'MockSQLTestCaseWithOptimizerOn.test_query02')
self.assertIsNotNone(test_suite)
self.assertTrue(len(test_suite._tests), 1)
test_result = None
test_case = None
for test in test_suite._tests:
test.__class__.__unittest_skip__ = False
test_case = test
if os.path.exists(test_case.get_out_dir()):
shutil.rmtree(test_case.get_out_dir())
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_runner = TINCTestRunner(stream = buffer, descriptions = True, verbosity = 1)
test_result = tinc_test_runner.run(test_suite)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
# Verify that we collect minidump on failure for optimizer execution mode
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_minidump.mdp')))
    @unittest.skip("QAINF-999")  # known issue; re-enable when QAINF-999 is resolved
    def test_run_sql_test_optimizer_minidump_on_failure2(self):
        """
        Test whether we gather minidumps on failures when the test is executed
        with optimizer_mode both.
        """
        test_loader = tinctest.TINCTestLoader()
        test_suite = test_loader.loadTestsFromName('mpp.models.regress.sql_related.regress_sql_test_case.' + \
                                                   'regress_sql_test_case.' + \
                                                   'MockSQLTestCaseWithOptimizerBoth.test_query02')
        self.assertIsNotNone(test_suite)
        new_test_suite = tinctest.TINCTestSuite()
        # optimizer_mode 'both' expands test_query02 into two variants.
        self.assertEquals(test_suite.countTestCases(), 2)
        test_result = None
        test_case = None
        # Pick out only the orca variant and un-skip it for this run.
        for test in test_suite._tests:
            if 'test_query02_orca' in test.name:
                test.__class__.__unittest_skip__ = False
                test_case = test
                new_test_suite.addTest(test)
        self.assertIsNotNone(test_case)
        # Start from a clean output directory so artifact checks are reliable.
        if os.path.exists(test_case.get_out_dir()):
            shutil.rmtree(test_case.get_out_dir())
        with closing(_WritelnDecorator(StringIO())) as buffer:
            tinc_test_runner = TINCTestRunner(stream = buffer, descriptions = True, verbosity = 1)
            test_result = tinc_test_runner.run(new_test_suite)
        # Exactly one test ran and it is expected to fail (not error/skip).
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)
        # The orca run should leave its .sql/.out artifacts with optimizer on.
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
        self.assertTrue(self._check_str_in_file("SET optimizer=on;",
                                                os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
        self.assertTrue(self._check_str_in_file("SET optimizer=on;",
                                                os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
        # Verify that we collect minidump on failure for optimizer execution mode
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_minidump.mdp')))
|
apache-2.0
|
Observer-Wu/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py
|
122
|
3783
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.net.buildbot import Builder
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.sheriff import Sheriff
from webkitpy.tool.mocktool import MockTool
class MockSheriffBot(object):
    """Minimal stand-in for the sheriff bot used by SheriffTest.

    Supplies the bot name and watcher list that Sheriff reads, and fakes
    the webkit-patch invocation with canned bug-creation output.
    """
    name = "mock-sheriff-bot"
    watchers = [
        "[email protected]",
    ]

    def run_webkit_patch(self, args):
        """Pretend to run webkit-patch; always report that bug 36936 was filed."""
        return "Created bug https://bugs.webkit.org/show_bug.cgi?id=36936\n"
class SheriffTest(unittest.TestCase):
    def test_post_blame_comment_on_bug(self):
        """Exercise Sheriff.post_blame_comment_on_bug and compare its logged
        bug comments (captured via OutputCapture) against the expected text."""
        def run():
            sheriff = Sheriff(MockTool(), MockSheriffBot())
            builders = [
                Builder("Foo", None),
                Builder("Bar", None),
            ]
            commit_info = Mock()
            commit_info.bug_id = lambda: None
            commit_info.revision = lambda: 4321
            # Should do nothing with no bug_id
            sheriff.post_blame_comment_on_bug(commit_info, builders, [])
            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
            # Should try to post a comment to the bug, but MockTool.bugs does nothing.
            commit_info.bug_id = lambda: 1234
            sheriff.post_blame_comment_on_bug(commit_info, builders, [])
            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1"])
            sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
        # One comment per call that has a bug_id; failing tests are listed
        # after the blame line when present.
        expected_logs = u"""MOCK bug comment: bug_id=1234, cc=['[email protected]']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
--- End comment ---
MOCK bug comment: bug_id=1234, cc=['[email protected]']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
The following tests are not passing:
mock-test-1
--- End comment ---
MOCK bug comment: bug_id=1234, cc=['[email protected]']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
The following tests are not passing:
mock-test-1
mock-test-2
--- End comment ---
"""
        OutputCapture().assert_outputs(self, run, expected_logs=expected_logs)
|
bsd-3-clause
|
systers/mailman
|
src/mailman/bin/mailman.py
|
3
|
3848
|
# Copyright (C) 2009-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""The 'mailman' command dispatcher."""
__all__ = [
'main',
]
import os
import argparse
from functools import cmp_to_key
from mailman.core.i18n import _
from mailman.core.initialize import initialize
from mailman.interfaces.command import ICLISubCommand
from mailman.utilities.modules import find_components
from mailman.version import MAILMAN_VERSION_FULL
from zope.interface.verify import verifyObject
def main():
    """The `mailman` command dispatcher.

    Builds the top-level argument parser, discovers every ICLISubCommand
    plug-in under mailman.commands, registers each as a subcommand, then
    initializes the system (honoring -C) and dispatches to the handler of
    the chosen subcommand.  Prints help and exits when no subcommand is
    given.
    """
    # Create the basic parser and add all globally common options.
    parser = argparse.ArgumentParser(
        description=_("""\
        The GNU Mailman mailing list management system
        Copyright 1998-2015 by the Free Software Foundation, Inc.
        http://www.list.org
        """),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '-v', '--version',
        action='version', version=MAILMAN_VERSION_FULL,
        help=_('Print this version string and exit'))
    parser.add_argument(
        '-C', '--config',
        help=_("""\
    Configuration file to use.  If not given, the environment variable
    MAILMAN_CONFIG_FILE is consulted and used if set.  If neither are
    given, a default configuration file is loaded."""))
    # Look at all modules in the mailman.bin package and if they are prepared
    # to add a subcommand, let them do so.  I'm still undecided as to whether
    # this should be pluggable or not.  If so, then we'll probably have to
    # partially parse the arguments now, then initialize the system, then find
    # the plugins.  Punt on this for now.
    subparser = parser.add_subparsers(title='Commands')
    subcommands = []
    for command_class in find_components('mailman.commands', ICLISubCommand):
        command = command_class()
        verifyObject(ICLISubCommand, command)
        subcommands.append(command)
    # --help should display the subcommands by alphabetical order, except that
    # 'mailman help' should be first.  A two-element sort key expresses this
    # directly: False sorts before True, so 'help' takes the first slot and
    # everything else falls back to alphabetical order.  (This replaces a
    # hand-written cmp-style comparator wrapped in cmp_to_key.)
    subcommands.sort(key=lambda command: (command.name != 'help', command.name))
    for command in subcommands:
        command_parser = subparser.add_parser(
            command.name, help=_(command.__doc__))
        command.add(parser, command_parser)
        command_parser.set_defaults(func=command.process)
    args = parser.parse_args()
    if len(args.__dict__) <= 1:
        # No arguments or subcommands were given.
        parser.print_help()
        parser.exit()
    # Initialize the system.  Honor the -C flag if given.
    config_path = (None if args.config is None
                   else os.path.abspath(os.path.expanduser(args.config)))
    initialize(config_path)
    # Perform the subcommand option.
    args.func(args)
|
gpl-3.0
|
beeftornado/sentry
|
src/sentry/migrations/0066_alertrule_manager.py
|
1
|
1754
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-04-15 23:27
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Switch AlertRule's default/base manager to `objects_with_snapshots`."""

    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False

    # This flag is used to decide whether to run this migration in a transaction or not.
    # By default we prefer to run in a transaction, but for migrations where you want
    # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
    # want to create an index concurrently when adding one to an existing table.
    atomic = True

    dependencies = [
        ('sentry', '0065_add_incident_status_method'),
    ]

    # Manager changes are metadata-only: no SQL is executed for either operation.
    operations = [
        migrations.AlterModelOptions(
            name='alertrule',
            options={'base_manager_name': 'objects_with_snapshots', 'default_manager_name': 'objects_with_snapshots'},
        ),
        migrations.AlterModelManagers(
            name='alertrule',
            managers=[
                ('objects_with_snapshots', django.db.models.manager.Manager()),
            ],
        ),
    ]
|
bsd-3-clause
|
llonchj/sentry
|
src/sentry/migrations/0085_auto__del_unique_project_slug__add_unique_project_slug_team.py
|
36
|
23551
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: make Project.slug unique per team instead of globally unique."""
        # Removing unique constraint on 'Project', fields ['slug']
        db.delete_unique('sentry_project', ['slug'])

        # Adding unique constraint on 'Project', fields ['slug', 'team']
        db.create_unique('sentry_project', ['slug', 'team_id'])
    def backwards(self, orm):
        """Revert: restore the global unique constraint on Project.slug."""
        # Removing unique constraint on 'Project', fields ['slug', 'team']
        db.delete_unique('sentry_project', ['slug', 'team_id'])

        # Adding unique constraint on 'Project', fields ['slug']
        db.create_unique('sentry_project', ['slug'])
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Group']", 'through': "orm['sentry.AffectedUserByGroup']", 'symmetrical': 'False'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
bsd-3-clause
|
dimtion/jml
|
outputFiles/statistics/archives/ourIA/improved_closest_v2.py/1.0/9/player1.py
|
1
|
11276
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################################################################################################################################################################################
######################################################################################################## PRE-DEFINED IMPORTS #######################################################################################################
####################################################################################################################################################################################################################################
# Imports that are necessary for the program architecture to work properly
# Do not edit this code
import ast
import sys
import os
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED CONSTANTS ######################################################################################################
####################################################################################################################################################################################################################################
# Possible characters to send to the maze application
# Any other will be ignored
# Do not edit this code
UP = 'U'
DOWN = 'D'
LEFT = 'L'
RIGHT = 'R'
####################################################################################################################################################################################################################################
# Name of your team
# It will be displayed in the maze
# You have to edit this code
TEAM_NAME = "Improved closest v2"
####################################################################################################################################################################################################################################
########################################################################################################## YOUR VARIABLES ##########################################################################################################
####################################################################################################################################################################################################################################
# Stores all the moves in a list to restitute them one by one
allMoves = [RIGHT, RIGHT, RIGHT, RIGHT, UP, UP, LEFT, UP, RIGHT, RIGHT, UP, RIGHT, UP, UP, UP, RIGHT, DOWN, RIGHT, UP, UP]
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED FUNCTIONS ######################################################################################################
####################################################################################################################################################################################################################################
# Writes a message to the shell
# Use for debugging your program
# Channels stdout and stdin are captured to enable communication with the maze
# Do not edit this code
def debug (text) :
    """Write a debugging message to stderr.

    stdout/stdin are reserved for talking to the maze application, so all
    human-readable diagnostics must go through stderr.
    """
    message = "{}\n".format(str(text))
    sys.stderr.write(message)
    sys.stderr.flush()
####################################################################################################################################################################################################################################
# Reads one line of information sent by the maze application
# This function is blocking, and will wait for a line to terminate
# The received information is automatically converted to the correct type
# Do not edit this code
def readFromPipe () :
    # Reads from the stdin channel and returns the structure associated to the string
    # NOTE(review): the bare 'except' is deliberate protocol behavior -- any
    # read or parse failure (e.g. the maze closed the pipe) kills the player
    # process immediately via os._exit(-1).  Be aware it also swallows
    # KeyboardInterrupt/SystemExit.
    try :
        text = sys.stdin.readline()
        # ast.literal_eval safely parses the Python-literal message sent by
        # the maze (dicts, lists, tuples, numbers, strings) without eval().
        return ast.literal_eval(text.strip())
    except :
        os._exit(-1)
####################################################################################################################################################################################################################################
# Sends the text to the maze application
# Do not edit this code
def writeToPipe (text) :
    """Send *text* to the maze application over stdout.

    The stream is flushed immediately so the maze never waits on a
    partially-buffered message.
    """
    out = sys.stdout
    out.write(text)
    out.flush()
####################################################################################################################################################################################################################################
# Reads the initial maze information
# The function processes the text and returns the associated variables
# The dimensions of the maze are positive integers
# Maze map is a dictionary associating to a location its adjacent locations and the associated weights
# The preparation time gives the time during which 'initializationCode' can make computations before the game starts
# The turn time gives the time during which 'determineNextMove' can make computations before returning a decision
# Player locations are tuples (line, column)
# Coins are given as a list of locations where they appear
# A boolean indicates if the game is over
# Do not edit this code
def processInitialInformation () :
    """Read and unpack the initial game description sent by the maze.

    Returns a 9-tuple: (mazeWidth, mazeHeight, mazeMap, preparationTime,
    turnTime, playerLocation, opponentLocation, coins, gameIsOver).
    """
    data = readFromPipe()
    keys = ('mazeWidth', 'mazeHeight', 'mazeMap', 'preparationTime',
            'turnTime', 'playerLocation', 'opponentLocation', 'coins',
            'gameIsOver')
    return tuple(data[key] for key in keys)
####################################################################################################################################################################################################################################
# Reads the information after each player moved
# The maze map and allowed times are no longer provided since they do not change
# Do not edit this code
def processNextInformation () :
    """Read and unpack a per-turn status update from the maze.

    Returns a 4-tuple: (playerLocation, opponentLocation, coins, gameIsOver).
    The maze map and timing information are not resent after the first turn.
    """
    status = readFromPipe()
    keys = ('playerLocation', 'opponentLocation', 'coins', 'gameIsOver')
    return tuple(status[key] for key in keys)
####################################################################################################################################################################################################################################
########################################################################################################## YOUR FUNCTIONS ##########################################################################################################
####################################################################################################################################################################################################################################
# This is where you should write your code to do things during the initialization delay
# This function should not return anything, but should be used for a short preprocessing
# This function takes as parameters the dimensions and map of the maze, the time it is allowed for computing, the players locations in the maze and the remaining coins locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def initializationCode (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
    """Preprocessing hook run during the preparation delay.

    This strategy replays a fixed move list (see ``allMoves``), so there is
    nothing to precompute here.
    """
    return None
####################################################################################################################################################################################################################################
# This is where you should write your code to determine the next direction
# This function should return one of the directions defined in the CONSTANTS section
# This function takes as parameters the dimensions and map of the maze, the time it is allowed for computing, the players locations in the maze and the remaining coins locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def determineNextMove (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
    """Return the next scripted move and consume it from ``allMoves``.

    The maze state arguments are ignored: this player simply replays a
    precomputed sequence of directions, one per turn.
    """
    global allMoves
    remainingMoves = list(allMoves)
    # pop(0) raises IndexError once the script is exhausted, matching the
    # original indexing behavior.
    chosenMove = remainingMoves.pop(0)
    allMoves = remainingMoves
    return chosenMove
####################################################################################################################################################################################################################################
############################################################################################################# MAIN LOOP ############################################################################################################
####################################################################################################################################################################################################################################
# This is the entry point when executing this file
# We first send the name of the team to the maze
# The first message we receive from the maze includes its dimensions and map, the times allowed to the various steps, and the players and coins locations
# Then, at every loop iteration, we get the maze status and determine a move
# Do not edit this code
if __name__ == "__main__" :
    # We send the team name
    writeToPipe(TEAM_NAME + "\n")
    # We process the initial information and have a delay to compute things using it
    (mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = processInitialInformation()
    initializationCode(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)
    # We decide how to move and wait for the next step
    # Main game loop: one status message in, one move character out, until
    # the maze reports the game is over.
    while not gameIsOver :
        (playerLocation, opponentLocation, coins, gameIsOver) = processNextInformation()
        if gameIsOver :
            break
        nextMove = determineNextMove(mazeWidth, mazeHeight, mazeMap, turnTime, playerLocation, opponentLocation, coins)
        writeToPipe(nextMove)
####################################################################################################################################################################################################################################
####################################################################################################################################################################################################################################
|
mit
|
swdream/neutron
|
neutron/agent/linux/iptables_manager.py
|
17
|
28571
|
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import collections
import contextlib
import os
import re
import sys
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron.agent.common import config
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import utils as linux_utils
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
config.register_iptables_opts(cfg.CONF)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
    """Grab the name of the binary we're running in.

    The name is capped at 16 characters (it is used as an iptables chain
    prefix, and chain names max out at 28 characters) and spaces are
    replaced so the result is a valid chain-name fragment.
    """
    base = os.path.basename(sys.argv[0])
    return base[:16].replace(' ', '_')
binary_name = get_binary_name()
# A length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes a
# a failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def comment_rule(rule, comment):
    """Attach an iptables comment match to *rule*, if configured to do so.

    iptables-save prints the comment match before the jump target, so the
    comment is spliced in just before ' -j ' (when present) to keep our
    generated rules byte-comparable with saved ones (_find_last_entry
    depends on that ordering).  Rules without a jump get the comment
    appended at the end.
    """
    if not cfg.CONF.AGENT.comment_iptables_rules or not comment:
        return rule
    comment_snippet = '-m comment --comment "%s"' % comment
    if ' -j ' not in rule:
        return '%s %s' % (rule, comment_snippet)
    jump_pos = rule.index(' -j ')
    return ' '.join([rule[:jump_pos], comment_snippet, rule[jump_pos + 1:]])
def get_chain_name(chain_name, wrap=True):
    """Truncate *chain_name* to the longest length iptables permits.

    Wrapped chains later receive the 16-character binary-name prefix plus a
    dash, so they must be shorter (MAX_CHAIN_LEN_WRAP) than unwrapped
    chains (MAX_CHAIN_LEN_NOWRAP).
    """
    limit = MAX_CHAIN_LEN_WRAP if wrap else MAX_CHAIN_LEN_NOWRAP
    return chain_name[:limit]
class IptablesRule(object):
    """An iptables rule.

    You shouldn't need to use this class directly, it's only used by
    IptablesManager.
    """
    def __init__(self, chain, rule, wrap=True, top=False,
                 binary_name=binary_name, tag=None, comment=None):
        # Chain names are truncated up front so wrapped forms fit within
        # the iptables 28-character chain-name limit.
        self.chain = get_chain_name(chain, wrap)
        self.rule = rule
        self.wrap = wrap
        self.top = top
        self.wrap_name = binary_name[:16]
        self.tag = tag
        self.comment = comment
    def __eq__(self, other):
        # A rule's identity is its chain/rule text plus placement flags;
        # tag and comment are deliberately excluded from comparison.
        return (self.chain == other.chain and
                self.rule == other.rule and
                self.top == other.top and
                self.wrap == other.wrap)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __str__(self):
        # Wrapped chains are rendered with the per-binary prefix, exactly
        # as they appear in iptables-save output.
        prefix = '%s-' % self.wrap_name if self.wrap else ''
        return comment_rule('-A %s%s %s' % (prefix, self.chain, self.rule),
                            self.comment)
class IptablesTable(object):
    """An iptables table."""
    def __init__(self, binary_name=binary_name):
        # Rules and chains to be written out on the next apply().
        self.rules = []
        # Rules explicitly scheduled for removal (unwrapped rules only --
        # see remove_chain/remove_rule).
        self.remove_rules = []
        # Wrapped chain names (without the binary-name prefix).
        self.chains = set()
        # Chain names used verbatim, shared across neutron binaries.
        self.unwrapped_chains = set()
        # Unwrapped chains scheduled for removal on the next apply().
        self.remove_chains = set()
        # Prefix prepended to wrapped chain names; capped at 16 chars so
        # '<prefix>-<chain>' fits the 28-char iptables limit.
        self.wrap_name = binary_name[:16]
    def add_chain(self, name, wrap=True):
        """Adds a named chain to the table.
        The chain name is wrapped to be unique for the component creating
        it, so different components of Nova can safely create identically
        named chains without interfering with one another.
        At the moment, its wrapped name is <binary name>-<chain name>,
        so if neutron-openvswitch-agent creates a chain named 'OUTPUT',
        it'll actually end up being named 'neutron-openvswi-OUTPUT'.
        """
        name = get_chain_name(name, wrap)
        if wrap:
            self.chains.add(name)
        else:
            self.unwrapped_chains.add(name)
    def _select_chain_set(self, wrap):
        # Return the chain set (wrapped or unwrapped) matching *wrap*.
        if wrap:
            return self.chains
        else:
            return self.unwrapped_chains
    def remove_chain(self, name, wrap=True):
        """Remove named chain.
        This removal "cascades". All rule in the chain are removed, as are
        all rules in other chains that jump to it.
        If the chain is not found, this is merely logged.
        """
        name = get_chain_name(name, wrap)
        chain_set = self._select_chain_set(wrap)
        if name not in chain_set:
            LOG.debug('Attempted to remove chain %s which does not exist',
                      name)
            return
        chain_set.remove(name)
        if not wrap:
            # non-wrapped chains and rules need to be dealt with specially,
            # so we keep a list of them to be iterated over in apply()
            self.remove_chains.add(name)
            # first, add rules to remove that have a matching chain name
            self.remove_rules += [r for r in self.rules if r.chain == name]
        # next, remove rules from list that have a matching chain name
        self.rules = [r for r in self.rules if r.chain != name]
        if not wrap:
            jump_snippet = '-j %s' % name
            # next, add rules to remove that have a matching jump chain
            self.remove_rules += [r for r in self.rules
                                  if jump_snippet in r.rule]
        else:
            jump_snippet = '-j %s-%s' % (self.wrap_name, name)
        # finally, remove rules from list that have a matching jump chain
        self.rules = [r for r in self.rules
                      if jump_snippet not in r.rule]
    def add_rule(self, chain, rule, wrap=True, top=False, tag=None,
                 comment=None):
        """Add a rule to the table.
        This is just like what you'd feed to iptables, just without
        the '-A <chain name>' bit at the start.
        However, if you need to jump to one of your wrapped chains,
        prepend its name with a '$' which will ensure the wrapping
        is applied correctly.
        """
        chain = get_chain_name(chain, wrap)
        if wrap and chain not in self.chains:
            raise LookupError(_('Unknown chain: %r') % chain)
        if '$' in rule:
            # '$name' placeholders are expanded into the wrapped chain name.
            rule = ' '.join(
                self._wrap_target_chain(e, wrap) for e in rule.split(' '))
        self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
                                       tag, comment))
    def _wrap_target_chain(self, s, wrap):
        # Expand a '$chain' token into '<wrap_name>-<chain>'; other tokens
        # pass through untouched.
        if s.startswith('$'):
            s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
        return s
    def remove_rule(self, chain, rule, wrap=True, top=False, comment=None):
        """Remove a rule from a chain.
        Note: The rule must be exactly identical to the one that was added.
        You cannot switch arguments around like you can with the iptables
        CLI tool.
        """
        chain = get_chain_name(chain, wrap)
        try:
            if '$' in rule:
                rule = ' '.join(
                    self._wrap_target_chain(e, wrap) for e in rule.split(' '))
            self.rules.remove(IptablesRule(chain, rule, wrap, top,
                                           self.wrap_name,
                                           comment=comment))
            if not wrap:
                # Unwrapped rules are remembered so apply() can strip them
                # from the live iptables-save output as well.
                self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
                                                      self.wrap_name,
                                                      comment=comment))
        except ValueError:
            LOG.warn(_LW('Tried to remove rule that was not there:'
                         ' %(chain)r %(rule)r %(wrap)r %(top)r'),
                     {'chain': chain, 'rule': rule,
                      'top': top, 'wrap': wrap})
    def _get_chain_rules(self, chain, wrap):
        # All in-memory rules belonging to *chain* with a matching wrap flag.
        chain = get_chain_name(chain, wrap)
        return [rule for rule in self.rules
                if rule.chain == chain and rule.wrap == wrap]
    def empty_chain(self, chain, wrap=True):
        """Remove all rules from a chain."""
        chained_rules = self._get_chain_rules(chain, wrap)
        for rule in chained_rules:
            self.rules.remove(rule)
    def clear_rules_by_tag(self, tag):
        # Drop every rule carrying *tag*; a falsy tag is a no-op.
        if not tag:
            return
        rules = [rule for rule in self.rules if rule.tag == tag]
        for rule in rules:
            self.rules.remove(rule)
class IptablesManager(object):
    """Wrapper for iptables.
    See IptablesTable for some usage docs
    A number of chains are set up to begin with.
    First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT.
    Its name is not wrapped, so it's shared between the various neutron
    workers. It's intended for rules that need to live at the top of the
    FORWARD and OUTPUT chains. It's in both the ipv4 and ipv6 set of tables.
    For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
    are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
    the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
    "local" which is jumped to from neutron-filter-top.
    For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same was as the built-in filter chains. Additionally,
    there's a snat chain that is applied after the POSTROUTING chain.
    """
    def __init__(self, _execute=None, state_less=False, use_ipv6=False,
                 namespace=None, binary_name=binary_name):
        if _execute:
            self.execute = _execute
        else:
            self.execute = linux_utils.execute
        self.use_ipv6 = use_ipv6
        self.namespace = namespace
        # When True, apply() becomes a no-op until defer_apply_off() flushes.
        self.iptables_apply_deferred = False
        self.wrap_name = binary_name[:16]
        self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
        self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
        # Add a neutron-filter-top chain. It's intended to be shared
        # among the various neutron components. It sits at the very top
        # of FORWARD and OUTPUT.
        for tables in [self.ipv4, self.ipv6]:
            tables['filter'].add_chain('neutron-filter-top', wrap=False)
            tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
                                      wrap=False, top=True)
            tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
                                      wrap=False, top=True)
            tables['filter'].add_chain('local')
            tables['filter'].add_rule('neutron-filter-top', '-j $local',
                                      wrap=False)
        # Wrap the built-in chains
        builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
                          6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
        if not state_less:
            self.ipv4.update(
                {'mangle': IptablesTable(binary_name=self.wrap_name)})
            builtin_chains[4].update(
                {'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT',
                            'POSTROUTING']})
            self.ipv4.update(
                {'nat': IptablesTable(binary_name=self.wrap_name)})
            builtin_chains[4].update({'nat': ['PREROUTING',
                                     'OUTPUT', 'POSTROUTING']})
        self.ipv4.update({'raw': IptablesTable(binary_name=self.wrap_name)})
        builtin_chains[4].update({'raw': ['PREROUTING', 'OUTPUT']})
        self.ipv6.update({'raw': IptablesTable(binary_name=self.wrap_name)})
        builtin_chains[6].update({'raw': ['PREROUTING', 'OUTPUT']})
        for ip_version in builtin_chains:
            if ip_version == 4:
                tables = self.ipv4
            elif ip_version == 6:
                tables = self.ipv6
            for table, chains in six.iteritems(builtin_chains[ip_version]):
                for chain in chains:
                    tables[table].add_chain(chain)
                    tables[table].add_rule(chain, '-j $%s' %
                                           (chain), wrap=False)
        if not state_less:
            # Add a neutron-postrouting-bottom chain. It's intended to be
            # shared among the various neutron components. We set it as the
            # last chain of POSTROUTING chain.
            self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
                                       wrap=False)
            self.ipv4['nat'].add_rule('POSTROUTING',
                                      '-j neutron-postrouting-bottom',
                                      wrap=False)
            # We add a snat chain to the shared neutron-postrouting-bottom
            # chain so that it's applied last.
            self.ipv4['nat'].add_chain('snat')
            self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
                                      '-j $snat', wrap=False,
                                      comment=ic.SNAT_OUT)
            # And then we add a float-snat chain and jump to first thing in
            # the snat chain.
            self.ipv4['nat'].add_chain('float-snat')
            self.ipv4['nat'].add_rule('snat', '-j $float-snat')
            # Add a mark chain to mangle PREROUTING chain. It is used to
            # identify ingress packets from a certain interface.
            self.ipv4['mangle'].add_chain('mark')
            self.ipv4['mangle'].add_rule('PREROUTING', '-j $mark')
    def get_chain(self, table, chain, ip_version=4, wrap=True):
        """Return the in-memory rules of a chain; [] for unknown table."""
        try:
            requested_table = {4: self.ipv4, 6: self.ipv6}[ip_version][table]
        except KeyError:
            return []
        return requested_table._get_chain_rules(chain, wrap)
    def is_chain_empty(self, table, chain, ip_version=4, wrap=True):
        """True when the chain holds no in-memory rules."""
        return not self.get_chain(table, chain, ip_version, wrap)
    @contextlib.contextmanager
    def defer_apply(self):
        """Defer apply context."""
        self.defer_apply_on()
        try:
            yield
        finally:
            try:
                self.defer_apply_off()
            except Exception:
                msg = _LE('Failure applying iptables rules')
                LOG.exception(msg)
                raise n_exc.IpTablesApplyException(msg)
    def defer_apply_on(self):
        """Make subsequent apply() calls no-ops until defer_apply_off()."""
        self.iptables_apply_deferred = True
    def defer_apply_off(self):
        """End deferral and flush the accumulated rules immediately."""
        self.iptables_apply_deferred = False
        self._apply()
    def apply(self):
        """Apply the in-memory rules, unless application is deferred."""
        if self.iptables_apply_deferred:
            return
        self._apply()
    def _apply(self):
        # Serialize concurrent appliers (per-namespace) with an external
        # lock; iptables-restore is not safe to run concurrently.
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace
        with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
            return self._apply_synchronized()
    def get_rules_for_table(self, table):
        """Runs iptables-save on a table and returns the results."""
        args = ['iptables-save', '-t', table]
        if self.namespace:
            args = ['ip', 'netns', 'exec', self.namespace] + args
        return self.execute(args, run_as_root=True).split('\n')
    def _apply_synchronized(self):
        """Apply the current in-memory set of iptables rules.
        This will blow away any rules left over from previous runs of the
        same component of Nova, and replace them with our current set of
        rules. This happens atomically, thanks to iptables-restore.
        """
        s = [('iptables', self.ipv4)]
        if self.use_ipv6:
            s += [('ip6tables', self.ipv6)]
        for cmd, tables in s:
            args = ['%s-save' % (cmd,), '-c']
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            all_tables = self.execute(args, run_as_root=True)
            all_lines = all_tables.split('\n')
            # Traverse tables in sorted order for predictable dump output
            for table_name in sorted(tables):
                table = tables[table_name]
                start, end = self._find_table(all_lines, table_name)
                all_lines[start:end] = self._modify_rules(
                    all_lines[start:end], table, table_name)
            args = ['%s-restore' % (cmd,), '-c']
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            try:
                self.execute(args, process_input='\n'.join(all_lines),
                             run_as_root=True)
            except RuntimeError as r_error:
                with excutils.save_and_reraise_exception():
                    try:
                        line_no = int(re.search(
                            'iptables-restore: line ([0-9]+?) failed',
                            str(r_error)).group(1))
                        context = IPTABLES_ERROR_LINES_OF_CONTEXT
                        log_start = max(0, line_no - context)
                        log_end = line_no + context
                    except AttributeError:
                        # line error wasn't found, print all lines instead
                        log_start = 0
                        log_end = len(all_lines)
                    log_lines = ('%7d. %s' % (idx, l)
                                 for idx, l in enumerate(
                                     all_lines[log_start:log_end],
                                     log_start + 1)
                                 )
                    LOG.error(_LE("IPTablesManager.apply failed to apply the "
                                  "following set of iptables rules:\n%s"),
                              '\n'.join(log_lines))
        LOG.debug("IPTablesManager.apply completed with success")
    def _find_table(self, lines, table_name):
        # Locate the '*<table_name>' section in iptables-save output and
        # return its (start, end) slice bounds, COMMIT line included.
        # (0, 0) means the table was not found.
        if len(lines) < 3:
            # length only <2 when fake iptables
            return (0, 0)
        try:
            start = lines.index('*%s' % table_name) - 1
        except ValueError:
            # Couldn't find table_name
            LOG.debug('Unable to find table %s', table_name)
            return (0, 0)
        end = lines[start:].index('COMMIT') + start + 2
        return (start, end)
    def _find_rules_index(self, lines):
        # Index of the first rule line, i.e. the line just after the run of
        # ':' chain-declaration lines.
        seen_chains = False
        rules_index = 0
        for rules_index, rule in enumerate(lines):
            if not seen_chains:
                if rule.startswith(':'):
                    seen_chains = True
            else:
                if not rule.startswith(':'):
                    break
        if not seen_chains:
            rules_index = 2
        return rules_index
    def _find_last_entry(self, filter_map, match_str):
        # find last matching entry
        try:
            return filter_map[match_str][-1]
        except KeyError:
            pass
    def _modify_rules(self, current_lines, table, table_name):
        # Merge our in-memory chains/rules for one table into the live
        # iptables-save output, preserving existing [packet:byte] counters,
        # weeding out duplicates and applying pending removals.
        # Chains are stored as sets to avoid duplicates.
        # Sort the output chains here to make their order predictable.
        unwrapped_chains = sorted(table.unwrapped_chains)
        chains = sorted(table.chains)
        remove_chains = table.remove_chains
        rules = table.rules
        remove_rules = table.remove_rules
        if not current_lines:
            fake_table = ['# Generated by iptables_manager',
                          '*' + table_name, 'COMMIT',
                          '# Completed by iptables_manager']
            current_lines = fake_table
        # Fill old_filter with any chains or rules we might have added,
        # they could have a [packet:byte] count we want to preserve.
        # Fill new_filter with any chains or rules without our name in them.
        old_filter, new_filter = [], []
        for line in current_lines:
            (old_filter if self.wrap_name in line else
             new_filter).append(line.strip())
        old_filter_map = make_filter_map(old_filter)
        new_filter_map = make_filter_map(new_filter)
        rules_index = self._find_rules_index(new_filter)
        all_chains = [':%s' % name for name in unwrapped_chains]
        all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
        # Iterate through all the chains, trying to find an existing
        # match.
        our_chains = []
        for chain in all_chains:
            chain_str = str(chain).strip()
            old = self._find_last_entry(old_filter_map, chain_str)
            if not old:
                dup = self._find_last_entry(new_filter_map, chain_str)
            new_filter = [s for s in new_filter if chain_str not in s.strip()]
            # if no old or duplicates, use original chain
            if old or dup:
                chain_str = str(old or dup)
            else:
                # add-on the [packet:bytes]
                chain_str += ' - [0:0]'
            our_chains += [chain_str]
        # Iterate through all the rules, trying to find an existing
        # match.
        our_rules = []
        bot_rules = []
        for rule in rules:
            rule_str = str(rule).strip()
            # Further down, we weed out duplicates from the bottom of the
            # list, so here we remove the dupes ahead of time.
            old = self._find_last_entry(old_filter_map, rule_str)
            if not old:
                dup = self._find_last_entry(new_filter_map, rule_str)
            new_filter = [s for s in new_filter if rule_str not in s.strip()]
            # if no old or duplicates, use original rule
            if old or dup:
                rule_str = str(old or dup)
                # backup one index so we write the array correctly
                if not old:
                    rules_index -= 1
            else:
                # add-on the [packet:bytes]
                rule_str = '[0:0] ' + rule_str
            if rule.top:
                # rule.top == True means we want this rule to be at the top.
                our_rules += [rule_str]
            else:
                bot_rules += [rule_str]
        our_rules += bot_rules
        new_filter[rules_index:rules_index] = our_rules
        new_filter[rules_index:rules_index] = our_chains
        def _strip_packets_bytes(line):
            # strip any [packet:byte] counts at start or end of lines
            if line.startswith(':'):
                # it's a chain, for example, ":neutron-billing - [0:0]"
                line = line.split(':')[1]
                line = line.split(' - [', 1)[0]
            elif line.startswith('['):
                # it's a rule, for example, "[0:0] -A neutron-billing..."
                line = line.split('] ', 1)[1]
            line = line.strip()
            return line
        seen_chains = set()
        def _weed_out_duplicate_chains(line):
            # ignore [packet:byte] counts at end of lines
            if line.startswith(':'):
                line = _strip_packets_bytes(line)
                if line in seen_chains:
                    return False
                else:
                    seen_chains.add(line)
            # Leave it alone
            return True
        seen_rules = set()
        def _weed_out_duplicate_rules(line):
            if line.startswith('['):
                line = _strip_packets_bytes(line)
                if line in seen_rules:
                    return False
                else:
                    seen_rules.add(line)
            # Leave it alone
            return True
        def _weed_out_removes(line):
            # We need to find exact matches here
            if line.startswith(':'):
                line = _strip_packets_bytes(line)
                for chain in remove_chains:
                    if chain == line:
                        remove_chains.remove(chain)
                        return False
            elif line.startswith('['):
                line = _strip_packets_bytes(line)
                for rule in remove_rules:
                    rule_str = _strip_packets_bytes(str(rule))
                    if rule_str == line:
                        remove_rules.remove(rule)
                        return False
            # Leave it alone
            return True
        # We filter duplicates. Go through the chains and rules, letting
        # the *last* occurrence take precedence since it could have a
        # non-zero [packet:byte] count we want to preserve. We also filter
        # out anything in the "remove" list.
        new_filter.reverse()
        new_filter = [line for line in new_filter
                      if _weed_out_duplicate_chains(line) and
                      _weed_out_duplicate_rules(line) and
                      _weed_out_removes(line)]
        new_filter.reverse()
        # flush lists, just in case we didn't find something
        remove_chains.clear()
        for rule in remove_rules:
            remove_rules.remove(rule)
        return new_filter
    def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
        # (command, table) pairs for every table that contains *chain*,
        # covering ip6tables as well when IPv6 is enabled.
        name = get_chain_name(chain, wrap)
        cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
                      if name in table._select_chain_set(wrap)]
        if self.use_ipv6:
            cmd_tables += [('ip6tables', key)
                           for key, table in self.ipv6.items()
                           if name in table._select_chain_set(wrap)]
        return cmd_tables
    def get_traffic_counters(self, chain, wrap=True, zero=False):
        """Return the sum of the traffic counters of all rules of a chain."""
        cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
        if not cmd_tables:
            LOG.warn(_LW('Attempted to get traffic counters of chain %s which '
                         'does not exist'), chain)
            return
        name = get_chain_name(chain, wrap)
        acc = {'pkts': 0, 'bytes': 0}
        for cmd, table in cmd_tables:
            args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
            if zero:
                args.append('-Z')
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            current_table = self.execute(args, run_as_root=True)
            current_lines = current_table.split('\n')
            # Skip the two header lines, then read "pkts bytes ..." columns
            # until a blank or non-numeric line ends the listing.
            for line in current_lines[2:]:
                if not line:
                    break
                data = line.split()
                if (len(data) < 2 or
                        not data[0].isdigit() or
                        not data[1].isdigit()):
                    break
                acc['pkts'] += int(data[0])
                acc['bytes'] += int(data[1])
        return acc
def make_filter_map(filter_list):
    """Index iptables-save lines by their text with counters stripped.

    Chains look like ":neutron-foo - [0:0]" and rules look like
    "[0:0] -A neutron-foo ..."; the map key is the line without its
    [packet:byte] counters.  Lines carrying no counters (COMMIT, *filter,
    comments) are skipped.  Because iptables rewrites plain addresses into
    /32 (or /128) CIDRs, every entry is additionally indexed under the
    CIDR-less spelling so both forms match.
    """
    filter_map = collections.defaultdict(list)
    for entry in filter_list:
        if entry.startswith('['):
            # Rule: drop the leading "[pkts:bytes] " counter prefix.
            key = entry.rpartition('] ')[2]
        elif entry.endswith(']'):
            # Chain: drop the trailing " [pkts:bytes]" counter suffix
            # (and the dangling ' -' policy placeholder).
            key = entry.rsplit(' [', 1)[0]
            if key.endswith(' -'):
                key = key[:-2]
        else:
            # things like COMMIT, *filter, and *nat land here
            continue
        keys = [key]
        keys.extend(key.replace(cidr, '')
                    for cidr in ('/32', '/128') if cidr in key)
        for k in keys:
            filter_map[k].append(entry)
    # return a regular dict so readers don't accidentally add entries
    return dict(filter_map)
|
apache-2.0
|
EricSB/powerline
|
tests/lib/config_mock.py
|
3
|
5302
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from threading import Lock
from copy import deepcopy
from time import sleep
from functools import wraps
from powerline.renderer import Renderer
from powerline.lib.config import ConfigLoader
from powerline import Powerline, get_default_theme
from tests.lib import Args, replace_attr
# Default top-level themes used by the tests: UT is the unicode variant,
# AT the ascii (non-unicode) variant.
UT = get_default_theme(is_unicode=True)
AT = get_default_theme(is_unicode=False)
class TestHelpers(object):
	'''Test double for Powerline config discovery and loading.

	Configs are served from the in-memory *config* dict.  Every check and
	load is recorded in ``access_log`` (guarded by a lock) so tests can
	assert on the exact sequence of accesses via pop_events().
	'''
	def __init__(self, config):
		self.config = config
		self.access_log = []
		self.access_lock = Lock()

	@staticmethod
	def _normalize(path):
		'''Strip a trailing ".json" suffix and any leading slashes.'''
		if path.endswith('.json'):
			path = path[:-5]
		if path.startswith('/'):
			path = path.lstrip('/')
		return path

	def _record(self, event):
		'''Append *event* to the access log under the lock.'''
		with self.access_lock:
			self.access_log.append(event)

	def loader_condition(self, path):
		'''Return *path* when it is a known config key, else a falsy value.'''
		return (path in self.config) and path

	def find_config_files(self, cfg_path, config_loader, loader_callback):
		'''Yield the normalized path if known, else raise IOError.

		Unknown paths are registered as missing on *config_loader* (when
		given) before the IOError is raised.
		'''
		cfg_path = self._normalize(cfg_path)
		self._record('check:' + cfg_path)
		if cfg_path in self.config:
			yield cfg_path
		else:
			if config_loader:
				config_loader.register_missing(self.loader_condition, loader_callback, cfg_path)
			raise IOError(('fcf:' if cfg_path.endswith('raise') else '') + cfg_path)

	def load_json_config(self, config_file_path, *args, **kwargs):
		'''Return a deep copy of the stored config; raise IOError if unknown.'''
		config_file_path = self._normalize(config_file_path)
		self._record('load:' + config_file_path)
		try:
			return deepcopy(self.config[config_file_path])
		except KeyError:
			raise IOError(config_file_path)

	def pop_events(self):
		'''Return and clear the accumulated access log entries.'''
		with self.access_lock:
			collected = self.access_log[:]
			self.access_log = []
		return collected
def log_call(func):
	'''Decorator recording each call as (name, args, kwargs) on self._calls.

	Intended for methods of objects that maintain a ``_calls`` list.
	'''
	@wraps(func)
	def wrapper(self, *args, **kwargs):
		self._calls.append((func.__name__, args, kwargs))
		return func(self, *args, **kwargs)
	return wrapper
class TestWatcher(object):
	'''Fake file watcher: reports a file as changed once per _reset().

	``events`` holds the set of files that should report a change; each
	positive report consumes the entry.  All watcher API calls are logged
	on ``_calls`` via @log_call.
	'''
	events = set()
	lock = Lock()

	def __init__(self):
		self._calls = []

	@log_call
	def watch(self, file):
		pass

	@log_call
	def __call__(self, file):
		'''Return True exactly once after *file* was queued via _reset().'''
		with self.lock:
			changed = file in self.events
			if changed:
				self.events.remove(file)
			return changed

	def _reset(self, files):
		'''Replace the pending change set with *files*.'''
		with self.lock:
			self.events.clear()
			self.events.update(files)

	@log_call
	def unsubscribe(self):
		pass
class Logger(object):
	'''Log collector recording messages as "level:message" strings.

	Any attribute access (info, error, exception, ...) yields a callable
	that appends "attr:msg"; _pop_msgs() drains the collected list.
	'''
	def __init__(self):
		self.messages = []
		self.lock = Lock()

	def _add_msg(self, attr, msg):
		with self.lock:
			self.messages.append(attr + ':' + msg)

	def _pop_msgs(self):
		'''Return collected messages and start a fresh list.'''
		with self.lock:
			collected, self.messages = self.messages, []
		return collected

	def __getattr__(self, attr):
		# Only triggered for missing attributes, i.e. log-level names.
		return lambda *args, **kwargs: self._add_msg(attr, *args, **kwargs)
class SimpleRenderer(Renderer):
	'''Renderer emitting highlight styles as verbose "<fg bg attrs>" markers.'''
	def hlstyle(self, fg=None, bg=None, attrs=None):
		fg_key = fg and fg[0]
		bg_key = bg and bg[0]
		return '<{fg} {bg} {attrs}>'.format(fg=fg_key, bg=bg_key, attrs=attrs)
class EvenSimplerRenderer(Renderer):
	'''Renderer emitting compact "{fba}" style markers ('-' for unset).'''
	def hlstyle(self, fg=None, bg=None, attrs=None):
		fg_key = (fg and fg[0]) or '-'
		bg_key = (bg and bg[0]) or '-'
		return '{{{fg}{bg}{attrs}}}'.format(
			fg=fg_key,
			bg=bg_key,
			attrs=attrs if attrs else '',
		)
class TestPowerline(Powerline):
	'''Powerline subclass wired to TestHelpers for config discovery.'''
	_created = False

	def __init__(self, _helpers, **kwargs):
		super(TestPowerline, self).__init__(**kwargs)
		self._helpers = _helpers
		# Route config discovery through the in-memory helper.
		self.find_config_files = _helpers.find_config_files

	@staticmethod
	def get_local_themes(local_themes):
		'''Return local themes unchanged (no filtering in tests).'''
		return local_themes

	@staticmethod
	def get_config_paths():
		return ['']

	def _will_create_renderer(self):
		return self.cr_kwargs

	def _pop_events(self):
		'''Drain the helper's recorded check/load events.'''
		return self._helpers.pop_events()
# Module-level renderer class; Powerline looks this up via
# renderer_module='tests.lib.config_mock' and select_renderer() reassigns it.
renderer = EvenSimplerRenderer
class TestConfigLoader(ConfigLoader):
	'''ConfigLoader backed by TestHelpers' fake JSON loader and a TestWatcher.'''
	def __init__(self, _helpers, **kwargs):
		super(TestConfigLoader, self).__init__(
			load=_helpers.load_json_config,
			watcher=TestWatcher(),
			watcher_type='test',
			**kwargs
		)
def get_powerline(config, **kwargs):
	'''Return a TestPowerline serving configs from the *config* dict.

	Extra keyword arguments are forwarded to get_powerline_raw().
	'''
	helpers = TestHelpers(config)
	return get_powerline_raw(
		helpers,
		TestPowerline,
		_helpers=helpers,
		ext='test',
		renderer_module='tests.lib.config_mock',
		logger=Logger(),
		**kwargs
	)
def select_renderer(simpler_renderer=False):
	'''Point the module-level ``renderer`` at one of the two test renderers.'''
	global renderer
	if simpler_renderer:
		renderer = EvenSimplerRenderer
	else:
		renderer = SimpleRenderer
def get_powerline_raw(helpers, PowerlineClass, replace_gcp=False, **kwargs):
	'''Instantiate *PowerlineClass* with a test config loader attached.

	*helpers* may be a TestHelpers instance or a plain config dict.  When
	*replace_gcp* is true the class is subclassed on the fly so that
	get_config_paths() returns ['/'] instead of the class default.  The
	created instance exposes the watcher as ``_watcher``.
	'''
	if not isinstance(helpers, TestHelpers):
		helpers = TestHelpers(helpers)
	select_renderer(kwargs.pop('simpler_renderer', False))
	if replace_gcp:
		class PowerlineClass(PowerlineClass):
			@staticmethod
			def get_config_paths():
				return ['/']
	instance = PowerlineClass(
		config_loader=TestConfigLoader(
			_helpers=helpers,
			run_once=kwargs.get('run_once')
		),
		**kwargs
	)
	instance._watcher = instance.config_loader.watcher
	return instance
def swap_attributes(config, powerline_module):
	'''Replace *powerline_module*.os with a stub consulting *config*.

	The stub's path.isfile() reports a file as existing exactly when its
	normalized name is a key of *config*; other path helpers are pass-through.
	'''
	fake_path = Args(
		isfile=lambda path: path.lstrip('/').replace('.json', '') in config,
		join=os.path.join,
		expanduser=lambda path: path,
		realpath=lambda path: path,
		dirname=os.path.dirname,
	)
	return replace_attr(powerline_module, 'os', Args(path=fake_path, environ={}))
def add_watcher_events(p, *args, **kwargs):
	'''Queue *args* as changed files and wait for a renderer rebuild.

	Keyword arguments: ``interval`` (poll sleep, default 0.1) and ``wait``
	(default True; when False, give up after the first sleep).
	'''
	if isinstance(p._watcher, TestWatcher):
		p._watcher._reset(args)
	# Hoist the kwargs lookups; they are invariant during the loop.
	interval = kwargs.get('interval', 0.1)
	wait = kwargs.get('wait', True)
	while not p._will_create_renderer():
		sleep(interval)
		if not wait:
			return
|
mit
|
thomashaw/SecGen
|
modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/readline.py
|
13
|
5659
|
import os.path
import sys
from warnings import warn
try:
_console = sys._jy_console
_reader = _console.reader
except AttributeError:
raise ImportError("Cannot access JLine2 setup")
try:
# jarjar-ed version
from org.python.jline.console.history import MemoryHistory
except ImportError:
# dev version from extlibs
from jline.console.history import MemoryHistory
# Public readline-compatible API implemented on top of JLine2.
__all__ = ['add_history', 'clear_history', 'get_begidx', 'get_completer',
           'get_completer_delims', 'get_current_history_length',
           'get_endidx', 'get_history_item', 'get_history_length',
           'get_line_buffer', 'insert_text', 'parse_and_bind',
           'read_history_file', 'read_init_file', 'redisplay',
           'remove_history_item', 'set_completer', 'set_completer_delims',
           'set_history_length', 'set_pre_input_hook', 'set_startup_hook',
           'write_history_file']

# Kept for CPython readline compatibility; unused by the JLine2 backend.
_history_list = None

# The need for the following warnings should go away once we update
# JLine. Choosing ImportWarning as the closest warning to what is
# going on here, namely this is functionality not yet available on
# Jython.
class NotImplementedWarning(ImportWarning):
    """Not yet implemented by Jython"""

class SecurityWarning(ImportWarning):
    """Security manager prevents access to private field"""
def parse_and_bind(string):
    """Accepted for API compatibility; key binding is handled by JLine2."""
    pass
def get_line_buffer():
    """Return the current contents of the JLine2 edit buffer as a str."""
    return str(_reader.cursorBuffer.buffer)
def insert_text(string):
    """Insert *string* into the line buffer at the cursor position."""
    _reader.putString(string)
def read_init_file(filename=None):
    """Not supported by the JLine2 backend; emits NotImplementedWarning."""
    warn("read_init_file: %s" % (filename,), NotImplementedWarning, "module", 2)
def read_history_file(filename="~/.history"):
    """Load history entries from *filename* (tilde-expanded) into JLine."""
    expanded = os.path.expanduser(filename)
    with open(expanded) as f:
        _reader.history.load(f)
def write_history_file(filename="~/.history"):
    """Write the history to *filename*, one UTF-8-encoded entry per line.

    NOTE(review): the file is opened in text mode while encoded bytes are
    written; fine on Jython 2.x, would fail on Python 3.
    """
    expanded = os.path.expanduser(filename)
    with open(expanded, 'w') as f:
        for line in _reader.history.entries():
            f.write(line.value().encode("utf-8"))
            f.write("\n")
def clear_history():
    """Discard all entries from the JLine history."""
    _reader.history.clear()
def add_history(line):
    """Append *line* to the JLine history."""
    _reader.history.add(line)
def get_history_length():
    """Return the history size limit (JLine's maxSize)."""
    return _reader.history.maxSize
def set_history_length(length):
    """Set the maximum number of entries JLine retains in its history."""
    _reader.history.maxSize = length
def get_current_history_length():
    """Return the number of entries currently stored in the history."""
    return _reader.history.size()
def get_history_item(index):
    """Return the history entry at *index* (1-based, readline-style) or None."""
    # JLine indexes from 0 while readline indexes from 1
    # (at least in test_readline).
    if index <= 0:
        return None
    return _reader.history.get(index - 1)
def remove_history_item(pos):
    """Remove the history entry at *pos* (0-based, like CPython readline)."""
    _reader.history.remove(pos)
def replace_history_item(pos, line):
    """Replace the history entry at *pos* (0-based) with *line*."""
    _reader.history.set(pos, line)
def redisplay():
    """Redraw the current input line on the console."""
    _reader.redrawLine()
def set_startup_hook(function=None):
    """Install *function* as the console startup hook (None to remove)."""
    _console.startupHook = function
def set_pre_input_hook(function=None):
    """Not supported by the JLine2 backend; emits NotImplementedWarning."""
    warn("set_pre_input_hook %s" % (function,), NotImplementedWarning, stacklevel=2)
# The completer most recently installed via set_completer(); returned by
# get_completer().
_completer_function = None

def set_completer(function=None):
    """set_completer([function]) -> None
    Set or remove the completer function.
    The function is called as function(text, state),
    for state in 0, 1, 2, ..., until it returns a non-string.
    It should return the next possible completion starting with 'text'."""
    global _completer_function
    _completer_function = function

    def complete_handler(buffer, cursor, candidates):
        # JLine completion callback: fill *candidates* and return the
        # buffer offset the completions should replace from.
        start = _get_delimited(buffer, cursor)[0]
        delimited = buffer[start:cursor]
        try:
            sys.ps2
            have_ps2 = True
        except AttributeError:
            have_ps2 = False
        if (have_ps2 and _reader.prompt == sys.ps2) and (not delimited or delimited.isspace()):
            # Insert tab (expanded to 4 spaces), but only if the preceding
            # text is whitespace/empty and we are in console continuation;
            # this is a planned feature for Python 3 per
            # http://bugs.python.org/issue5845
            #
            # Ideally this would not expand tabs, in case of mixed
            # copy&paste of tab-indented code, however JLine2 gets
            # confused as to the cursor position if certain, but not
            # all, subsequent editing if the tab is backspaced
            candidates.add(" " * 4)
            return start

        # TODO: if there are a reasonably large number of completions
        # (need to get specific numbers), CPython 3.4 will show a
        # message like so:
        # >>>
        # Display all 186 possibilities? (y or n)
        # Currently Jython arbitrarily limits this to 100 and displays them
        for state in xrange(100):
            completion = None
            try:
                completion = function(delimited, state)
            except:
                # Mirrors CPython readline: completer errors silently end
                # completion. NOTE(review): bare except also hides real bugs.
                pass
            if completion:
                candidates.add(completion)
            else:
                break
        return start

    _reader.addCompleter(complete_handler)
def get_completer():
    """Return the completer installed by set_completer(), or None."""
    return _completer_function
def _get_delimited(buffer, cursor):
    """Return (start, cursor) bounding the word that ends at *cursor*.

    Scans backwards from the cursor until a completer delimiter is found.
    """
    start = cursor
    for i in xrange(cursor-1, -1, -1):
        if buffer[i] in _completer_delims:
            break
        start = i
    return start, cursor
def get_begidx():
    """Return the start index of the word being completed in the buffer."""
    return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[0]
def get_endidx():
    """Return the end index (cursor position) of the word being completed."""
    return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[1]
def set_completer_delims(string):
    """Set the characters treated as word delimiters during completion."""
    global _completer_delims, _completer_delims_set
    _completer_delims = string
    _completer_delims_set = set(string)
def get_completer_delims():
    """Return the current word-delimiter string for completion."""
    return _completer_delims
# Install readline's default delimiter set at import time.
set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?')
|
gpl-3.0
|
gamahead/nupic
|
tests/swarming/nupic/swarming/experiments/input_predicted_field/description.py
|
8
|
14110
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer
)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# Model configuration for this generated OPF experiment.  The nested
# structure mirrors the CLA model parameters; entries may be overridden
# by a sub-experiment via updateConfigFromSubConfig() further below.
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "CLA",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': { 'days': 0,
        'fields': [ (u'timestamp', 'first'),
                    (u'consumption', 'sum'),
                  ],
        'hours': 0,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0},

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalMultiStep',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,

            # Example:
            #     'encoders': {'field1': {'fieldname': 'field1', 'n':100,
            #                             'name': 'field1', 'type': 'AdaptiveScalarEncoder',
            #                             'w': 21}}
            #
            'encoders': {
                'consumption': {
                    'clipInput': True,
                    'fieldname': u'consumption',
                    'n': 100,
                    'name': u'consumption',
                    'type': 'AdaptiveScalarEncoder',
                    'w': 21},
                'address': {
                    'fieldname': u'address',
                    'n': 300,
                    'name': u'address',
                    'type': 'SDRCategoryEncoder',
                    'w': 21},
                'gym': {
                    'fieldname': u'gym',
                    'n': 100,
                    'name': u'gym',
                    'type': 'SDRCategoryEncoder',
                    'w': 21},
                'timestamp_dayOfWeek': {
                    'dayOfWeek': (7, 3),
                    'fieldname': u'timestamp',
                    'name': u'timestamp_dayOfWeek',
                    'type': 'DateEncoder'},
                'timestamp_timeOfDay': {
                    'fieldname': u'timestamp',
                    'name': u'timestamp_timeOfDay',
                    'timeOfDay': (7, 8),
                    'type': 'DateEncoder'},
                # Separate copy of the predicted field routed only to the
                # classifier (classifierOnly=True).
                '_classifierInput': {
                    'name': u'_classifierInput',
                    'fieldname': u'consumption',
                    'classifierOnly': True,
                    'type': 'AdaptiveScalarEncoder',
                    'clipInput': True,
                    'n': 100,
                    'w': 21},
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : { u'days': 0, u'hours': 0},
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TP and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,
        },

        # Controls whether TP is enabled or disabled;
        # TP is necessary for making temporal predictions, such as predicting
        # the next inputs.  Without TP, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tpEnable' : True,

        'tpParams': {
            # TP diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TP how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            'regionName' : 'CLAClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'clVerbosity' : 0,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },

        'anomalyParams': { u'anomalyCacheRecords': None,
                           u'autoDetectThreshold': None,
                           u'autoDetectWaitRecords': None},

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# Experiment control section: dataset, iteration limits, inference arguments
# and the metrics computed/logged while the model runs.
control = {
  # The environment that the current model is being run in
  "environment": 'nupic',

  # Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
  #
  'dataset' : {
      u'info': u'test_hotgym',
      u'streams': [ { u'columns': [u'*'],
                      u'info': u'test data',
                      u'source': u'file://swarming/test_data.csv'}],
      u'version': 1},

  # Iteration count: maximum number of iterations.  Each iteration corresponds
  # to one record from the (possibly aggregated) dataset.  The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,

  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{u'predictedField': u'consumption', u'predictionSteps': [1]},

  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment
  'metrics':[
    MetricSpec(field=u'consumption', metric='multiStep',
               inferenceElement='multiStepBestPredictions',
               params={'window': 1000, 'steps': [1], 'errorMetric': 'altMAPE'}),
  ],

  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regex's correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*'],
}
# Entry point consumed by the OPF experiment runner.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
|
gpl-3.0
|
VanHulleOne/DogBone
|
matrixTrans.py
|
1
|
2048
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 07 17:44:20 2016
A module to store operations related to matrix tranformations.
@author: Luke
"""
import Point as p
import Line as l
import constants as c
import numpy
import math
def translateMatrix(shiftX, shiftY, shiftZ=0):
    """Return a 4x4 homogeneous translation matrix for the given shifts."""
    matrix = numpy.identity(4)
    # Translation components live in the last column of the matrix.
    for axis, shift in ((c.X, shiftX), (c.Y, shiftY), (c.Z, shiftZ)):
        matrix[axis][3] = shift
    return matrix
def rotateMatrix(angle, point=None):
    """Return a 4x4 matrix rotating by *angle* (radians) about *point*.

    Defaults to rotation about the origin; otherwise composed as
    translate-to-origin, rotate, translate-back.
    """
    if point is None:
        point = p.Point(0,0)
    toOrigin = translateMatrix(-point.x, -point.y)
    # NOTE(review): this local shadows the enclosing function's name.
    rotateMatrix = numpy.identity(4)
    # 2D rotation block: [[cos, -sin], [sin, cos]]
    rotateMatrix[c.X][0] = math.cos(angle)
    rotateMatrix[c.Y][0] = math.sin(angle)
    rotateMatrix[c.X][1] = -rotateMatrix[c.Y][0]
    rotateMatrix[c.Y][1] = rotateMatrix[c.X][0]
    transBack = translateMatrix(point.x, point.y)
    transMatrix = numpy.dot(transBack, numpy.dot(rotateMatrix, toOrigin))
    return transMatrix
def mirrorMatrix(axis):
    """Return a 4x4 mirror matrix about c.X, c.Y, or an arbitrary Line.

    For a Line the mirror is composed as: translate the line's start to
    the origin, rotate the line onto the X axis, mirror about X, then
    undo the rotation and translation.
    """
    transMatrix = numpy.identity(4)
    if type(axis) is l.Line:
        mList = []
        mList.append(translateMatrix(-axis.start.x, -axis.start.y)) # to origin
        # Angle between the line and the X axis; assumes axis.length != 0.
        angle = math.asin((axis.end.y-axis.start.y)/axis.length)
        mList.append(rotateMatrix(-angle)) # rotate onto X axis
        xMirror = numpy.identity(4)
        xMirror[c.Y][c.Y] = -1
        mList.append(xMirror) # mirror about X axis
        mList.append(rotateMatrix(angle)) # rotate back
        mList.append(translateMatrix(axis.start.x, axis.start.y)) # translate back
        # Compose in application order (later matrices multiply on the left).
        for matrix in mList:
            transMatrix = numpy.dot(matrix, transMatrix)
        return transMatrix
    if(axis == c.X):
        transMatrix[c.Y][c.Y] *= -1
    else:
        transMatrix[c.X][c.X] *= -1
    return transMatrix
def combineTransformations(matrixList):
    """Compose 4x4 transforms applied first-to-last.

    Equivalent to M_n . ... . M_2 . M_1 (later matrices multiply on the
    left), starting from the identity for an empty list.
    """
    combined = numpy.identity(4)
    for transform in matrixList:
        combined = numpy.dot(transform, combined)
    return combined
|
mit
|
csherwood-usgs/landlab
|
landlab/grid/tests/test_raster_funcs/test_is_on_grid.py
|
6
|
1700
|
import numpy as np
from numpy.testing import assert_array_equal
from nose import with_setup
from nose.tools import (assert_equal, assert_raises)
try:
from nose.tools import assert_is
except ImportError:
from landlab.testing.tools import assert_is
from landlab.grid import raster_funcs as rfuncs
from landlab import RasterModelGrid
def test_with_arrays():
    """Test with arrays as arg."""
    rmg = RasterModelGrid((4, 5), spacing=(2., 2.))
    # First point (1., 1.) is on the grid; (-1., -1.) is off it.
    coords = (np.array([1., -1.]), np.array([1., -1.]))
    assert_array_equal(rfuncs.is_coord_on_grid(rmg, coords),
                       np.array([True, False]))
def test_just_inside():
    """Test with points just inside the grid."""
    # Grid extent here is x in [0, 8] and y in [0, 6]; lower edges and
    # points epsilon below the upper edges count as on-grid.
    rmg = RasterModelGrid((4, 5), spacing=(2., 2.))
    assert_equal(rfuncs.is_coord_on_grid(rmg, (0., 4.)), True)
    assert_equal(rfuncs.is_coord_on_grid(rmg, (8. - 1e-12, 4.)), True)
    assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 0.)), True)
    assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 6. - 1e-12)), True)
def test_just_outside():
    """Test with points just outside the grid."""
    # Points epsilon below 0 and exactly at the upper edges report off-grid.
    rmg = RasterModelGrid((4, 5), spacing=(2., 2.))
    assert_equal(rfuncs.is_coord_on_grid(rmg, (0. - 1e-12, 4.)), False)
    assert_equal(rfuncs.is_coord_on_grid(rmg, (8., 4.)), False)
    assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 0. - 1e-12)), False)
    assert_equal(rfuncs.is_coord_on_grid(rmg, (3., 6.)), False)
def test_just_x():
    """Test whether points are within the grid's x bounds only (axes=(1, ))."""
    rmg = RasterModelGrid((4, 5), spacing=(2., 2.))
    assert_equal(rfuncs.is_coord_on_grid(rmg, (4., 1.e6), axes=(1, )), True)
    assert_equal(rfuncs.is_coord_on_grid(rmg, (-1., 1.), axes=(1, )), False)
|
mit
|
40223144/2015cdafinal
|
static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/sprite.py
|
603
|
55779
|
## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as arguments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.
#if 'callable' not in dir(__builtins__):
# NOTE(review): this unconditionally shadows the builtin 'callable' at module
# scope (the guard above is commented out); kept for Py2/Py3 parity.
callable = lambda obj: hasattr(obj, '__call__')

# Don't depend on pygame.mask if it's not there...
try:
    from pygame.mask import from_surface
except:
    pass
class Sprite(object):
    """Simple base class for visible game objects.

    pygame.sprite.Sprite(*groups): return Sprite

    Derived classes will want to override Sprite.update() and assign
    Sprite.image and Sprite.rect attributes.  The initializer accepts any
    number of Group instances the Sprite will become a member of.

    When subclassing the Sprite class, be sure to call the base initializer
    before adding the Sprite to Groups.
    """

    def __init__(self, *groups):
        # Maps each containing group to a dummy value; effectively an
        # ordered set of the groups this sprite belongs to.
        self.__g = {}
        if groups:
            self.add(*groups)

    def add(self, *groups):
        """Add the sprite to groups.

        Sprite.add(*groups): return None

        Arguments with a _spritegroup marker are treated as Groups; anything
        else is assumed to be an iterable of groups and added recursively.
        Adding to a group the sprite is already in is a no-op.
        """
        for group in groups:
            if hasattr(group, '_spritegroup'):
                if group not in self.__g:
                    group.add_internal(self)
                    self.add_internal(group)
            else:
                self.add(*group)

    def remove(self, *groups):
        """Remove the sprite from groups.

        Sprite.remove(*groups): return None

        Mirrors add(): non-Group arguments are iterated recursively, and
        removing from a group the sprite is not in is a no-op.
        """
        for group in groups:
            if hasattr(group, '_spritegroup'):
                if group in self.__g:
                    group.remove_internal(self)
                    self.remove_internal(group)
            else:
                self.remove(*group)

    def add_internal(self, group):
        # Bookkeeping half of Group.add(); does not notify the group.
        self.__g[group] = 0

    def remove_internal(self, group):
        # Bookkeeping half of Group.remove(); does not notify the group.
        del self.__g[group]

    def update(self, *args):
        """Hook for controlling sprite behavior; the default does nothing.

        Called by Group.update() with whatever arguments you give it.
        """
        pass

    def kill(self):
        """Remove the Sprite from all Groups that contain it.

        Sprite.kill(): return None

        The sprite's own state is untouched; it may be re-added later.
        """
        for group in self.__g:
            group.remove_internal(self)
        self.__g.clear()

    def groups(self):
        """Return a list of all the Groups that contain this Sprite."""
        return list(self.__g)

    def alive(self):
        """Return True when the Sprite belongs to one or more Groups."""
        return truth(self.__g)

    def __repr__(self):
        return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
    """a more featureful subclass of Sprite with more attributes
    pygame.sprite.DirtySprite(*groups): return DirtySprite
    Extra DirtySprite attributes with their default values:
    dirty = 1
        1 means repaint once, then reset to 0; 2 means always dirty
        (repainted every frame, flag never reset); 0 means clean, so the
        sprite is not repainted.
    blendmode = 0
        The special_flags argument of Surface.blit; see the blendmodes in
        the Surface.blit documentation.
    source_rect = None
        Source rect to use when blitting, relative to the top left corner
        (0, 0) of self.image.
    visible = 1
        1 by default. When 0 the sprite is not repainted. (After switching
        visible back to 1, set dirty to 1 so it gets redrawn.)
    _layer = 0
        A READ ONLY value, consumed when the sprite is added to a
        LayeredUpdates group; see sprite.LayeredUpdates.
    """
    def __init__(self, *groups):
        # Establish the dirty-tracking state *before* Sprite.__init__ so
        # that LayeredDirty groups see valid attributes while adding us.
        self.dirty = 1
        self.blendmode = 0  # pygame 1.8; the special_flags arg of Surface.blit
        self._visible = 1
        self._layer = 0  # READ ONLY by LayeredUpdates or LayeredDirty
        self.source_rect = None
        Sprite.__init__(self, *groups)
    def _set_visible(self, val):
        """set the visible value (0 or 1) and mark the sprite dirty"""
        self._visible = val
        if self.dirty < 2:
            self.dirty = 1
    def _get_visible(self):
        """return the visible value of this sprite"""
        return self._visible
    # The lambdas keep the lookup dynamic: a subclass overriding
    # _get_visible/_set_visible is still honoured by the property.
    visible = property(lambda self: self._get_visible(),
                       lambda self, value: self._set_visible(value),
                       doc="you can make this sprite disappear without "
                           "removing it from the group,\n"
                           "assign 0 for invisible and 1 for visible")
    def __repr__(self):
        cls_name = self.__class__.__name__
        return "<%s DirtySprite(in %d groups)>" % (cls_name, len(self.groups()))
class AbstractGroup(object):
    """base class for containers of sprites
    AbstractGroup does everything needed to behave as a normal group. You can
    easily subclass a new group class from this or the other groups below if
    you want to add more features.
    Any AbstractGroup-derived sprite groups act like sequences and support
    iteration, len, and so on.
    """
    # dummy val to identify sprite groups, and avoid infinite recursion
    _spritegroup = True
    def __init__(self):
        # spritedict maps each member sprite to its last-drawn rect
        # (initially 0, meaning "never drawn"); lostsprites collects the
        # rects of removed sprites so clear() can erase them next frame.
        self.spritedict = {}
        self.lostsprites = []
    def sprites(self):
        """get a list of sprites in the group
        Group.sprite(): return list
        Returns an object that can be looped over with a 'for' loop. (For now,
        it is always a list, but this could change in a future version of
        pygame.) Alternatively, you can get the same information by iterating
        directly over the sprite group, e.g. 'for sprite in group'.
        """
        return list(self.spritedict)
    def add_internal(self, sprite):
        # 0 marks "not yet drawn"; draw() replaces it with a real rect.
        self.spritedict[sprite] = 0
    def remove_internal(self, sprite):
        # Remember the last-drawn rect (if the sprite was ever drawn) so
        # clear() can erase its screen area later.
        r = self.spritedict[sprite]
        if r:
            self.lostsprites.append(r)
        del self.spritedict[sprite]
    def has_internal(self, sprite):
        return sprite in self.spritedict
    def copy(self):
        """copy a group with all the same sprites
        Group.copy(): return Group
        Returns a copy of the group that is an instance of the same class
        and has the same sprites in it.
        NOTE(review): assumes the subclass constructor accepts an iterable
        of sprites as its only positional argument -- confirm for custom
        subclasses.
        """
        return self.__class__(self.sprites())
    def __iter__(self):
        return iter(self.sprites())
    def __contains__(self, sprite):
        return self.has(sprite)
    def add(self, *sprites):
        """add sprite(s) to group
        Group.add(sprite, list, group, ...): return None
        Adds a sprite or sequence of sprites to a group.
        """
        for sprite in sprites:
            # It's possible that some sprite is also an iterator.
            # If this is the case, we should add the sprite itself,
            # and not the iterator object.
            if isinstance(sprite, Sprite):
                if not self.has_internal(sprite):
                    self.add_internal(sprite)
                    sprite.add_internal(self)
            else:
                try:
                    # See if sprite is an iterator, like a list or sprite
                    # group.
                    self.add(*sprite)
                except (TypeError, AttributeError):
                    # Not iterable. This is probably a sprite that is not an
                    # instance of the Sprite class or is not an instance of a
                    # subclass of the Sprite class. Alternately, it could be an
                    # old-style sprite group.
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if not self.has_internal(spr):
                                self.add_internal(spr)
                                spr.add_internal(self)
                    elif not self.has_internal(sprite):
                        self.add_internal(sprite)
                        sprite.add_internal(self)
    def remove(self, *sprites):
        """remove sprite(s) from group
        Group.remove(sprite, list, or group, ...): return None
        Removes a sprite or sequence of sprites from a group.
        """
        # This function behaves essentially the same as Group.add. It first
        # tries to handle each argument as an instance of the Sprite class. If
        # that failes, then it tries to handle the argument as an iterable
        # object. If that failes, then it tries to handle the argument as an
        # old-style sprite group. Lastly, if that fails, it assumes that the
        # normal Sprite methods should be used.
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                if self.has_internal(sprite):
                    self.remove_internal(sprite)
                    sprite.remove_internal(self)
            else:
                try:
                    self.remove(*sprite)
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                self.remove_internal(spr)
                                spr.remove_internal(self)
                    elif self.has_internal(sprite):
                        self.remove_internal(sprite)
                        sprite.remove_internal(self)
    def has(self, *sprites):
        """ask if group has a sprite or sprites
        Group.has(sprite or group, ...): return bool
        Returns True if the given sprite or sprites are contained in the
        group. Alternatively, you can get the same information using the
        'in' operator, e.g. 'sprite in group', 'subgroup in group'.
        Every argument must be contained for the result to be True; the
        first miss short-circuits to False. Called with no arguments it
        returns False.
        """
        return_value = False
        for sprite in sprites:
            if isinstance(sprite, Sprite):
                # Check for Sprite instance's membership in this group
                if self.has_internal(sprite):
                    return_value = True
                else:
                    return False
            else:
                try:
                    if self.has(*sprite):
                        return_value = True
                    else:
                        return False
                except (TypeError, AttributeError):
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if self.has_internal(spr):
                                return_value = True
                            else:
                                return False
                    else:
                        if self.has_internal(sprite):
                            return_value = True
                        else:
                            return False
        return return_value
    def update(self, *args):
        """call the update method of every member sprite
        Group.update(*args): return None
        Calls the update method of every member sprite. All arguments that
        were passed to this method are passed to the Sprite update function.
        """
        for s in self.sprites():
            s.update(*args)
    def draw(self, surface):
        """draw all sprites onto the surface
        Group.draw(surface): return None
        Draws all of the member sprites onto the given surface.
        """
        #from javascript import console
        sprites = self.sprites()
        surface_blit = surface.blit
        for spr in sprites:
            #console.log(spr.image, spr.rect)
            #console.log(spr.image._canvas.width, spr.image._canvas.height)
            # Remember where each sprite was drawn so clear() can erase it.
            self.spritedict[spr] = surface_blit(spr.image, spr.rect)
        self.lostsprites = []
    def clear(self, surface, bgd):
        """erase the previous position of all sprites
        Group.clear(surface, bgd): return None
        Clears the area under every drawn sprite in the group. The bgd
        argument should be Surface which is the same dimensions as the
        screen surface. The bgd could also be a function which accepts
        the given surface and the area to be cleared as arguments.
        """
        if callable(bgd):
            # Callback form: let the caller repaint each dirty area.
            for r in self.lostsprites:
                bgd(surface, r)
            for r in self.spritedict.values():
                if r:
                    bgd(surface, r)
        else:
            # Surface form: blit the background over each dirty area.
            surface_blit = surface.blit
            for r in self.lostsprites:
                surface_blit(bgd, r, r)
            for r in self.spritedict.values():
                if r:
                    surface_blit(bgd, r, r)
    def empty(self):
        """remove all sprites
        Group.empty(): return None
        Removes all the sprites from the group.
        """
        for s in self.sprites():
            self.remove_internal(s)
            s.remove_internal(self)
    def __nonzero__(self):
        # Truthiness mirrors "group has at least one sprite" (Python 2 hook).
        return truth(self.sprites())
    def __len__(self):
        """return number of sprites in group
        Group.len(group): return int
        Returns the number of sprites contained in the group.
        """
        return len(self.sprites())
    def __repr__(self):
        return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
    """container class for many Sprites
    pygame.sprite.Group(*sprites): return Group
    A simple container for Sprite objects. This class can be subclassed to
    create containers with more specific behaviors. The constructor takes any
    number of Sprite arguments to add to the Group. The group supports the
    following standard Python operations:
    in test if a Sprite is contained
    len the number of Sprites contained
    bool test if any Sprites are contained
    iter iterate through all the Sprites
    The Sprites in the Group are not ordered, so the Sprites are drawn and
    iterated over in no particular order.
    """
    def __init__(self, *sprites):
        # Initialise the empty base container, then add every argument
        # (each may be a sprite, an iterable of sprites, or another group).
        AbstractGroup.__init__(self)
        self.add(*sprites)
# Backwards-compatible aliases: older pygame code used these names for
# the plain Group class.
RenderPlain = Group
RenderClear = Group
class RenderUpdates(Group):
    """Group class that tracks dirty updates
    pygame.sprite.RenderUpdates(*sprites): return RenderUpdates
    This class is derived from pygame.sprite.Group(). Its enhanced draw
    method additionally reports the changed areas of the screen as a list
    of rects.
    """
    def draw(self, surface):
        """Draw every sprite and return the list of changed rects.
        For each sprite, the previously drawn rect (if any) and the newly
        drawn rect are folded into the dirty list: overlapping old/new
        rects are merged into their union, disjoint ones are reported
        separately. Rects of sprites removed since the last draw
        (lostsprites) are included as well.
        """
        dirty = self.lostsprites
        self.lostsprites = []
        for spr in self.sprites():
            old_rect = self.spritedict[spr]
            new_rect = surface.blit(spr.image, spr.rect)
            if not old_rect:
                # First time this sprite is drawn.
                dirty.append(new_rect)
            elif new_rect.colliderect(old_rect):
                dirty.append(new_rect.union(old_rect))
            else:
                dirty.append(new_rect)
                dirty.append(old_rect)
            self.spritedict[spr] = new_rect
        return dirty
class OrderedUpdates(RenderUpdates):
    """RenderUpdates class that draws Sprites in order of addition
    pygame.sprite.OrderedUpdates(*spites): return OrderedUpdates
    This class derives from pygame.sprite.RenderUpdates().  It maintains
    the order in which the Sprites were added to the Group for rendering.
    This makes adding and removing Sprites from the Group a little
    slower than regular Groups.
    """
    def __init__(self, *sprites):
        # Ordered list of members, kept in sync with the base spritedict.
        self._spritelist = []
        RenderUpdates.__init__(self, *sprites)
    def sprites(self):
        # Return a copy so callers may mutate the result freely.
        return list(self._spritelist)
    def add_internal(self, sprite):
        RenderUpdates.add_internal(self, sprite)
        self._spritelist.append(sprite)
    def remove_internal(self, sprite):
        RenderUpdates.remove_internal(self, sprite)
        self._spritelist.remove(sprite)
class LayeredUpdates(AbstractGroup):
    """LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
    pygame.sprite.LayeredUpdates(*spites, **kwargs): return LayeredUpdates
    This group is fully compatible with pygame.sprite.Sprite.
    New in pygame 1.8.0
    """
    # Sentinel meaning "sprite has not been drawn yet"; compared by
    # identity in draw() and remove_internal().
    _init_rect = Rect(0, 0, 0, 0)
    def __init__(self, *sprites, **kwargs):
        """initialize an instance of LayeredUpdates with the given attributes
        You can set the default layer through kwargs using 'default_layer'
        and an integer for the layer. The default layer is 0.
        If the sprite you add has an attribute _layer, then that layer will be
        used. If **kwarg contains 'layer', then the passed sprites will be
        added to that layer (overriding the sprite._layer attribute). If
        neither the sprite nor **kwarg has a 'layer', then the default layer is
        used to add the sprites.
        """
        # _spritelayers maps sprite -> layer; _spritelist stays sorted by
        # layer, preserving insertion order within each layer.
        self._spritelayers = {}
        self._spritelist = []
        AbstractGroup.__init__(self)
        self._default_layer = kwargs.get('default_layer', 0)
        self.add(*sprites, **kwargs)
    def add_internal(self, sprite, layer=None):
        """Do not use this method directly.
        It is used by the group to add a sprite internally.
        """
        self.spritedict[sprite] = self._init_rect
        if layer is None:
            try:
                layer = sprite._layer
            except AttributeError:
                layer = sprite._layer = self._default_layer
        elif hasattr(sprite, '_layer'):
            sprite._layer = layer
        sprites = self._spritelist # speedup
        sprites_layers = self._spritelayers
        sprites_layers[sprite] = layer
        # add the sprite at the right position
        # bisect algorithmus
        leng = len(sprites)
        low = mid = 0
        high = leng - 1
        while low <= high:
            mid = low + (high - low) // 2
            if sprites_layers[sprites[mid]] <= layer:
                low = mid + 1
            else:
                high = mid - 1
        # linear search to find final position
        # (after all existing sprites of the same layer, so insertion
        # order within a layer is preserved)
        while mid < leng and sprites_layers[sprites[mid]] <= layer:
            mid += 1
        sprites.insert(mid, sprite)
    def add(self, *sprites, **kwargs):
        """add a sprite or sequence of sprites to a group
        LayeredUpdates.add(*sprites, **kwargs): return None
        If the sprite you add has an attribute _layer, then that layer will be
        used. If **kwarg contains 'layer', then the passed sprites will be
        added to that layer (overriding the sprite._layer attribute). If
        neither the sprite nor **kwarg has a 'layer', then the default layer is
        used to add the sprites.
        """
        if not sprites:
            return
        if 'layer' in kwargs:
            layer = kwargs['layer']
        else:
            layer = None
        for sprite in sprites:
            # It's possible that some sprite is also an iterator.
            # If this is the case, we should add the sprite itself,
            # and not the iterator object.
            if isinstance(sprite, Sprite):
                if not self.has_internal(sprite):
                    self.add_internal(sprite, layer)
                    sprite.add_internal(self)
            else:
                try:
                    # See if sprite is an iterator, like a list or sprite
                    # group.
                    self.add(*sprite, **kwargs)
                except (TypeError, AttributeError):
                    # Not iterable. This is probably a sprite that is not an
                    # instance of the Sprite class or is not an instance of a
                    # subclass of the Sprite class. Alternately, it could be an
                    # old-style sprite group.
                    if hasattr(sprite, '_spritegroup'):
                        for spr in sprite.sprites():
                            if not self.has_internal(spr):
                                self.add_internal(spr, layer)
                                spr.add_internal(self)
                    elif not self.has_internal(sprite):
                        self.add_internal(sprite, layer)
                        sprite.add_internal(self)
    def remove_internal(self, sprite):
        """Do not use this method directly.
        The group uses it to add a sprite.
        """
        self._spritelist.remove(sprite)
        # these dirty rects are suboptimal for one frame
        r = self.spritedict[sprite]
        if r is not self._init_rect:
            self.lostsprites.append(r) # dirty rect
        if hasattr(sprite, 'rect'):
            self.lostsprites.append(sprite.rect) # dirty rect
        del self.spritedict[sprite]
        del self._spritelayers[sprite]
    def sprites(self):
        """return a ordered list of sprites (first back, last top).
        LayeredUpdates.sprites(): return sprites
        """
        return list(self._spritelist)
    def draw(self, surface):
        """draw all sprites in the right order onto the passed surface
        LayeredUpdates.draw(surface): return Rect_list
        """
        spritedict = self.spritedict
        surface_blit = surface.blit
        dirty = self.lostsprites
        self.lostsprites = []
        dirty_append = dirty.append
        init_rect = self._init_rect
        for spr in self.sprites():
            rec = spritedict[spr]
            newrect = surface_blit(spr.image, spr.rect)
            if rec is init_rect:
                # First draw of this sprite: the whole new rect is dirty.
                dirty_append(newrect)
            else:
                if newrect.colliderect(rec):
                    dirty_append(newrect.union(rec))
                else:
                    dirty_append(newrect)
                    dirty_append(rec)
            spritedict[spr] = newrect
        return dirty
    def get_sprites_at(self, pos):
        """return a list with all sprites at that position
        LayeredUpdates.get_sprites_at(pos): return colliding_sprites
        Bottom sprites are listed first; the top ones are listed last.
        """
        _sprites = self._spritelist
        rect = Rect(pos, (0, 0))
        colliding_idx = rect.collidelistall(_sprites)
        colliding = [_sprites[i] for i in colliding_idx]
        return colliding
    def get_sprite(self, idx):
        """return the sprite at the index idx from the groups sprites
        LayeredUpdates.get_sprite(idx): return sprite
        Raises IndexOutOfBounds if the idx is not within range.
        """
        return self._spritelist[idx]
    def remove_sprites_of_layer(self, layer_nr):
        """remove all sprites from a layer and return them as a list
        LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
        """
        sprites = self.get_sprites_from_layer(layer_nr)
        self.remove(*sprites)
        return sprites
    #---# layer methods
    def layers(self):
        """return a list of unique defined layers defined.
        LayeredUpdates.layers(): return layers
        """
        return sorted(set(self._spritelayers.values()))
    def change_layer(self, sprite, new_layer):
        """change the layer of the sprite
        LayeredUpdates.change_layer(sprite, new_layer): return None
        The sprite must have been added to the renderer already. This is not
        checked.
        """
        sprites = self._spritelist # speedup
        sprites_layers = self._spritelayers # speedup
        sprites.remove(sprite)
        sprites_layers.pop(sprite)
        # add the sprite at the right position
        # bisect algorithmus
        leng = len(sprites)
        low = mid = 0
        high = leng - 1
        while low <= high:
            mid = low + (high - low) // 2
            if sprites_layers[sprites[mid]] <= new_layer:
                low = mid + 1
            else:
                high = mid - 1
        # linear search to find final position
        while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
            mid += 1
        sprites.insert(mid, sprite)
        # NOTE(review): this updates 'layer', but sprites in this module
        # store their layer in '_layer' (see DirtySprite), so this check is
        # usually False and sprite._layer goes stale -- confirm intended.
        if hasattr(sprite, 'layer'):
            sprite.layer = new_layer
        # add layer info
        sprites_layers[sprite] = new_layer
    def get_layer_of_sprite(self, sprite):
        """return the layer that sprite is currently in
        If the sprite is not found, then it will return the default layer.
        """
        return self._spritelayers.get(sprite, self._default_layer)
    def get_top_layer(self):
        """return the top layer
        LayeredUpdates.get_top_layer(): return layer
        """
        return self._spritelayers[self._spritelist[-1]]
    def get_bottom_layer(self):
        """return the bottom layer
        LayeredUpdates.get_bottom_layer(): return layer
        """
        return self._spritelayers[self._spritelist[0]]
    def move_to_front(self, sprite):
        """bring the sprite to front layer
        LayeredUpdates.move_to_front(sprite): return None
        Brings the sprite to front by changing the sprite layer to the top-most
        layer. The sprite is added at the end of the list of sprites in that
        top-most layer.
        """
        self.change_layer(sprite, self.get_top_layer())
    def move_to_back(self, sprite):
        """move the sprite to the bottom layer
        LayeredUpdates.move_to_back(sprite): return None
        Moves the sprite to the bottom layer by moving it to a new layer below
        the current bottom layer.
        """
        self.change_layer(sprite, self.get_bottom_layer() - 1)
    def get_top_sprite(self):
        """return the topmost sprite
        LayeredUpdates.get_top_sprite(): return Sprite
        """
        return self._spritelist[-1]
    def get_sprites_from_layer(self, layer):
        """return all sprites from a layer ordered as they where added
        LayeredUpdates.get_sprites_from_layer(layer): return sprites
        Returns all sprites from a layer. The sprites are ordered in the
        sequence that they where added. (The sprites are not removed from the
        layer.
        """
        sprites = []
        sprites_append = sprites.append
        sprite_layers = self._spritelayers
        # _spritelist is sorted by layer, so we can stop at the first
        # sprite belonging to a higher layer.
        for spr in self._spritelist:
            if sprite_layers[spr] == layer:
                sprites_append(spr)
            elif sprite_layers[spr] > layer:# break after because no other will
                                            # follow with same layer
                break
        return sprites
    def switch_layer(self, layer1_nr, layer2_nr):
        """switch the sprites from layer1_nr to layer2_nr
        LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
        The layers number must exist. This method does not check for the
        existence of the given layers.
        """
        sprites1 = self.remove_sprites_of_layer(layer1_nr)
        for spr in self.get_sprites_from_layer(layer2_nr):
            self.change_layer(spr, layer1_nr)
        self.add(layer=layer2_nr, *sprites1)
class LayeredDirty(LayeredUpdates):
    """LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates
    pygame.sprite.LayeredDirty(*spites, **kwargs): return LayeredDirty
    This group requires pygame.sprite.DirtySprite or any sprite that
    has the following attributes:
    image, rect, dirty, visible, blendmode (see doc of DirtySprite).
    It uses the dirty flag technique and is therefore faster than
    pygame.sprite.RenderUpdates if you have many static sprites.  It
    also switches automatically between dirty rect updating and full
    screen drawing, so you do no have to worry which would be faster.
    As with the pygame.sprite.Group, you can specify some additional attributes
    through kwargs:
    _use_update: True/False   (default is False)
    _default_layer: default layer where the sprites without a layer are
        added
    _time_threshold: treshold time for switching between dirty rect mode
        and fullscreen mode; defaults to updating at 80 frames per second,
        which is equal to 1000.0 / 80.0
    New in pygame 1.8.0
    """
    def __init__(self, *sprites, **kwargs):
        """initialize group.
        pygame.sprite.LayeredDirty(*spites, **kwargs): return LayeredDirty
        You can specify some additional attributes through kwargs:
        _use_update: True/False   (default is False)
        _default_layer: default layer where the sprites without a layer are
            added
        _time_threshold: treshold time for switching between dirty rect
            mode and fullscreen mode; defaults to updating at 80 frames per
            second, which is equal to 1000.0 / 80.0
        """
        LayeredUpdates.__init__(self, *sprites, **kwargs)
        self._clip = None
        self._use_update = False
        self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps
        self._bgd = None
        # Apply the recognised kwargs over the defaults set above.
        for key, val in kwargs.items():
            if key in ['_use_update', '_time_threshold', '_default_layer']:
                if hasattr(self, key):
                    setattr(self, key, val)
    def add_internal(self, sprite, layer=None):
        """Do not use this method directly.
        It is used by the group to add a sprite internally.
        """
        # check if all needed attributes are set
        if not hasattr(sprite, 'dirty'):
            raise AttributeError()
        if not hasattr(sprite, 'visible'):
            raise AttributeError()
        if not hasattr(sprite, 'blendmode'):
            raise AttributeError()
        if not isinstance(sprite, DirtySprite):
            raise TypeError()
        if sprite.dirty == 0: # set it dirty if it is not
            sprite.dirty = 1
        LayeredUpdates.add_internal(self, sprite, layer)
    def draw(self, surface, bgd=None):
        """draw all sprites in the right order onto the given surface
        LayeredDirty.draw(surface, bgd=None): return Rect_list
        You can pass the background too. If a self.bgd is already set to some
        value that is not None, then the bgd argument has no effect.
        """
        # speedups
        _orig_clip = surface.get_clip()
        _clip = self._clip
        if _clip is None:
            _clip = _orig_clip
        _surf = surface
        _sprites = self._spritelist
        _old_rect = self.spritedict
        _update = self.lostsprites
        _update_append = _update.append
        _ret = None
        _surf_blit = _surf.blit
        _rect = Rect
        if bgd is not None:
            self._bgd = bgd
        _bgd = self._bgd
        init_rect = self._init_rect
        _surf.set_clip(_clip)
        # -------
        # 0. decide whether to render with update or flip
        start_time = get_ticks()
        if self._use_update: # dirty rects mode
            # 1. find dirty area on screen and put the rects into _update
            # still not happy with that part
            for spr in _sprites:
                if 0 < spr.dirty:
                    # chose the right rect
                    if spr.source_rect:
                        _union_rect = _rect(spr.rect.topleft,
                                            spr.source_rect.size)
                    else:
                        _union_rect = _rect(spr.rect)
                    # Merge all rects that overlap the sprite's new area
                    # into one, to keep _update free of overlapping rects.
                    _union_rect_collidelist = _union_rect.collidelist
                    _union_rect_union_ip = _union_rect.union_ip
                    i = _union_rect_collidelist(_update)
                    while -1 < i:
                        _union_rect_union_ip(_update[i])
                        del _update[i]
                        i = _union_rect_collidelist(_update)
                    _update_append(_union_rect.clip(_clip))
                    if _old_rect[spr] is not init_rect:
                        # Also mark the sprite's previous position dirty.
                        _union_rect = _rect(_old_rect[spr])
                        _union_rect_collidelist = _union_rect.collidelist
                        _union_rect_union_ip = _union_rect.union_ip
                        i = _union_rect_collidelist(_update)
                        while -1 < i:
                            _union_rect_union_ip(_update[i])
                            del _update[i]
                            i = _union_rect_collidelist(_update)
                        _update_append(_union_rect.clip(_clip))
            # can it be done better? because that is an O(n**2) algorithm in
            # worst case
            # clear using background
            if _bgd is not None:
                for rec in _update:
                    _surf_blit(_bgd, rec, rec)
            # 2. draw
            for spr in _sprites:
                if 1 > spr.dirty:
                    if spr._visible:
                        # sprite not dirty; blit only the intersecting part
                        _spr_rect = spr.rect
                        if spr.source_rect is not None:
                            _spr_rect = Rect(spr.rect.topleft,
                                             spr.source_rect.size)
                        _spr_rect_clip = _spr_rect.clip
                        for idx in _spr_rect.collidelistall(_update):
                            # clip
                            clip = _spr_rect_clip(_update[idx])
                            _surf_blit(spr.image,
                                       clip,
                                       (clip[0] - _spr_rect[0],
                                        clip[1] - _spr_rect[1],
                                        clip[2],
                                        clip[3]),
                                       spr.blendmode)
                else: # dirty sprite
                    if spr._visible:
                        _old_rect[spr] = _surf_blit(spr.image,
                                                    spr.rect,
                                                    spr.source_rect,
                                                    spr.blendmode)
                    if spr.dirty == 1:
                        spr.dirty = 0
            _ret = list(_update)
        else: # flip, full screen mode
            if _bgd is not None:
                _surf_blit(_bgd, (0, 0))
            for spr in _sprites:
                if spr._visible:
                    _old_rect[spr] = _surf_blit(spr.image,
                                                spr.rect,
                                                spr.source_rect,
                                                spr.blendmode)
            _ret = [_rect(_clip)] # return only the part of the screen changed
        # timing for switching modes
        # How may a good threshold be found? It depends on the hardware.
        end_time = get_ticks()
        if end_time-start_time > self._time_threshold:
            self._use_update = False
        else:
            self._use_update = True
        ## # debug
        ## print "               check: using dirty rects:", self._use_update
        # emtpy dirty rects list
        _update[:] = []
        # -------
        # restore original clip
        _surf.set_clip(_orig_clip)
        return _ret
    def clear(self, surface, bgd):
        """use to set background
        Group.clear(surface, bgd): return None
        """
        self._bgd = bgd
    def repaint_rect(self, screen_rect):
        """repaint the given area
        LayeredDirty.repaint_rect(screen_rect): return None
        screen_rect is in screen coordinates.
        """
        if self._clip:
            self.lostsprites.append(screen_rect.clip(self._clip))
        else:
            self.lostsprites.append(Rect(screen_rect))
    def set_clip(self, screen_rect=None):
        """clip the area where to draw; pass None (default) to reset the clip
        LayeredDirty.set_clip(screen_rect=None): return None
        """
        if screen_rect is None:
            self._clip = pygame.display.get_surface().get_rect()
        else:
            self._clip = screen_rect
        # Force a full-screen redraw after the clip changes.
        self._use_update = False
    def get_clip(self):
        """get the area where drawing will occur
        LayeredDirty.get_clip(): return Rect
        """
        return self._clip
    def change_layer(self, sprite, new_layer):
        """change the layer of the sprite
        LayeredUpdates.change_layer(sprite, new_layer): return None
        The sprite must have been added to the renderer already. This is not
        checked.
        """
        LayeredUpdates.change_layer(self, sprite, new_layer)
        if sprite.dirty == 0:
            sprite.dirty = 1
    def set_timing_treshold(self, time_ms):
        """set the treshold in milliseconds
        set_timing_treshold(time_ms): return None
        Defaults to 1000.0 / 80.0. This means that the screen will be painted
        using the flip method rather than the update method if the update
        method is taking so long to update the screen that the frame rate falls
        below 80 frames per second.
        """
        self._time_threshold = time_ms
class GroupSingle(AbstractGroup):
    """A group container that holds a single most recent item.
    Works like a regular group but never holds more than one sprite: the
    sprite added most recently is the only member. The contained sprite is
    available as the .sprite attribute; assigning to that attribute removes
    the previous sprite and then adds the new one.
    """
    def __init__(self, sprite=None):
        AbstractGroup.__init__(self)
        self.__sprite = None
        if sprite is not None:
            self.add(sprite)
    def copy(self):
        return GroupSingle(self.__sprite)
    def sprites(self):
        if self.__sprite is None:
            return []
        return [self.__sprite]
    def add_internal(self, sprite):
        # Displace the current occupant, if any, then take the new sprite.
        occupant = self.__sprite
        if occupant is not None:
            occupant.remove_internal(self)
            self.remove_internal(occupant)
        self.__sprite = sprite
    def __nonzero__(self):
        return self.__sprite is not None
    def _get_sprite(self):
        return self.__sprite
    def _set_sprite(self, sprite):
        self.add_internal(sprite)
        sprite.add_internal(self)
        return sprite
    sprite = property(_get_sprite,
                      _set_sprite,
                      None,
                      "The sprite contained in this group")
    def remove_internal(self, sprite):
        if sprite is self.__sprite:
            self.__sprite = None
        if sprite in self.spritedict:
            AbstractGroup.remove_internal(self, sprite)
    def has_internal(self, sprite):
        return self.__sprite is sprite
    # Optimizations...
    def __contains__(self, sprite):
        return self.__sprite is sprite
# Some different collision detection functions that could be used.
def collide_rect(left, right):
    """collision detection between two sprites, using rects.
    pygame.sprite.collide_rect(left, right): return bool
    Delegates to pygame.Rect.colliderect on the two sprites' "rect"
    attributes. Intended to be passed as the collided callback to the
    *collide functions; both sprites must have a "rect" attribute.
    New in pygame 1.8.0
    """
    rect_a = left.rect
    rect_b = right.rect
    return rect_a.colliderect(rect_b)
class collide_rect_ratio:
    """A callable class that checks for collisions using scaled rects
    Checks for collisions between two sprites using a scaled version of
    each sprite's rect. Construct it with a ratio; the instance is then
    intended to be passed as the collided callback to the *collide
    functions.
    New in pygame 1.8.1
    """
    def __init__(self, ratio):
        """create a new collide_rect_ratio callable
        *ratio* is a floating point value used to scale the underlying
        sprite rect before checking for collisions.
        """
        self.ratio = ratio
    def __call__(self, left, right):
        """detect collision between two sprites using scaled rects
        pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool
        Scales both sprites' rects by the stored ratio (about their
        centres) and tests them with pygame.Rect.colliderect. Sprites
        must have "rect" attributes.
        """
        scale = self.ratio
        def grown(rect):
            # Rect.inflate keeps the centre fixed while changing the size.
            w = rect.width
            h = rect.height
            return rect.inflate(w * scale - w, h * scale - h)
        return grown(left.rect).colliderect(grown(right.rect))
def collide_circle(left, right):
    """detect collision between two sprites using circles
    pygame.sprite.collide_circle(left, right): return bool
    Tests whether two circles centred on the sprites overlap. A sprite's
    "radius" attribute, when present, defines its circle; otherwise a
    circle big enough to enclose the sprite's rect is derived from the
    rect's diagonal and cached back onto the sprite. Intended to be
    passed as the collided callback to the *collide functions. Sprites
    must have a "rect" and an optional "radius" attribute.
    New in pygame 1.8.0
    """
    dx = left.rect.centerx - right.rect.centerx
    dy = left.rect.centery - right.rect.centery
    gap_squared = dx * dx + dy * dy
    if hasattr(left, 'radius'):
        leftradius = left.radius
    else:
        rect = left.rect
        # Half the rect's diagonal encloses the whole sprite; may give
        # false positives for long, thin rects.
        leftradius = 0.5 * ((rect.width ** 2 + rect.height ** 2) ** 0.5)
        left.radius = leftradius  # cache for next time
    if hasattr(right, 'radius'):
        rightradius = right.radius
    else:
        rect = right.rect
        rightradius = 0.5 * ((rect.width ** 2 + rect.height ** 2) ** 0.5)
        right.radius = rightradius  # cache for next time
    return gap_squared <= (leftradius + rightradius) ** 2
class collide_circle_ratio(object):
    """detect collision between two sprites using scaled circles
    This callable class checks for collisions between two sprites using a
    scaled version of a sprite's radius. It is created with a ratio as the
    argument to the constructor. The instance is then intended to be passed as
    a collided callback function to the *collide functions.
    New in pygame 1.8.1
    """
    def __init__(self, ratio):
        """creates a new collide_circle_ratio callable instance
        The given ratio is expected to be a floating point value used to scale
        the underlying sprite radius before checking for collisions.
        When the ratio is ratio=1.0, then it behaves exactly like the
        collide_circle method.
        """
        self.ratio = ratio
    def __call__(self, left, right):
        """detect collision between two sprites using scaled circles
        pygame.sprite.collide_circle_radio(ratio)(left, right): return bool
        Tests for collision between two sprites by testing whether two circles
        centered on the sprites overlap after scaling the circle's radius by
        the stored ratio. If the sprites have a "radius" attribute, that is
        used to create the circle; otherwise, a circle is created that is big
        enough to completely enclose the sprite's rect as given by the "rect"
        attribute. Intended to be passed as a collided callback function to the
        *collide functions. Sprites must have a "rect" and an optional "radius"
        attribute.
        """
        ratio = self.ratio
        xdistance = left.rect.centerx - right.rect.centerx
        ydistance = left.rect.centery - right.rect.centery
        distancesquared = xdistance ** 2 + ydistance ** 2
        if hasattr(left, "radius"):
            leftradius = left.radius
        else:
            leftrect = left.rect
            leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
            # BUG FIX: cache the *unscaled* base radius on the sprite.
            # Previously the ratio-scaled value was stored, so later calls
            # (or plain collide_circle) reused a polluted radius and this
            # callable scaled it a second time.
            left.radius = leftradius
        leftradius = leftradius * ratio
        if hasattr(right, "radius"):
            rightradius = right.radius
        else:
            rightrect = right.rect
            rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
            # cache the unscaled base radius (see note above)
            right.radius = rightradius
        rightradius = rightradius * ratio
        return distancesquared <= (leftradius + rightradius) ** 2
def collide_mask(left, right):
    """Collision detection between two sprites, using masks.

    pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool

    Tests whether the bitmasks of the two sprites overlap. A sprite's
    "mask" attribute is used when present; otherwise a mask is built from
    the sprite's image. Intended to be passed as a collided callback
    function to the *collide functions. Sprites must have a "rect" and an
    optional "mask" attribute.

    New in pygame 1.8.0
    """
    # Offset of the right sprite relative to the left one.
    offset = (right.rect[0] - left.rect[0], right.rect[1] - left.rect[1])
    _missing = object()
    leftmask = getattr(left, "mask", _missing)
    if leftmask is _missing:
        leftmask = from_surface(left.image)
    rightmask = getattr(right, "mask", _missing)
    if rightmask is _missing:
        rightmask = from_surface(right.image)
    return leftmask.overlap(rightmask, offset)
def spritecollide(sprite, group, dokill, collided=None):
    """Find Sprites in a Group that intersect another Sprite.

    pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
        return Sprite_list

    Returns a list of all Sprites in ``group`` that intersect ``sprite``.
    When ``dokill`` is True every colliding Sprite is also removed from all
    of its Groups. ``collided`` is an optional callback taking two sprites
    and returning a bool; when it is not supplied, every sprite must have a
    "rect" attribute, and rectangle intersection is used instead.
    """
    # Build a single-argument collision predicate covering both modes.
    if collided:
        hit = lambda other: collided(sprite, other)
    else:
        rect_collides = sprite.rect.colliderect
        hit = lambda other: rect_collides(other.rect)
    if not dokill:
        return [member for member in group if hit(member)]
    # Destructive variant: kill each colliding sprite as it is collected.
    crashed = []
    for member in group.sprites():
        if hit(member):
            member.kill()
            crashed.append(member)
    return crashed
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
    """Detect collisions between all sprites of two groups.

    pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
        return dict

    Returns a dictionary mapping each sprite of ``groupa`` that collides to
    the list of sprites in ``groupb`` it collides with. ``dokilla`` and
    ``dokillb`` control whether colliding sprites are removed from their
    groups. ``collided`` is an optional two-sprite callback returning a
    bool; without it, every sprite needs a "rect" attribute and rectangle
    intersection is used.
    """
    crashed = {}
    collide = spritecollide
    if dokilla:
        # Iterate a snapshot of groupa since members get killed while looping.
        for member in groupa.sprites():
            hits = collide(member, groupb, dokillb, collided)
            if hits:
                crashed[member] = hits
                member.kill()
    else:
        for member in groupa:
            hits = collide(member, groupb, dokillb, collided)
            if hits:
                crashed[member] = hits
    return crashed
def spritecollideany(sprite, group, collided=None):
    """Find any one sprite in a group that collides with the given sprite.

    pygame.sprite.spritecollideany(sprite, group): return sprite

    Returns a single colliding sprite from ``group``, or None when nothing
    collides. Faster than spritecollide when only existence matters.
    ``collided`` is an optional two-sprite callback returning a bool;
    without it, every sprite needs a "rect" attribute and rectangle
    intersection is used.
    """
    if collided:
        for member in group:
            if collided(sprite, member):
                return member
        return None
    # Special-cased rect-only path, kept for speed.
    rect_collides = sprite.rect.colliderect
    for member in group:
        if rect_collides(member.rect):
            return member
    return None
|
gpl-3.0
|
sfermigier/flask-linktester
|
docs/conf.py
|
1
|
1823
|
# -*- coding: utf-8 -*-
# Sphinx build configuration for the Flask-LinkTester documentation.
from __future__ import unicode_literals
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(
    0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.append(os.path.abspath('_themes'))
# Reused
# Pull the release number from the package's setup module so the docs always
# match the distributed version (the project root was put on sys.path above).
from setup import VERSION
NAME = "Flask-LinkTester"
YEAR = "2012-2017"
AUTHOR = "Stefane Fermigier"
# -- General configuration -----------------------------------------------------
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = NAME
copyright = u"%s, %s" % (YEAR, AUTHOR)
version = VERSION
release = VERSION
exclude_patterns = ['_build']
# -- Options for HTML output ---------------------------------------------------
html_theme = 'flask_small'
html_theme_path = ['_themes']
html_static_path = ['_static']
html_theme_options = {
    #'index_logo': 'flask-testing.png', # TODO
    'github_fork': 'sfermigier/flask-linktester'
}
htmlhelp_basename = 'flask-linktesterdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_elements = {
    'papersize': 'a4',
}
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', '%s.tex' % NAME.lower(), u'%s Documentation' % NAME,
     AUTHOR, 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', str(NAME.lower()), u'%s Documentation' % NAME, [AUTHOR], 1)
]
|
bsd-3-clause
|
nstockton/barcode-finder
|
setup.py
|
1
|
4474
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# py2exe build script: freezes the application into a Windows executable.
import glob
import os
import shutil
import sys
import zlib
from distutils.core import setup
import py2exe
from constants import APP_NAME, APP_VERSION, APP_AUTHOR
# ModuleFinder can't handle runtime changes to __path__, but win32com uses them
try:
    # py2exe 0.6.4 introduced a replacement modulefinder.
    # This means we have to add package paths there, not to the built-in one.
    # If this new modulefinder gets integrated into Python, then we might be able to revert this some day.
    # if this doesn't work, try import modulefinder
    try:
        import py2exe.mf as modulefinder
    except ImportError:
        import modulefinder
    import win32com, sys
    # Register each extra win32com package directory with the module finder
    # so its submodules get bundled into the executable.
    for p in win32com.__path__[1:]:
        modulefinder.AddPackagePath("win32com", p)
    for extra in ["win32com.shell"]:
        __import__(extra)
        m = sys.modules[extra]
        for p in m.__path__[1:]:
            modulefinder.AddPackagePath(extra, p)
except ImportError:
    # win32com (or py2exe) is not installed; skip the path registration.
    pass
# Remove the build folder if it exists.
shutil.rmtree("build", ignore_errors=True)
# do the same for dist folder if it exists.
shutil.rmtree("dist", ignore_errors=True)
# If run without args, build executables, in quiet mode.
if len(sys.argv) == 1:
    sys.argv.append("py2exe")
    sys.argv.append("-q")
class Target(object):
    """Container describing one py2exe build target.

    Arbitrary keyword arguments become instance attributes; the fields
    used for the win32 version-info resource are then filled in from the
    application constants.
    """
    def __init__(self, **kw):
        for attr, value in kw.items():
            setattr(self, attr, value)
        # for the versioninfo resources
        self.version = APP_VERSION
        self.company_name = ""
        self.copyright = APP_AUTHOR
        self.name = APP_NAME
# The manifest will be inserted as a resource into the executable. This gives the controls the Windows XP appearance (if run on XP ;-)
manifest_template = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="1fc8b3b9a1e18e3b">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
RT_MANIFEST = 24
program = Target(
# used for the versioninfo resource
description = "%s V%s" % (APP_NAME, APP_VERSION),
# what to build
script = "%s.pyw" % APP_NAME,
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog=APP_NAME))],
icon_resources = [(1, "%s.ico" % APP_NAME)],
dest_base = APP_NAME
)
excludes = [
"_ssl",
"_gtkagg",
"_tkagg",
"bsddb",
"curses",
"email",
"pywin.debugger",
"pywin.debugger.dbgcon",
"pywin.dialogs",
"tcl",
"Tkconstants",
"Tkinter",
"pdbunittest",
"difflib",
"pyreadline",
"optparse",
"pickle",
"calendar",
]
packages = [
"xml.etree",
"json",
"encodings.utf_8",
"encodings.ascii",
"encodings.latin_1",
"encodings.hex_codec"
]
dll_excludes = [
"libgdk-win32-2.0-0.dll",
"libgobject-2.0-0.dll",
"tcl84.dll",
"tk84.dll",
"MSVCP90.dll",
"mswsock.dll",
"powrprof.dll",
"python23.dll",
"_sre.pyd",
"_winreg.pyd",
"unicodedata.pyd",
"zlib.pyd",
"wxc.pyd",
"wxmsw24uh.dll",
"w9xpopen.exe",
]
setup(
options = {
"py2exe": {
"bundle_files": True,
"ascii": True,
"compressed": True,
"optimize": 2,
"excludes": excludes,
"packages": packages,
"dll_excludes": dll_excludes,
}
},
zipfile = None,
windows = [program],
data_files = [
("sounds", glob.glob("sounds\\*")),
("speech_libs", glob.glob("speech_libs\\*")),
],
)
# Remove the build folder since we no longer need it.
shutil.rmtree("build", ignore_errors=True)
|
mpl-2.0
|
mromanoff/schedule-appointment
|
client/vendor/bower_components/jasmine/lib/jasmine-core/core.py
|
163
|
1481
|
import pkg_resources
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class Core(object):
    """Locates the JavaScript, CSS and image assets bundled with
    jasmine-core via pkg_resources."""

    @classmethod
    def js_package(cls):
        # JavaScript files live directly in this package.
        return __package__

    @classmethod
    def css_package(cls):
        # CSS files live directly in this package.
        return __package__

    @classmethod
    def image_package(cls):
        # Images live in the ".images" sub-package.
        return __package__ + ".images"

    @classmethod
    def js_files(cls):
        """Return the bundled .js file names in required load order."""
        js_files = sorted(
            f for f in pkg_resources.resource_listdir(cls.js_package(), '.')
            if '.js' in f)
        # jasmine.js needs to be first
        js_files.insert(0, 'jasmine.js')
        # boot needs to be last
        js_files.remove('boot.js')
        js_files.append('boot.js')
        return cls._uniq(js_files)

    @classmethod
    def css_files(cls):
        """Return the bundled .css file names, sorted and deduplicated."""
        return cls._uniq(sorted(
            f for f in pkg_resources.resource_listdir(cls.css_package(), '.')
            if '.css' in f))

    @classmethod
    def favicon(cls):
        """File name of the favicon image."""
        return 'jasmine_favicon.png'

    @classmethod
    def _uniq(cls, items, idfun=None):
        """Return *items* with duplicates removed, preserving order.

        ``idfun`` maps an item to its identity key and defaults to the
        item itself.  Fix: the first parameter is named ``cls`` to match
        the ``@classmethod`` decorator (it was misleadingly ``self``).
        """
        if idfun is None:
            def idfun(x):
                return x
        seen = {}
        result = []
        for item in items:
            marker = idfun(item)
            if marker not in seen:
                seen[marker] = 1
                result.append(item)
        return result
|
mit
|
linfuzki/autokey
|
src/lib/gtkui/settingsdialog.py
|
46
|
9114
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys
from gi.repository import Gtk
from autokey.configmanager import *
from autokey import iomediator, model, common
from dialogs import GlobalHotkeyDialog
import configwindow
# System-wide launcher installed by the package.
DESKTOP_FILE = "/usr/share/applications/autokey-gtk.desktop"
# Per-user autostart location; a copy of the .desktop file here makes
# autokey start with the desktop session.
AUTOSTART_DIR = os.path.expanduser("~/.config/autostart")
AUTOSTART_FILE = os.path.join(AUTOSTART_DIR, "autokey-gtk.desktop")
# Maps the translated icon-style label shown in the combo box to the
# corresponding notification icon file.
ICON_NAME_MAP = {
                _("Light") : common.ICON_FILE_NOTIFICATION,
                _("Dark") : common.ICON_FILE_NOTIFICATION_DARK
                }
# Filled by SettingsDialog.__init__ in combo-box row order, so the active
# combo index can be mapped back to an icon file name.
ICON_NAME_LIST = []
class SettingsDialog:
    """AutoKey preferences dialog (GTK).

    Wraps the "settingsdialog" GtkBuilder UI and reads/writes the global
    ConfigManager.SETTINGS entries for the general options, the two global
    hotkeys and the user script-engine module directory.
    """
    # Key-name translation tables shared with the hotkey-capture dialog.
    KEY_MAP = GlobalHotkeyDialog.KEY_MAP
    REVERSE_KEY_MAP = GlobalHotkeyDialog.REVERSE_KEY_MAP
    def __init__(self, parent, configManager):
        """Build the dialog from its GtkBuilder file and populate every
        widget from the current configuration state.

        parent: window this dialog is made transient for.
        configManager: the application's configuration manager.
        """
        builder = configwindow.get_ui("settingsdialog.xml")
        self.ui = builder.get_object("settingsdialog")
        builder.connect_signals(self)
        self.ui.set_transient_for(parent)
        self.configManager = configManager
        # General Settings
        self.autoStartCheckbox = builder.get_object("autoStartCheckbox")
        self.promptToSaveCheckbox = builder.get_object("promptToSaveCheckbox")
        self.showTrayCheckbox = builder.get_object("showTrayCheckbox")
        self.allowKbNavCheckbox = builder.get_object("allowKbNavCheckbox")
        # Keyboard-navigation option is currently disabled (its
        # MENU_TAKES_FOCUS lines below are commented out).
        self.allowKbNavCheckbox.hide()
        self.sortByUsageCheckbox = builder.get_object("sortByUsageCheckbox")
        self.enableUndoCheckbox = builder.get_object("enableUndoCheckbox")
        # The icon-style combo box is created in code and packed into hbox4.
        self.iconStyleCombo = Gtk.ComboBoxText.new()
        hbox = builder.get_object("hbox4")
        hbox.pack_start(self.iconStyleCombo, False, True, 0)
        hbox.show_all()
        # ICON_NAME_LIST mirrors the combo rows so the active index can be
        # mapped back to an icon file name in on_save().
        for key, value in ICON_NAME_MAP.items():
            self.iconStyleCombo.append_text(key)
            ICON_NAME_LIST.append(value)
        self.iconStyleCombo.set_sensitive(ConfigManager.SETTINGS[SHOW_TRAY_ICON])
        self.iconStyleCombo.set_active(ICON_NAME_LIST.index(ConfigManager.SETTINGS[NOTIFICATION_ICON]))
        # Autostart is considered enabled when the per-user file exists.
        self.autoStartCheckbox.set_active(os.path.exists(AUTOSTART_FILE))
        self.promptToSaveCheckbox.set_active(ConfigManager.SETTINGS[PROMPT_TO_SAVE])
        self.showTrayCheckbox.set_active(ConfigManager.SETTINGS[SHOW_TRAY_ICON])
        #self.allowKbNavCheckbox.set_active(ConfigManager.SETTINGS[MENU_TAKES_FOCUS])
        self.sortByUsageCheckbox.set_active(ConfigManager.SETTINGS[SORT_BY_USAGE_COUNT])
        self.enableUndoCheckbox.set_active(ConfigManager.SETTINGS[UNDO_USING_BACKSPACE])
        # Hotkeys
        self.showConfigDlg = GlobalHotkeyDialog(parent, configManager, self.on_config_response)
        self.toggleMonitorDlg = GlobalHotkeyDialog(parent, configManager, self.on_monitor_response)
        self.configKeyLabel = builder.get_object("configKeyLabel")
        self.clearConfigButton = builder.get_object("clearConfigButton")
        self.monitorKeyLabel = builder.get_object("monitorKeyLabel")
        self.clearMonitorButton = builder.get_object("clearMonitorButton")
        self.useConfigHotkey = self.__loadHotkey(configManager.configHotkey, self.configKeyLabel,
                                                 self.showConfigDlg, self.clearConfigButton)
        self.useServiceHotkey = self.__loadHotkey(configManager.toggleServiceHotkey, self.monitorKeyLabel,
                                                  self.toggleMonitorDlg, self.clearMonitorButton)
        # Script Engine Settings
        self.userModuleChooserButton = builder.get_object("userModuleChooserButton")
        if configManager.userCodeDir is not None:
            self.userModuleChooserButton.set_current_folder(configManager.userCodeDir)
            # Temporarily drop the user code dir from sys.path; on_save()
            # re-appends whatever folder is finally chosen.
            if configManager.userCodeDir in sys.path:
                sys.path.remove(configManager.userCodeDir)
    def on_save(self, widget, data=None):
        """Persist every widget's state to the configuration and close.

        Handles the autostart .desktop file, the boolean settings, the
        notification icon, the user code directory and both global hotkeys.
        """
        if self.autoStartCheckbox.get_active():
            # Install the per-user autostart entry by copying the system
            # .desktop file.
            if not os.path.exists(AUTOSTART_FILE):
                try:
                    inFile = open(DESKTOP_FILE, 'r')
                    outFile = open(AUTOSTART_FILE, 'w')
                    contents = inFile.read()
                    # Strips a trailing " -c" from a line of the .desktop
                    # file -- presumably the Exec line's CLI switch; TODO
                    # confirm against the installed .desktop file.
                    contents = contents.replace(" -c\n", "\n")
                    outFile.write(contents)
                    inFile.close()
                    outFile.close()
                except:
                    # NOTE(review): best-effort copy; any failure (missing
                    # file, permissions) is silently ignored and autostart
                    # simply stays disabled.
                    pass
        else:
            if os.path.exists(AUTOSTART_FILE):
                os.remove(AUTOSTART_FILE)
        ConfigManager.SETTINGS[PROMPT_TO_SAVE] = self.promptToSaveCheckbox.get_active()
        ConfigManager.SETTINGS[SHOW_TRAY_ICON] = self.showTrayCheckbox.get_active()
        #ConfigManager.SETTINGS[MENU_TAKES_FOCUS] = self.allowKbNavCheckbox.get_active()
        ConfigManager.SETTINGS[SORT_BY_USAGE_COUNT] = self.sortByUsageCheckbox.get_active()
        ConfigManager.SETTINGS[UNDO_USING_BACKSPACE] = self.enableUndoCheckbox.get_active()
        ConfigManager.SETTINGS[NOTIFICATION_ICON] = ICON_NAME_MAP[self.iconStyleCombo.get_active_text()]
        self.configManager.userCodeDir = self.userModuleChooserButton.get_current_folder()
        sys.path.append(self.configManager.userCodeDir)
        # Re-register both global hotkeys: unbind the old hotkey first,
        # then save and re-create it if it is still enabled.
        configHotkey = self.configManager.configHotkey
        toggleHotkey = self.configManager.toggleServiceHotkey
        app = self.configManager.app
        if configHotkey.enabled:
            app.hotkey_removed(configHotkey)
        configHotkey.enabled = self.useConfigHotkey
        if self.useConfigHotkey:
            self.showConfigDlg.save(configHotkey)
            app.hotkey_created(configHotkey)
        if toggleHotkey.enabled:
            app.hotkey_removed(toggleHotkey)
        toggleHotkey.enabled = self.useServiceHotkey
        if self.useServiceHotkey:
            self.toggleMonitorDlg.save(toggleHotkey)
            app.hotkey_created(toggleHotkey)
        app.update_notifier_visibility()
        # Mark the configuration as changed so it gets written to disk.
        self.configManager.config_altered(True)
        self.hide()
        self.destroy()
    def on_cancel(self, widget, data=None):
        """Close the dialog without saving anything."""
        self.hide()
        self.destroy()
    def __getattr__(self, attr):
        # Magic fudge to allow us to pretend to be the ui class we encapsulate
        return getattr(self.ui, attr)
    def __loadHotkey(self, item, label, dialog, clearButton):
        """Initialise one hotkey row from a hotkey model item.

        Loads the item into its capture dialog, displays the current
        binding (or a placeholder) and returns whether it is enabled.
        """
        dialog.load(item)
        if item.enabled:
            # NOTE(review): 'key' is computed but never used -- candidate
            # for removal.
            key = item.hotKey.encode("utf-8")
            label.set_text(item.get_hotkey_string())
            clearButton.set_sensitive(True)
            return True
        else:
            label.set_text(_("(None configured)"))
            clearButton.set_sensitive(False)
            return False
    # ---- Signal handlers
    def on_showTrayCheckbox_toggled(self, widget, data=None):
        # The icon style only matters while the tray icon is shown.
        self.iconStyleCombo.set_sensitive(widget.get_active())
    def on_setConfigButton_pressed(self, widget, data=None):
        # Open the capture dialog for the "show configuration" hotkey.
        self.showConfigDlg.run()
    def on_config_response(self, res):
        """Capture-dialog callback: record the new configuration hotkey."""
        if res == Gtk.ResponseType.OK:
            self.useConfigHotkey = True
            key = self.showConfigDlg.key
            modifiers = self.showConfigDlg.build_modifiers()
            self.configKeyLabel.set_text(self.build_hotkey_string(key, modifiers))
            self.clearConfigButton.set_sensitive(True)
    def on_clearConfigButton_pressed(self, widget, data=None):
        """Disable the configuration-window hotkey."""
        self.useConfigHotkey = False
        self.clearConfigButton.set_sensitive(False)
        self.configKeyLabel.set_text(_("(None configured)"))
        self.showConfigDlg.reset()
    def on_setMonitorButton_pressed(self, widget, data=None):
        # Open the capture dialog for the "toggle monitoring" hotkey.
        self.toggleMonitorDlg.run()
    def on_monitor_response(self, res):
        """Capture-dialog callback: record the new monitoring hotkey."""
        if res == Gtk.ResponseType.OK:
            self.useServiceHotkey = True
            key = self.toggleMonitorDlg.key
            modifiers = self.toggleMonitorDlg.build_modifiers()
            self.monitorKeyLabel.set_text(self.build_hotkey_string(key, modifiers))
            self.clearMonitorButton.set_sensitive(True)
    def on_clearMonitorButton_pressed(self, widget, data=None):
        """Disable the service-toggle hotkey."""
        self.useServiceHotkey = False
        self.clearMonitorButton.set_sensitive(False)
        self.monitorKeyLabel.set_text(_("(None configured)"))
        self.toggleMonitorDlg.reset()
|
gpl-3.0
|
alsrgv/tensorflow
|
tensorflow/contrib/slim/python/slim/data/test_utils.py
|
163
|
3795
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import image_ops
def _encoded_int64_feature(ndarray):
  """Wraps the flattened values of `ndarray` in an int64-list Feature proto."""
  values = ndarray.flatten().tolist()
  return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _encoded_bytes_feature(tf_encoded):
  """Evaluates the `tf_encoded` tensor and wraps the result in a bytes-list
  Feature proto. Must be called within an active session (uses .eval())."""
  encoded = tf_encoded.eval()
  return feature_pb2.Feature(
      bytes_list=feature_pb2.BytesList(value=[encoded]))
def _string_feature(value):
  """Encodes the text string `value` as UTF-8 in a bytes-list Feature proto."""
  encoded = value.encode('utf-8')
  return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=[encoded]))
def _encoder(image, image_format):
  """Returns a TF op that encodes `image` in `image_format` ('jpeg'/'png')."""
  encoders = {'jpeg': image_ops.encode_jpeg, 'png': image_ops.encode_png}
  assert image_format in ['jpeg', 'png']
  encode = encoders.get(image_format)
  if encode is None:
    # Unreachable when assertions are enabled; mirrors the implicit None
    # return of the original for unknown formats.
    return None
  tf_image = constant_op.constant(image, dtype=dtypes.uint8)
  return encode(tf_image)
def generate_image(image_shape, image_format='jpeg', label=0):
  """Generates an image and an example containing the encoded image.

  GenerateImage must be called within an active session.

  Args:
    image_shape: the shape of the image to generate.
    image_format: the encoding format of the image.
    label: the int64 labels for the image.

  Returns:
    image: the generated image.
    example: a TF-example with a feature key 'image/encoded' set to the
      serialized image and a feature key 'image/format' set to the image
      encoding format ['jpeg', 'png'].
  """
  # Fix: np.random.random_integers was deprecated in NumPy 1.11 and removed
  # in NumPy 2.0; randint with an exclusive upper bound of 256 draws the
  # same inclusive [0, 255] range.
  image = np.random.randint(0, 256, size=image_shape)
  tf_encoded = _encoder(image, image_format)
  example = example_pb2.Example(features=feature_pb2.Features(feature={
      'image/encoded': _encoded_bytes_feature(tf_encoded),
      'image/format': _string_feature(image_format),
      'image/class/label': _encoded_int64_feature(np.array(label)),
  }))
  return image, example.SerializeToString()
def create_tfrecord_files(output_dir, num_files=3, num_records_per_file=10):
  """Creates TFRecords files.

  The method must be called within an active session.

  Args:
    output_dir: The directory where the files are stored.
    num_files: The number of files to create.
    num_records_per_file: The number of records per file.

  Returns:
    A list of the paths to the TFRecord files.
  """
  paths = []
  for file_index in range(num_files):
    record_path = os.path.join(
        output_dir, 'flowers.tfrecord-%d-of-%s' % (file_index, num_files))
    paths.append(record_path)
    writer = tf_record.TFRecordWriter(record_path)
    for _ in range(num_records_per_file):
      _, serialized = generate_image(image_shape=(10, 10, 3))
      writer.write(serialized)
    writer.close()
  return paths
|
apache-2.0
|
HubbleStack/Hubble
|
tests/unittests/test_readfile.py
|
2
|
23431
|
from __future__ import absolute_import
import json
import os
import sys
import yaml
import pytest
# Make the working directory importable so the hubblestack package resolves
# when the tests are run from a project checkout.
myPath = os.path.abspath(os.getcwd())
sys.path.insert(0, myPath)
import hubblestack.extmods.fdg.readfile
class TestReadfile():
'''
Class used to test the functions in ``readfile.py``
'''
def generate_data(self):
'''
Helping function to generate dict data to populate json/yaml files
'''
sample_data = {"id": "file",
"value": {"key1": "value1",
"key2": {"key3": "value2"}},
"menuitem": ["item1", "item2", "item3"]}
return sample_data
@pytest.fixture(scope="session")
def json_file(self, tmpdir_factory):
'''
Helping function that creates a ``.json`` sample file to test against
'''
sample_json = self.generate_data()
json_file = tmpdir_factory.mktemp("data").join("json_file.json")
json_file.write(str(json.dumps(sample_json)))
return str(json_file)
    # ---- readfile.json ----
    def test_json_InvalidPath_EmptyReturn(self):
        '''
        Test that given an invalid path, the json function returns False status
        and None value
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.json('/invalid/path')
        assert expected_status == status
        assert expected_ret == ret
    def test_json_SingleSubkey_ReturnsValue(self, json_file):
        '''
        Test that given a single subkey argument, the function extracts the correct value
        '''
        expected_status, expected_ret = True, "file"
        status, ret = hubblestack.extmods.fdg.readfile.json(json_file, subkey='id')
        assert expected_status == status
        assert expected_ret == ret
    def test_json_InvalidSingleSubkey_EmptyReturn(self, json_file):
        '''
        Test that given an invalid single subkey argument,
        the function returns False status and None value
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.json(json_file, subkey='invalid_key')
        assert expected_status == status
        assert expected_ret == ret
    def test_json_MultipleSubkeys_ReturnsValue(self, json_file):
        '''
        Test that given multiple subkeys, separated by a valid separator ``sep``,
        the function returns the correct value
        '''
        expected_status, expected_ret = True, "value2"
        status, ret = hubblestack.extmods.fdg.readfile.json(
            json_file, subkey='value,key2,key3', sep=',')
        assert expected_status == status
        assert expected_ret == ret
    def test_json_InvalidSep_EmptyReturn(self, json_file):
        '''
        Test that given multiple subkeys separated by an invalid separator ``sep``,
        the function returns False status and None value
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.json(
            json_file, subkey='value,key2,key3', sep='/')
        assert expected_status == status
        assert expected_ret == ret
    def test_json_IndexSubkey_ReturnsValue(self, json_file):
        '''
        Test that given an index as subkey, the function returns the correct value
        '''
        expected_status, expected_ret = True, "item2"
        status, ret = hubblestack.extmods.fdg.readfile.json(
            json_file, subkey='menuitem,1', sep=',')
        assert expected_status == status
        assert expected_ret == ret
    def test_json_InvalidIndexSubkey_EmptyReturn(self, json_file):
        '''
        Test that given an index as subkey that exceeds the list length,
        the function returns False status and None value
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.json(
            json_file, subkey='menuitem,15', sep=',')
        assert expected_status == status
        assert expected_ret == ret
    def test_json_EmptyFile_EmptyReturn(self, json_file):
        '''
        Test that given an empty json file, the function returns False status and None value
        '''
        # NOTE(review): truncating mutates the session-scoped fixture file,
        # so any json test running after this one sees an empty file --
        # confirm the intended test ordering.
        with open(json_file, 'r+') as invalid_file:
            invalid_file.truncate(0)
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.json(json_file, subkey='id')
        assert expected_status == status
        assert expected_ret == ret
    def test_json_InvalidJsonFile_EmptyReturn(self, json_file):
        '''
        Test that given an invalid json file, the function returns False status and None value
        '''
        # NOTE(review): overwrites the shared session-scoped fixture file
        # (see the note in test_json_EmptyFile_EmptyReturn).
        with open(json_file, 'w+') as invalid_file:
            invalid_file.write("InvalidJson")
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.json(json_file, subkey='id')
        assert expected_status == status
        assert expected_ret == ret
@pytest.fixture(scope="session")
def yaml_file(self, tmpdir_factory):
'''
Helping function that creates a ``.yaml`` sample file to test against
'''
sample_yaml = self.generate_data()
yaml_file = tmpdir_factory.mktemp("data").join("yaml_file.yaml")
yaml_file.write(str(yaml.dump(sample_yaml)))
return str(yaml_file)
    # ---- readfile.yaml ----
    def test_yaml_InvalidPath_EmptyReturn(self):
        '''
        Test that given an invalid path, the yaml function returns False status
        and an empty return value
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.yaml('/invalid/path')
        assert expected_status == status
        assert expected_ret == ret
    def test_yaml_SingleSubkey_ReturnsValue(self, yaml_file):
        '''
        Test that given a single subkey argument, the function extracts the appropriate value
        '''
        expected_status, expected_ret = True, "file"
        status, ret = hubblestack.extmods.fdg.readfile.yaml(yaml_file, subkey='id')
        assert expected_status == status
        assert expected_ret == ret
    def test_yaml_InvalidSingleSubkey_EmptyReturn(self, yaml_file):
        '''
        Test that given an invalid single subkey argument,
        the function returns False status and empty value
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.yaml(yaml_file, subkey='invalid_key')
        assert expected_status == status
        assert expected_ret == ret
    def test_yaml_MultipleSubkeys_ReturnsValue(self, yaml_file):
        '''
        Test that given multiple subkeys, separated by a valid separator,
        the function returns the appropriate value
        '''
        expected_status, expected_ret = True, "value2"
        status, ret = hubblestack.extmods.fdg.readfile.yaml(
            yaml_file, subkey='value,key2,key3', sep=',')
        assert expected_status == status
        assert expected_ret == ret
    def test_yaml_InvalidSep_EmptyReturn(self, yaml_file):
        '''
        Test that given multiple subkeys separated by an invalid ``sep``,
        the function returns a False status and None value
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.yaml(
            yaml_file, subkey='value,key2,key3', sep='/')
        assert expected_status == status
        assert expected_ret == ret
    def test_yaml_IndexSubkey_ReturnsValue(self, yaml_file):
        '''
        Test that given an index as subkey, the function returns the appropriate value
        '''
        expected_status, expected_ret = True, "item2"
        status, ret = hubblestack.extmods.fdg.readfile.yaml(
            yaml_file, subkey='menuitem,1', sep=',')
        assert expected_status == status
        assert expected_ret == ret
    def test_yaml_InvalidIndexSubkey_EmptyReturn(self, yaml_file):
        '''
        Test that given an index as subkey that exceeds the list length,
        the function returns False status and None value
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.yaml(
            yaml_file, subkey='menuitem,15', sep=',')
        assert expected_status == status
        assert expected_ret == ret
    def test_yaml_EmptyFile_EmptyReturn(self, yaml_file):
        '''
        Test that given an empty yaml file, the function returns False status and None value
        '''
        # NOTE(review): truncating mutates the session-scoped fixture file,
        # so any yaml test running after this one sees an empty file --
        # confirm the intended test ordering.
        with open(yaml_file, 'r+') as invalid_file:
            invalid_file.truncate(0)
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.yaml(yaml_file, subkey='id')
        assert expected_status == status
        assert expected_ret == ret
    def _test_yaml_InvalidJsonFile_EmptyReturn(self, yaml_file):
        '''
        Test that given an invalid yaml file, the function returns False status and None value
        '''
        # NOTE(review): the leading underscore keeps pytest from collecting
        # this test (and the name says "Json" although it exercises yaml) --
        # confirm whether it is deliberately disabled.
        with open(yaml_file, 'w+') as invalid_file:
            invalid_file.write("invalidyaml")
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.yaml(yaml_file, subkey='id')
        assert expected_status == status
        assert expected_ret == ret
def test_checkPattern_EmptyPatternEmptyIgnore_ReturnTrue(self):
'''
Test that given an empty ``pattern`` and empty ``ignore_pattern``, the function returns True
'''
expected_ret = True
ret = hubblestack.extmods.fdg.readfile._check_pattern('Sample text', None, None)
assert expected_ret == ret
def test_checkPattern_EmptyPatternValidIgnore_ReturnFalse(self):
'''
Test that given an empty ``pattern`` and a valid ``ignore_pattern``,
the function returns False
'''
expected_ret = False
ret = hubblestack.extmods.fdg.readfile._check_pattern('invalid text', None, 'invalid.*')
assert expected_ret == ret
def test_checkPattern_EmptyPatternInvalidIgnore_ReturnTrue(self):
'''
Test that given an empty ``pattern`` and an invalid ``ignore_pattern``,
the function returns True
'''
expected_ret = True
ret = hubblestack.extmods.fdg.readfile._check_pattern('Sample text', None, 'invalid')
assert expected_ret == ret
def test_checkPattern_ValidPatternValidIgnore_ReturnFalse(self):
'''
Test that given a valid``pattern`` and a valid ``ignore_pattern``,
the function returns False
'''
expected_ret = False
line = 'valid and invalid text'
ret = hubblestack.extmods.fdg.readfile._check_pattern(line, 'valid.*', '.*invalid.*')
assert expected_ret == ret
def test_checkPattern_ValidPatternInvalidIgnore_ReturnTrue(self):
'''
Test that given a valid``pattern`` and an invalid ``ignore_pattern``,
the function returns True
'''
expected_ret = True
line = 'valid text'
ret = hubblestack.extmods.fdg.readfile._check_pattern(line, 'valid', 'invalid')
assert expected_ret == ret
    def test_checkPattern_ValidPatternEmptyIgnore_ReturnTrue(self):
        '''
        Test that given a valid ``pattern`` and an empty ``ignore_pattern``,
        the function returns True.
        '''
        expected_ret = True
        line = 'valid text'
        ret = hubblestack.extmods.fdg.readfile._check_pattern(line, 'valid', None)
        assert expected_ret == ret
    def test_checkPattern_InvalidPatternInvalidIgnore_ReturnFalse(self):
        '''
        Test that given a ``pattern`` and an ``ignore_pattern`` that both fail
        to match the line, the function returns False.
        '''
        expected_ret = False
        line = 'Line with invalid text'
        ret = hubblestack.extmods.fdg.readfile._check_pattern(line, 'bad pattern', 'bad ignore')
        assert expected_ret == ret
    def test_checkPattern_InvalidPatternValidIgnore_ReturnFalse(self):
        '''
        Test that given a non-matching ``pattern`` and a matching
        ``ignore_pattern``, the function returns False.
        '''
        expected_ret = False
        line = 'Line with invalid text'
        ret = hubblestack.extmods.fdg.readfile._check_pattern(line, 'bad pattern', '.*invalid.*')
        assert expected_ret == ret
    def test_checkPattern_InvalidPatternEmptyIgnore_ReturnFalse(self):
        '''
        Test that given a non-matching ``pattern`` and an empty
        ``ignore_pattern``, the function returns False.
        '''
        expected_ret = False
        line = 'Line with invalid text'
        ret = hubblestack.extmods.fdg.readfile._check_pattern(line, 'bad pattern', None)
        assert expected_ret == ret
    def test_processLine_ValidArguments_ReturnDict(self):
        '''
        Test that given valid arguments, the function returns a valid dictionary.

        ``dictsep`` splits key from value, ``valsep`` splits the value into
        items, and ``subsep`` splits each item into a sub key/value pair.
        '''
        expected_key, expected_val = 'APP_ATTRIBUTES', {'cluster_role': 'controol',
                                                        'provider': 'aws',
                                                        'zone': '3'}
        line = "APP_ATTRIBUTES=cluster_role:controol;zone:3;provider:aws"
        key, val = hubblestack.extmods.fdg.readfile._process_line(
            line, dictsep='=', valsep=';', subsep=':')
        assert expected_key == key
        assert expected_val == val
    def test_processLine_ValidArgumentsDuplicateKeys_ReturnDict(self):
        '''
        Test that given valid arguments, if the input data contains duplicate
        keys, only one entry per key survives in the returned dict.

        Note: the input has ``zone:6`` followed by ``zone:3``; the expected
        value ``'3'`` shows the last occurrence wins.
        '''
        expected_key, expected_val = 'APP_ATTRIBUTES', {'cluster_role': 'controol',
                                                        'provider': 'aws',
                                                        'zone': '3'}
        line = "APP_ATTRIBUTES=cluster_role:controol;zone:6;provider:aws;zone:3"
        key, val = hubblestack.extmods.fdg.readfile._process_line(
            line, dictsep='=', valsep=';', subsep=':')
        assert expected_key == key
        assert expected_val == val
    def test_processLine_EmptyArguemnts_ReturnLine(self):
        '''
        Test that given empty separator arguments, the line is returned
        unchanged (with ``None`` as the second element of the pair).
        '''
        line = "line of text"
        ret, none = hubblestack.extmods.fdg.readfile._process_line(line, None, None, None)
        assert ret == line
        assert none is None
    def test_processLine_ValidDictsepValsepEmptySubsep_ReturnList(self):
        '''
        Test that given a valid ``dictsep``, a valid ``valsep`` and an empty
        ``subsep``, the value is returned as a flat list (no sub-splitting).
        '''
        expected_key, expected_val = 'key0', ['key1', 'key2', 'val']
        line = "key0:key1;key2;val"
        key, val = hubblestack.extmods.fdg.readfile._process_line(line, ':', ';', None)
        assert expected_key == key
        assert expected_val == val
    def test_processLine_ValidDictsepInvalidValsep_ReturnList(self):
        '''
        Test that given a valid ``dictsep`` and a ``valsep`` that does not
        occur in the line, a single-element list is returned.
        '''
        expected_key, expected_val = 'key0', ['key1;key2;val']
        line = "key0:key1;key2;val"
        key, val = hubblestack.extmods.fdg.readfile._process_line(line, ':', '-', None)
        assert expected_key == key
        assert expected_val == val
    def test_processLine_ValidDictsepValsepInvalidSubsep_ReturnDict(self):
        '''
        Test that given a valid ``dictsep``, a valid ``valsep`` and a
        ``subsep`` that does not occur in the items, a dict is returned whose
        keys are the whole items and whose values are all ``None``.
        '''
        expected_key, expected_val = 'APP_ATTRIBUTES', {'cluster_role:controol': None,
                                                        'provider:aws': None,
                                                        'zone:3': None}
        line = "APP_ATTRIBUTES=cluster_role:controol;zone:3;provider:aws"
        key, val = hubblestack.extmods.fdg.readfile._process_line(line, '=', ';', '-')
        assert expected_key == key
        assert expected_val == val
    def test_processLine_ValidDictsepSubsepInvalidValsep_ReturnDict(self):
        '''
        Test that given a valid ``dictsep``, a valid ``subsep`` and a
        ``valsep`` that does not occur in the line, the whole value is treated
        as one item and split only on ``subsep``.
        '''
        expected_key, expected_val = 'key0', {'key1;val': 'val2'}
        line = "key0:key1;val-val2"
        key, val = hubblestack.extmods.fdg.readfile._process_line(line, ':', '.', '-')
        assert expected_key == key
        assert expected_val == val
    def test_processLine_InvalidDictsep_ReturnLine(self):
        '''
        Test that given a ``dictsep`` that does not occur in the line, the
        line is returned unchanged with ``None`` as the second element.
        '''
        line = "key0:key1;val-val2"
        ret, none = hubblestack.extmods.fdg.readfile._process_line(line, '?', '.', '-')
        assert ret == line
        assert none is None
def generate_config_data(self):
'''
Sample data to use for testing the ``config`` function
'''
sample_data = ["APP_ATTRIBUTES=cluster_role:control;zone:3;provider:aws",
"APP_ATTRIBUTES=cluster_role:worker;zone:1;provider:aws",
"APP_ATTRIBUTES=cluster_role:master;zone:0;provider:aws"]
return sample_data
    @pytest.fixture(scope="session")
    def config_file(self, tmpdir_factory):
        '''
        Fixture that writes the sample config data to a temporary file and
        returns its path, for testing the ``config`` function against.

        Session scope: the same file is shared by all ``config`` tests.
        '''
        sample_data = "\n".join(self.generate_config_data())
        config_file = tmpdir_factory.mktemp("data").join("config_file")
        config_file.write(sample_data)
        return str(config_file)
    def test_config_EmptyArguments_ReturnList(self, config_file):
        '''
        Test that given empty arguments, the function returns a list with
        the file's lines as elements.
        '''
        expected_status, expected_ret = True, self.generate_config_data()
        status, ret = hubblestack.extmods.fdg.readfile.config(config_file)
        assert expected_status == status
        assert expected_ret == ret
    def test_config_InvalidPath_ReturnNone(self):
        '''
        Test that given an invalid ``path``, the function returns a False
        status and ``None``.
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.config('/invalid/path')
        assert expected_status == status
        assert expected_ret == ret
    def test_config_OnlyDictsep_ReturnDict(self, config_file):
        '''
        Test that given a valid ``dictsep`` and empty arguments,
        the function returns a valid ``dict``.

        All sample lines share the key APP_ATTRIBUTES, so the values are
        collected into a list under that single key.
        '''
        sample_data = self.generate_config_data()
        expected_status, expected_ret = True, {"APP_ATTRIBUTES": [x.split("=")[1]
                                                                  for x in sample_data]}
        status, ret = hubblestack.extmods.fdg.readfile.config(config_file, dictsep="=")
        assert expected_status == status
        assert expected_ret == ret
    def test_config_SamePatternIgnore_ReturnEmptyDict(self, config_file):
        '''
        Test that given the same ``pattern`` and ``ignore_pattern``,
        every line is filtered out and an empty dict is returned.
        '''
        expected_status, expected_ret = True, {}
        status, ret = hubblestack.extmods.fdg.readfile.config(
            config_file, pattern="APP_ATTRIBUTES", ignore_pattern="APP_ATTRIBUTES", dictsep="=")
        assert expected_status == status
        assert expected_ret == ret
    def test_config_InvalidDictsep_ReturnDict(self, config_file):
        '''
        Test that given a ``dictsep`` that does not occur in the lines and
        otherwise valid arguments, the function returns a dict mapping each
        whole (non-ignored) line to ``None``.
        '''
        sample_data = self.generate_config_data()
        expected_status, expected_ret = True, {x: None for x in sample_data
                                               if "master" not in x}
        status, ret = hubblestack.extmods.fdg.readfile.config(
            config_file, ignore_pattern=".*master.*", dictsep="?", valsep=';', subsep=':')
        assert expected_status == status
        assert expected_ret == ret
    def test_config_ValidArguments_ReturnDict(self, config_file):
        '''
        Test that given valid arguments, the function returns a valid dict.

        ``pattern`` selects the zone-3 and zone-1 lines; ``ignore_pattern``
        then drops the zone-3 line, leaving only the worker entry.
        '''
        expected_status, expected_ret = True, {"APP_ATTRIBUTES": {
            "cluster_role": "worker", "zone": "1", "provider": "aws"}}
        status, ret = hubblestack.extmods.fdg.readfile.config(
            config_file, pattern=".*(3|1).*", ignore_pattern=".*3.*",
            dictsep="=", valsep=';', subsep=':')
        assert expected_status == status
        assert expected_ret == ret
    def test_config_EmptyValsep_ReturnDict(self, config_file):
        '''
        Test that given valid arguments and an empty ``valsep``,
        the function returns an incomplete dict: without item splitting, only
        the first ``subsep`` is honored and the rest of the line becomes the
        value.
        '''
        expected_status, expected_ret = True, {"APP_ATTRIBUTES": {
            "cluster_role": "control;zone:3;provider:aws"}}
        status, ret = hubblestack.extmods.fdg.readfile.config(
            config_file, pattern=".*control.*", dictsep="=", subsep=':')
        assert expected_status == status
        assert expected_ret == ret
    def test_config_EmptySubsep_ReturnDict(self, config_file):
        '''
        Test that given valid arguments and an empty ``subsep``,
        the function returns a dict with a list (of unsplit items) as value.
        '''
        expected_status, expected_ret = True, {"APP_ATTRIBUTES": ["cluster_role:control",
                                                                  "zone:3",
                                                                  "provider:aws"]}
        status, ret = hubblestack.extmods.fdg.readfile.config(
            config_file, ignore_pattern=".*(worker|master).*", dictsep="=", valsep=';')
        assert expected_status == status
        assert expected_ret == ret
    def test_readfileString_InvalidPath_emptyReturn(self):
        '''
        Test that given an invalid path, the function returns False and None.
        '''
        expected_status, expected_ret = False, None
        status, ret = hubblestack.extmods.fdg.readfile.readfile_string('/invalid/path')
        assert status == expected_status
        assert ret == expected_ret
    def test_readfileString_ValidPathFalseEncode_returnString(self, json_file):
        '''
        Test that given a valid path, the contents are returned as a single
        string with no encoding applied.

        writelines() adds no newlines, so the three "lines" concatenate.
        '''
        with open(json_file, 'w') as jfile:
            jfile.writelines(["First line", "Second line", "Foo bar line"])
        status, ret = hubblestack.extmods.fdg.readfile.readfile_string(json_file)
        assert status == True
        assert ret == "First lineSecond lineFoo bar line"
    def test_readfileString_ValidPathTrueEncode_returnEncodedString(self, json_file):
        '''
        Test that given a valid path and ``encode_b64=True``, the contents
        are returned base64-encoded.
        '''
        with open(json_file, 'w') as jfile:
            jfile.writelines(["Foo", "bar"])
        status, ret = hubblestack.extmods.fdg.readfile.readfile_string(json_file, encode_b64=True)
        assert status == True
        # base64 encoding of "Foobar"
        assert ret == 'Rm9vYmFy'
|
apache-2.0
|
toolforger/sympy
|
sympy/printing/python.py
|
118
|
3256
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import keyword as kw
import sympy
from .repr import ReprPrinter
from .str import StrPrinter
# A list of classes that should be printed using StrPrinter
STRPRINT = ("Add", "Infinity", "Integer", "Mul", "NegativeInfinity",
"Pow", "Zero")
class PythonPrinter(ReprPrinter, StrPrinter):
    """A printer which converts an expression into its Python interpretation."""

    def __init__(self, settings=None):
        ReprPrinter.__init__(self)
        StrPrinter.__init__(self, settings)
        # Symbol and undefined-function names seen while printing; python()
        # reads these to emit the Symbol(...)/Function(...) preamble.
        self.symbols = []
        self.functions = []

        # Create print methods for classes that should use StrPrinter instead
        # of ReprPrinter.
        for name in STRPRINT:
            f_name = "_print_%s" % name
            f = getattr(StrPrinter, f_name)
            setattr(PythonPrinter, f_name, f)

    def _print_Function(self, expr):
        """Record undefined function names so python() can declare them."""
        func = expr.func.__name__
        # Only functions unknown to sympy need an explicit Function(...) line.
        if not hasattr(sympy, func) and func not in self.functions:
            self.functions.append(func)
        return StrPrinter._print_Function(self, expr)

    # procedure (!) for defining symbols which have be defined in print_python()
    def _print_Symbol(self, expr):
        """Record symbol names so python() can declare them."""
        symbol = self._str(expr)
        if symbol not in self.symbols:
            self.symbols.append(symbol)
        return StrPrinter._print_Symbol(self, expr)

    def _print_module(self, expr):
        """Modules cannot be represented as executable Python expressions."""
        raise ValueError('Modules in the expression are unacceptable')
def python(expr, **settings):
    """Return Python interpretation of passed expression
    (can be passed to the exec() function without any modifications)

    The result declares every free symbol and undefined function first
    (``x = Symbol('x')``, ``f = Function('f')``), then assigns the printed
    expression to ``e``. Names clashing with Python keywords are escaped by
    appending underscores until unique.
    """
    printer = PythonPrinter(settings)
    exprp = printer.doprint(expr)

    result = ''
    # Returning found symbols and functions
    renamings = {}
    for symbolname in printer.symbols:
        newsymbolname = symbolname
        # Escape symbol names that are reserved python keywords
        if kw.iskeyword(newsymbolname):
            while True:
                newsymbolname += "_"
                if (newsymbolname not in printer.symbols and
                        newsymbolname not in printer.functions):
                    renamings[sympy.Symbol(
                        symbolname)] = sympy.Symbol(newsymbolname)
                    break
        result += newsymbolname + ' = Symbol(\'' + symbolname + '\')\n'

    for functionname in printer.functions:
        newfunctionname = functionname
        # Escape function names that are reserved python keywords
        if kw.iskeyword(newfunctionname):
            while True:
                newfunctionname += "_"
                if (newfunctionname not in printer.symbols and
                        newfunctionname not in printer.functions):
                    renamings[sympy.Function(
                        functionname)] = sympy.Function(newfunctionname)
                    break
        result += newfunctionname + ' = Function(\'' + functionname + '\')\n'

    if renamings:
        # Re-print the expression with keyword-clashing names substituted.
        exprp = expr.subs(renamings)
    result += 'e = ' + printer._str(exprp)

    return result
def print_python(expr, **settings):
    """Print output of python() function"""
    print(python(expr, **settings))
|
bsd-3-clause
|
MountainWei/nova
|
nova/tests/unit/compute/test_resources.py
|
57
|
11446
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the compute extra resources framework."""
from oslo_config import cfg
from stevedore import extension
from stevedore import named
from nova.compute import resources
from nova.compute.resources import base
from nova.compute.resources import vcpu
from nova import context
from nova.objects import flavor as flavor_obj
from nova import test
from nova.tests.unit import fake_instance
CONF = cfg.CONF
class FakeResourceHandler(resources.ResourceHandler):
    """ResourceHandler backed by a stevedore test extension manager.

    Bypasses real entry-point discovery so tests can inject fake resource
    extensions directly.
    """

    def __init__(self, extensions):
        # Intentionally does not call the parent __init__: the test instance
        # replaces the plugin-loading manager the parent would build.
        self._mgr = \
            named.NamedExtensionManager.make_test_instance(extensions)
class FakeResource(base.Resource):
    """Fake compute resource plugin tracking a single integer quantity.

    Subclasses set ``self.resource_name`` in reset(); requested amounts are
    read from a flavor's ``extra_specs`` under that name.
    """

    def __init__(self):
        # Capacity and consumption counters for this resource.
        self.total_res = 0
        self.used_res = 0

    def _get_requested(self, usage):
        # Returns the requested amount as int, or None when the usage dict
        # carries no extra_specs entry for this resource.
        if 'extra_specs' not in usage:
            return
        if self.resource_name not in usage['extra_specs']:
            return
        req = usage['extra_specs'][self.resource_name]
        return int(req)

    def _get_limit(self, limits):
        # Returns the limit as int, or None when no limit is set for this
        # resource (treated as "unlimited" by test()).
        if self.resource_name not in limits:
            return
        limit = limits[self.resource_name]
        return int(limit)

    def reset(self, resources, driver):
        self.total_res = 0
        self.used_res = 0

    def test(self, usage, limits):
        # Returns None when the request fits (or no request/limit applies),
        # otherwise a human-readable failure string.
        requested = self._get_requested(usage)
        if not requested:
            return
        limit = self._get_limit(limits)
        if not limit:
            return
        free = limit - self.used_res
        if requested <= free:
            return
        else:
            return ('Free %(free)d < requested %(requested)d ' %
                    {'free': free, 'requested': requested})

    def add_instance(self, usage):
        requested = self._get_requested(usage)
        if requested:
            self.used_res += requested

    def remove_instance(self, usage):
        requested = self._get_requested(usage)
        if requested:
            self.used_res -= requested

    def write(self, resources):
        pass

    def report_free(self):
        return "Free %s" % (self.total_res - self.used_res)
class ResourceA(FakeResource):
    """Fake resource whose capacity comes from the CONF.resA config option."""

    def reset(self, resources, driver):
        # ResourceA uses a configuration option
        self.total_res = int(CONF.resA)
        self.used_res = 0
        self.resource_name = 'resource:resA'

    def write(self, resources):
        resources['resA'] = self.total_res
        resources['used_resA'] = self.used_res
class ResourceB(FakeResource):
    """Fake resource whose capacity comes from the driver resources dict."""

    def reset(self, resources, driver):
        # ResourceB uses resource details passed in parameter resources
        self.total_res = resources['resB']
        self.used_res = 0
        self.resource_name = 'resource:resB'

    def write(self, resources):
        resources['resB'] = self.total_res
        resources['used_resB'] = self.used_res
def fake_flavor_obj(**updates):
    """Build a Flavor object populated with fixed test defaults.

    Keyword arguments, if any, are applied on top via Flavor.update().
    """
    defaults = (
        ('id', 1),
        ('name', 'fakeflavor'),
        ('memory_mb', 8000),
        ('vcpus', 3),
        ('root_gb', 11),
        ('ephemeral_gb', 4),
        ('swap', 0),
        ('rxtx_factor', 1.0),
        ('vcpu_weight', 1),
    )
    flavor = flavor_obj.Flavor()
    for attr, value in defaults:
        setattr(flavor, attr, value)
    if updates:
        flavor.update(updates)
    return flavor
class BaseTestCase(test.NoDBTestCase):
    """Tests for the pluggable compute resource handler using fake plugins."""

    def _initialize_used_res_counter(self):
        # Initialize the value for the used resource
        for ext in self.r_handler._mgr.extensions:
            ext.obj.used_res = 0

    def setUp(self):
        super(BaseTestCase, self).setUp()
        # initialize flavors and stub get_by_id to
        # get flavors from here
        self._flavors = {}
        self.ctxt = context.get_admin_context()

        # Create a flavor without extra_specs defined
        _flavor_id = 1
        _flavor = fake_flavor_obj(id=_flavor_id)
        self._flavors[_flavor_id] = _flavor

        # Create a flavor with extra_specs defined
        # (resC has no matching plugin, to exercise the "unknown resource"
        # paths)
        _flavor_id = 2
        requested_resA = 5
        requested_resB = 7
        requested_resC = 7
        _extra_specs = {'resource:resA': requested_resA,
                        'resource:resB': requested_resB,
                        'resource:resC': requested_resC}
        _flavor = fake_flavor_obj(id=_flavor_id,
                                  extra_specs=_extra_specs)
        self._flavors[_flavor_id] = _flavor

        # create fake resource extensions and resource handler
        _extensions = [
            extension.Extension('resA', None, ResourceA, ResourceA()),
            extension.Extension('resB', None, ResourceB, ResourceB()),
        ]
        self.r_handler = FakeResourceHandler(_extensions)

        # Resources details can be passed to each plugin or can be specified as
        # configuration options
        driver_resources = {'resB': 5}
        CONF.resA = '10'

        # initialise the resources
        self.r_handler.reset_resources(driver_resources, None)

    def test_update_from_instance_with_extra_specs(self):
        # Flavor with extra_specs
        _flavor_id = 2
        sign = 1
        self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)

        expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']
        expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']
        self.assertEqual(int(expected_resA),
                         self.r_handler._mgr['resA'].obj.used_res)
        self.assertEqual(int(expected_resB),
                         self.r_handler._mgr['resB'].obj.used_res)

    def test_update_from_instance_without_extra_specs(self):
        # Flavor id without extra spec
        _flavor_id = 1
        self._initialize_used_res_counter()
        self.r_handler.resource_list = []
        sign = 1
        self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
        self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)
        self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)

    def test_write_resources(self):
        self._initialize_used_res_counter()
        extra_resources = {}
        expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}
        self.r_handler.write_resources(extra_resources)
        self.assertEqual(expected, extra_resources)

    def test_test_resources_without_extra_specs(self):
        limits = {}
        # Flavor id without extra_specs
        flavor = self._flavors[1]
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_test_resources_with_limits_for_different_resource(self):
        limits = {'resource:resC': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_passing_test_resources(self):
        limits = {'resource:resA': 10, 'resource:resB': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        self._initialize_used_res_counter()
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_failing_test_resources_for_single_resource(self):
        # resA limit (4) is below the flavor's request (5); resB passes.
        limits = {'resource:resA': 4, 'resource:resB': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        self._initialize_used_res_counter()
        result = self.r_handler.test_resources(flavor, limits)
        expected = ['Free 4 < requested 5 ', None]
        self.assertEqual(sorted(expected),
                         sorted(result))

    def test_empty_resource_handler(self):
        """An empty resource handler has no resource extensions,
        should have no effect, and should raise no exceptions.
        """
        empty_r_handler = FakeResourceHandler([])

        resources = {}
        empty_r_handler.reset_resources(resources, None)

        flavor = self._flavors[1]
        sign = 1
        empty_r_handler.update_from_instance(flavor, sign)

        limits = {}
        test_result = empty_r_handler.test_resources(flavor, limits)
        self.assertEqual([], test_result)

        sign = -1
        empty_r_handler.update_from_instance(flavor, sign)

        extra_resources = {}
        expected_extra_resources = extra_resources
        empty_r_handler.write_resources(extra_resources)
        self.assertEqual(expected_extra_resources, extra_resources)

        empty_r_handler.report_free_resources()

    def test_vcpu_resource_load(self):
        # load the vcpu example
        names = ['vcpu']
        real_r_handler = resources.ResourceHandler(names)
        ext_names = real_r_handler._mgr.names()
        self.assertEqual(names, ext_names)

        # check the extension loaded is the one we expect
        # and an instance of the object has been created
        ext = real_r_handler._mgr['vcpu']
        self.assertIsInstance(ext.obj, vcpu.VCPU)
class TestVCPU(test.NoDBTestCase):
    """Tests for the concrete VCPU resource plugin."""

    def setUp(self):
        super(TestVCPU, self).setUp()
        self._vcpu = vcpu.VCPU()
        # 10 total vcpus, none used, as the baseline for every test.
        self._vcpu._total = 10
        self._vcpu._used = 0
        self._flavor = fake_flavor_obj(vcpus=5)
        self._big_flavor = fake_flavor_obj(vcpus=20)
        self._instance = fake_instance.fake_instance_obj(None)

    def test_reset(self):
        # set vcpu values to something different to test reset
        self._vcpu._total = 10
        self._vcpu._used = 5
        driver_resources = {'vcpus': 20}
        self._vcpu.reset(driver_resources, None)
        self.assertEqual(20, self._vcpu._total)
        self.assertEqual(0, self._vcpu._used)

    def test_add_and_remove_instance(self):
        self._vcpu.add_instance(self._flavor)
        self.assertEqual(10, self._vcpu._total)
        self.assertEqual(5, self._vcpu._used)
        self._vcpu.remove_instance(self._flavor)
        self.assertEqual(10, self._vcpu._total)
        self.assertEqual(0, self._vcpu._used)

    def test_test_pass_limited(self):
        result = self._vcpu.test(self._flavor, {'vcpu': 10})
        self.assertIsNone(result, 'vcpu test failed when it should pass')

    def test_test_pass_unlimited(self):
        # No limit entry means the request is always accepted.
        result = self._vcpu.test(self._big_flavor, {})
        self.assertIsNone(result, 'vcpu test failed when it should pass')

    def test_test_fail(self):
        result = self._vcpu.test(self._flavor, {'vcpu': 2})
        expected = 'Free CPUs 2.00 VCPUs < requested 5 VCPUs'
        self.assertEqual(expected, result)

    def test_write(self):
        resources = {'stats': {}}
        self._vcpu.write(resources)
        expected = {
            'vcpus': 10,
            'vcpus_used': 0,
            'stats': {
                'num_vcpus': 10,
                'num_vcpus_used': 0
            }
        }
        # NOTE(review): sorted() on a dict yields its sorted keys, so this
        # only compares top-level key sets, not values - probably intended
        # to be assertEqual(expected, resources); confirm before tightening.
        self.assertEqual(sorted(expected),
                         sorted(resources))
|
apache-2.0
|
kuriositeetti/wamp-tikki
|
venv/lib/python2.7/site-packages/pip/utils/build.py
|
899
|
1312
|
from __future__ import absolute_import
import os.path
import tempfile
from pip.utils import rmtree
class BuildDirectory(object):
    """A (possibly temporary) build directory, usable as a context manager.

    When *name* is omitted, a fresh temporary directory is created. When
    *delete* is omitted, it defaults to True, so the directory is removed
    by cleanup() (called on context-manager exit).
    """

    def __init__(self, name=None, delete=None):
        if name is None:
            # We realpath here because some systems have their default tmpdir
            # symlinked to another directory. This tends to confuse build
            # scripts, so we canonicalize the path by traversing potential
            # symlinks here.
            name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-"))
        # If we were not given an explicit delete option, default to
        # deleting. (The original code performed this same defaulting twice,
        # once before and once after the name check; one check suffices.)
        if delete is None:
            delete = True
        self.name = name
        self.delete = delete

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)

    def __enter__(self):
        return self.name

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def cleanup(self):
        """Remove the directory if this instance owns deletion."""
        if self.delete:
            rmtree(self.name)
|
mit
|
rjw57/videosequence
|
test/test_simple_seeking.py
|
1
|
2242
|
from __future__ import print_function
from contextlib import closing
from PIL import ImageChops, ImageStat
from videosequence import VideoSequence
def assert_images_not_equal(im1, im2):
    """Fail unless im1 and im2 differ in at least one channel of one pixel."""
    extrema = ImageStat.Stat(ImageChops.difference(im1, im2)).extrema
    assert any(channel_max > 0 for _, channel_max in extrema)
def assert_images_equal(im1, im2):
    """Fail if im1 and im2 differ anywhere."""
    extrema = ImageStat.Stat(ImageChops.difference(im1, im2)).extrema
    assert all(channel_max == 0 for _, channel_max in extrema)
def test_duration(news_video, ice_video):
    """Frame counts of the two fixture videos match their known lengths."""
    with closing(VideoSequence(news_video)) as s:
        assert len(s) == 288
    with closing(VideoSequence(ice_video)) as s:
        assert len(s) == 468
def test_size(news_video):
    """The news fixture decodes at its known CIF resolution (352x288)."""
    with closing(VideoSequence(news_video)) as s:
        assert s.width == 352
        assert s.height == 288
def test_initial_and_final_frame(news_video, ice_video):
    """First and last frames of each fixture video are distinct images."""
    with closing(VideoSequence(news_video)) as s:
        start = s[0]
        end = s[-1]  # negative indexing seeks from the end
        assert_images_not_equal(start, end)
    with closing(VideoSequence(ice_video)) as s:
        start = s[0]
        end = s[-1]
        assert_images_not_equal(start, end)
def test_first_few_frames_differ(news_video):
    """Each of the first five frames has a different (non-zero) mean level.

    Guards against a seek bug returning the same frame repeatedly.
    """
    with closing(VideoSequence(news_video)) as s:
        last_mean = 0.0
        for idx in range(5):
            print("Frame", idx)
            mean = ImageStat.Stat(s[idx]).mean[0]
            assert mean != last_mean
            assert mean > 0
            last_mean = mean
def test_slice_news(news_video):
    """Slicing yields the same frames as individual indexing (news video)."""
    with closing(VideoSequence(news_video)) as s:
        frames = [s[idx] for idx in range(5, 10)]
        for f1, f2 in zip(frames, s[5:10]):
            assert_images_equal(f1, f2)
def test_slice_ice(ice_video):
    """Slicing yields the same frames as individual indexing (ice video)."""
    with closing(VideoSequence(ice_video)) as s:
        frames = [s[idx] for idx in range(5, 10)]
        for f1, f2 in zip(frames, s[5:10]):
            assert_images_equal(f1, f2)
def __xtest_iteration(news_video, ice_video):
    """Iterating a sequence visits exactly len(s) frames.

    NOTE(review): the ``__x`` prefix hides this from pytest collection -
    presumably disabled deliberately (iteration may be slow or broken);
    confirm before re-enabling.
    """
    with closing(VideoSequence(news_video)) as s:
        n = 0
        for _ in s:
            n += 1
        assert n == len(s)
    with closing(VideoSequence(ice_video)) as s:
        n = 0
        for _ in s:
            n += 1
        assert n == len(s)
|
mit
|
lukeiwanski/tensorflow
|
tensorflow/contrib/signal/python/ops/util_ops.py
|
71
|
2459
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility ops shared across tf.contrib.signal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fractions
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def gcd(a, b, name=None):
  """Returns the greatest common divisor via Euclid's algorithm.

  Args:
    a: The dividend. A scalar integer `Tensor`.
    b: The divisor. A scalar integer `Tensor`.
    name: An optional name for the operation.

  Returns:
    A scalar `Tensor` representing the greatest common divisor between `a` and
    `b`.

  Raises:
    ValueError: If `a` or `b` are not scalar integers.
  """
  with ops.name_scope(name, 'gcd', [a, b]):
    a = ops.convert_to_tensor(a)
    b = ops.convert_to_tensor(b)

    a.shape.assert_has_rank(0)
    b.shape.assert_has_rank(0)

    if not a.dtype.is_integer:
      raise ValueError('a must be an integer type. Got: %s' % a.dtype)
    if not b.dtype.is_integer:
      raise ValueError('b must be an integer type. Got: %s' % b.dtype)

    # TPU requires static shape inference. GCD is used for subframe size
    # computation, so we should prefer static computation where possible.
    const_a = tensor_util.constant_value(a)
    const_b = tensor_util.constant_value(b)
    if const_a is not None and const_b is not None:
      # NOTE(review): fractions.gcd is deprecated since Python 3.5 and removed
      # in 3.9; math.gcd is the replacement (it always returns a non-negative
      # result, so verify sign semantics before switching).
      return ops.convert_to_tensor(fractions.gcd(const_a, const_b))

    # Dynamic fallback: iterative Euclid's algorithm as a while_loop.
    # back_prop=False since gcd is not differentiable.
    cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
    body = lambda a, b: [b, math_ops.mod(a, b)]
    a, b = control_flow_ops.while_loop(cond, body, [a, b], back_prop=False)
    return a
|
apache-2.0
|
sasukeh/neutron
|
neutron/tests/unit/plugins/oneconvergence/test_security_group.py
|
28
|
4681
|
# Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.plugins.oneconvergence import plugin as nvsd_plugin
from neutron.tests import tools
from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
from neutron.tests.unit.extensions import test_securitygroup as test_sg
PLUGIN_NAME = ('neutron.plugins.oneconvergence.'
'plugin.OneConvergencePluginV2')
AGENTNOTIFIER = ('neutron.plugins.oneconvergence.'
'plugin.NVSDPluginV2AgentNotifierApi')
DUMMY_NVSD_LIB = ('neutron.tests.unit.plugins.oneconvergence.'
'dummynvsdlib.NVSDApi')
class OneConvergenceSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
    """Security-group test base wired to the OneConvergence (NVSD) plugin."""

    _plugin_name = PLUGIN_NAME

    def setUp(self):
        # The NVSD plugin has no IPv6 support, so skip any IPv6 variant.
        if 'v6' in self._testMethodName:
            self.skipTest("NVSD Plugin does not support IPV6.")

        def mocked_oneconvergence_init(self):
            # Replaces the plugin's NVSD library bootstrap with a mock whose
            # create_network returns a fresh UUID per call.
            def side_effect(*args, **kwargs):
                return {'id': str(uuid.uuid4())}
            self.nvsdlib = mock.Mock()
            self.nvsdlib.create_network.side_effect = side_effect

        test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
        notifier_cls = mock.patch(AGENTNOTIFIER).start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        self.useFixture(tools.AttributeMapMemento())
        with mock.patch.object(nvsd_plugin.OneConvergencePluginV2,
                               'oneconvergence_init',
                               new=mocked_oneconvergence_init):
            super(OneConvergenceSecurityGroupsTestCase,
                  self).setUp(PLUGIN_NAME)

    # NOTE(review): this override only delegates to super() and could be
    # removed without behavior change.
    def tearDown(self):
        super(OneConvergenceSecurityGroupsTestCase, self).tearDown()
class TestOneConvergenceSGServerRpcCallBack(
    OneConvergenceSecurityGroupsTestCase,
    test_sg_rpc.SGServerRpcCallBackTestCase):
    """Runs the shared SG server RPC callback tests against the NVSD plugin."""
    pass
class TestOneConvergenceSecurityGroups(OneConvergenceSecurityGroupsTestCase,
                                       test_sg.TestSecurityGroups,
                                       test_sg_rpc.SGNotificationTestMixin):
    """Runs the shared security-group test suite against the NVSD plugin."""

    def test_security_group_get_port_from_device(self):
        # Create a network/subnet/SG, bind the SG to a port, then verify
        # get_port_from_device returns the port with that SG attached.
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    security_group_id = sg['security_group']['id']
                    res = self._create_port(self.fmt, n['network']['id'])
                    port = self.deserialize(self.fmt, res)
                    fixed_ips = port['port']['fixed_ips']
                    data = {'port': {'fixed_ips': fixed_ips,
                                     'name': port['port']['name'],
                                     ext_sg.SECURITYGROUPS:
                                     [security_group_id]}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    port_id = res['port']['id']
                    plugin = manager.NeutronManager.get_plugin()
                    port_dict = plugin.get_port_from_device(mock.Mock(),
                                                            port_id)
                    self.assertEqual(port_id, port_dict['id'])
                    self.assertEqual([security_group_id],
                                     port_dict[ext_sg.SECURITYGROUPS])
                    self.assertEqual([], port_dict['security_group_rules'])
                    self.assertEqual([fixed_ips[0]['ip_address']],
                                     port_dict['fixed_ips'])
                    self._delete('ports', port_id)

    def test_security_group_get_port_from_device_with_no_port(self):
        # An unknown device id yields None rather than raising.
        plugin = manager.NeutronManager.get_plugin()
        port_dict = plugin.get_port_from_device(mock.Mock(), 'bad_device_id')
        self.assertIsNone(port_dict)
|
apache-2.0
|
Kagee/youtube-dl
|
youtube_dl/extractor/ebaumsworld.py
|
149
|
1055
|
from __future__ import unicode_literals
from .common import InfoExtractor
class EbaumsWorldIE(InfoExtractor):
    """Extractor for ebaumsworld.com video pages."""

    _VALID_URL = r'https?://www\.ebaumsworld\.com/video/watch/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.ebaumsworld.com/video/watch/83367677/',
        'info_dict': {
            'id': '83367677',
            'ext': 'mp4',
            'title': 'A Giant Python Opens The Door',
            'description': 'This is how nightmares start...',
            'uploader': 'jihadpizza',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The player endpoint returns the metadata as an XML document.
        config = self._download_xml(
            'http://www.ebaumsworld.com/video/player/%s' % video_id, video_id)
        # NOTE(review): each .find(...).text raises AttributeError if the
        # element is missing from the XML - presumably all fields are always
        # present; confirm before hardening.
        video_url = config.find('file').text

        return {
            'id': video_id,
            'title': config.find('title').text,
            'url': video_url,
            'description': config.find('description').text,
            'thumbnail': config.find('image').text,
            'uploader': config.find('username').text,
        }
|
unlicense
|
d2emon/gurps-helper
|
fill_dmg.py
|
1
|
2024
|
def thrustDice(value):
    """Return the number of dice for a thrust attack at strength *value*."""
    if value <= 10:
        # All low strengths thrust a single die.
        return 1
    elif value < 40:
        return 1 + (value - 11) // 8
    elif value < 60:
        return 1 + (value - 5) // 10
    else:
        return 1 + value // 10
def thrustModifier(value):
    """Return the flat modifier for a thrust attack at strength *value*."""
    if value <= 10:
        return (value - 11) // 2 - 1
    elif value < 40:
        return (value - 11) // 2 % 4 - 1
    elif value < 60:
        # Piecewise formula for the 40-59 bracket.
        tens = (value - 40) // 10
        fives = (value - 40) // 5
        return 1 + tens * 5 - fives * (value // 10 - 3)
    elif value < 70:
        return 2 * ((value - 60) // 5) - 1
    elif value < 100:
        return 2 * ((value - 60) // 5 % 2)
    return 0
def swingDice(value):
    """Return the number of dice for a swing attack at strength *value*."""
    if value <= 10:
        return 1
    elif value < 27:
        return 1 + (value - 9) // 4
    elif value < 40:
        return 3 + (value - 7) // 8
    else:
        return 3 + value // 10
def swingModifier(value):
    """Return the flat modifier for a swing attack at strength *value*."""
    if value < 9:
        return (value - 11) // 2
    elif value < 27:
        return (value - 9) % 4 - 1
    elif value < 40:
        # The step counter advances every two points of strength here.
        step = (value - 9) // 2 + 1
        return step % 4 - 1
    elif value < 60:
        return 2 * ((value - 40) // 5 % 2) - 1
    elif value < 100:
        return 2 * ((value - 60) // 5 % 2)
    return 0
def main():
    """Rebuild the BasicDamage table for ST 1-40 and ST 45-100 (step 5).

    Deletes all existing BasicDamage rows, then inserts one row per
    strength value with the thrust and swing dice/modifier pairs, and
    commits once at the end (matching the original single-commit behavior).
    """
    import itertools

    import attributes
    import db

    e, s = db.connect()
    # Start from a clean table so reruns do not duplicate rows.
    s.query(attributes.BasicDamage).delete()
    s.commit()
    # The two original loops had identical bodies; chain the ranges instead.
    for st in itertools.chain(range(1, 41), range(45, 101, 5)):
        print("{}\t{}d + {}\t{}d + {}".format(
            st, thrustDice(st), thrustModifier(st),
            swingDice(st), swingModifier(st)))
        dmg = attributes.BasicDamage(
            st,
            [thrustDice(st), thrustModifier(st)],
            [swingDice(st), swingModifier(st)],
        )
        print(dmg)
        s.add(dmg)
    s.commit()
# Allow running this module both as a script and as an importable library.
if __name__ == "__main__":
    main()
|
gpl-3.0
|
rnd0101/urbanmediator
|
urbanmediator/fckeditor.py
|
1
|
4435
|
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=None):
    """Convert the special characters '<', '>', '&', '"' and "'" to
    their HTML entities.

    RFC 1866 specifies that these characters be represented in HTML as
    &lt;, &gt; and &amp; respectively.  The *replace* parameter may supply a
    custom ``replace(text, old, new)`` callable; by default the built-in
    ``str.replace`` method is used.  (The historical default,
    ``string.replace``, no longer exists on Python 3.)
    """
    if replace is None:
        replace = lambda s, old, new: s.replace(old, new)
    text = replace(text, '&', '&amp;')  # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    text = replace(text, "'", '&#39;')
    return text
# The FCKeditor class
class FCKeditor(object):
    """Server-side helper that renders an FCKeditor instance as HTML.

    Falls back to a plain ``<textarea>`` when the requesting browser is
    not supported (see :meth:`IsCompatible`).
    """

    def __init__(self, instanceName):
        self.InstanceName = instanceName
        self.BasePath = '/fckeditor/'
        self.Width = '100%'
        self.Height = '200'
        self.ToolbarSet = 'Default'
        self.Value = ''
        self.Config = {}

    def Create(self):
        """Return the HTML markup for this editor instance."""
        return self.CreateHtml()

    def CreateHtml(self):
        """Build either the editor iframe or the plain-textarea fallback."""
        HtmlValue = escape(self.Value)
        Html = "<div>"
        if self.IsCompatible():
            File = "fckeditor.html"
            Link = "%seditor/%s?InstanceName=%s" % (
                self.BasePath,
                File,
                self.InstanceName
            )
            if self.ToolbarSet is not None:
                Link += "&ToolBar=%s" % self.ToolbarSet
            # Render the linked hidden field that carries the edited value.
            Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.InstanceName,
                HtmlValue
            )
            # Render the configurations hidden field.
            Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.GetConfigFieldString()
            )
            # Render the editor iframe.
            Html += "<iframe id=\"%s___Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
                self.InstanceName,
                Link,
                self.Width,
                self.Height
            )
        else:
            # BUG FIX: the original searched for the two-character literal
            # "%%", which never matches a single '%', so percentage sizes
            # like '100%' wrongly got a 'px' suffix appended.
            if "%" in self.Width:
                WidthCSS = self.Width
            else:
                WidthCSS = "%spx" % self.Width
            if "%" in self.Height:
                HeightCSS = self.Height
            else:
                HeightCSS = "%spx" % self.Height
            Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
                self.InstanceName,
                WidthCSS,
                HeightCSS,
                HtmlValue
            )
        Html += "</div>"
        return Html

    def IsCompatible(self):
        """Return True if the requesting browser can host the editor.

        Checks the User-Agent for IE >= 5.5 (not Mac, not Opera),
        Gecko >= 20030210, Opera >= 9.5 or AppleWebKit >= 522.
        """
        import web
        # The request may carry no User-Agent header at all.
        sAgent = web.ctx.environ.get("HTTP_USER_AGENT", "")
        if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
            i = sAgent.find("MSIE")
            iVersion = float(sAgent[i + 5:i + 5 + 3])
            return iVersion >= 5.5
        elif sAgent.find("Gecko/") >= 0:
            i = sAgent.find("Gecko/")
            iVersion = int(sAgent[i + 6:i + 6 + 8])
            return iVersion >= 20030210
        elif sAgent.find("Opera/") >= 0:
            i = sAgent.find("Opera/")
            iVersion = float(sAgent[i + 6:i + 6 + 4])
            return iVersion >= 9.5
        elif sAgent.find("AppleWebKit/") >= 0:
            m = re.search(r'AppleWebKit/(\d+)', sAgent, re.IGNORECASE)
            # BUG FIX: the original compared the matched *string* to the
            # integer 522 (always True on Python 2) and assumed a match.
            return m is not None and int(m.group(1)) >= 522
        else:
            return False

    def GetConfigFieldString(self):
        """Serialize self.Config into an urlencoded string for the hidden
        config field.

        Values equal to the strings "true"/"false" are emitted unescaped;
        falsy values are skipped.  BUG FIX: the original still emitted a
        '&' separator for skipped (falsy) entries, producing malformed
        strings such as '&b=x'.
        """
        parts = []
        for sKey in self.Config.keys():
            sValue = self.Config[sKey]
            if not sValue:
                continue
            k = escape(sKey)
            if sValue == "true":
                parts.append("%s=true" % k)
            elif sValue == "false":
                parts.append("%s=false" % k)
            else:
                parts.append("%s=%s" % (k, escape(sValue)))
        return "&".join(parts)
|
bsd-3-clause
|
skidzo/pydy
|
pydy/viz/shapes.py
|
4
|
19419
|
#!/usr/bin/env python
__all__ = ['Cube',
'Cylinder',
'Cone',
'Sphere',
'Circle',
'Plane',
'Tetrahedron',
'Octahedron',
'Icosahedron',
'Torus',
'TorusKnot',
'Tube']
import numpy as np
# This is a list of ColorKeywords from THREE.js
THREE_COLORKEYWORDS = ['aliceblue', 'antiquewhite', 'aqua',
'aquamarine', 'azure', 'beige', 'bisque',
'black', 'blanchedalmond', 'blue', 'blueviolet',
'brown', 'burlywood', 'cadetblue', 'chartreuse',
'chocolate', 'coral', 'cornflowerblue',
'cornsilk', 'crimson', 'cyan', 'darkblue',
'darkcyan', 'darkgoldenrod', 'darkgray',
'darkgreen', 'darkgrey', 'darkkhaki',
'darkmagenta', 'darkolivegreen', 'darkorange',
'darkorchid', 'darkred', 'darksalmon',
'darkseagreen', 'darkslateblue', 'darkslategray',
'darkslategrey', 'darkturquoise', 'darkviolet',
'deeppink', 'deepskyblue', 'dimgray', 'dimgrey',
'dodgerblue', 'firebrick', 'floralwhite',
'forestgreen', 'fuchsia', 'gainsboro',
'ghostwhite', 'gold', 'goldenrod', 'gray',
'green', 'greenyellow', 'grey', 'honeydew',
'hotpink', 'indianred', 'indigo', 'ivory',
'khaki', 'lavender', 'lavenderblush',
'lawngreen', 'lemonchiffon', 'lightblue',
'lightcoral', 'lightcyan',
'lightgoldenrodyellow', 'lightgray',
'lightgreen', 'lightgrey', 'lightpink',
'lightsalmon', 'lightseagreen', 'lightskyblue',
'lightslategray', 'lightslategrey',
'lightsteelblue', 'lightyellow', 'lime',
'limegreen', 'linen', 'magenta', 'maroon',
'mediumaquamarine', 'mediumblue',
'mediumorchid', 'mediumpurple', 'mediumseagreen',
'mediumslateblue', 'mediumspringgreen',
'mediumturquoise', 'mediumvioletred',
'midnightblue', 'mintcream', 'mistyrose',
'moccasin', 'navajowhite', 'navy', 'oldlace',
'olive', 'olivedrab', 'orange', 'orangered',
'orchid', 'palegoldenrod', 'palegreen',
'paleturquoise', 'palevioletred', 'papayawhip',
'peachpuff', 'peru', 'pink', 'plum',
'powderblue', 'purple', 'red', 'rosybrown',
'royalblue', 'saddlebrown', 'salmon',
'sandybrown', 'seagreen', 'seashell', 'sienna',
'silver', 'skyblue', 'slateblue', 'slategray',
'slategrey', 'snow', 'springgreen', 'steelblue',
'tan', 'teal', 'thistle', 'tomato', 'turquoise',
'violet', 'wheat', 'white', 'whitesmoke',
'yellow', 'yellowgreen']
MATERIALS = ["default", "checkerboard", "metal", "dirt", "foil", "water",
"grass"]
class Shape(object):
    """Instantiates a shape. This is primarily used as a superclass for more
    specific shapes like Cube, Cylinder, Sphere etc.

    Shapes must be associated with a reference frame and a point using the
    VisualizationFrame class.

    Parameters
    ==========
    name : str, optional
        A name assigned to the shape.
    color : str, optional
        A color string from list of colors in THREE_COLORKEYWORDS.
    material : str, optional
        A material string from the MATERIALS list (case insensitive).

    Examples
    ========

    >>> from pydy.viz.shapes import Shape
    >>> s = Shape()
    >>> s.name
    'unnamed'
    >>> s.color
    'grey'
    >>> s.name = 'my-shape1'
    >>> s.name
    'my-shape1'
    >>> s.color = 'blue'
    >>> s.color
    'blue'
    >>> a = Shape(name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """

    def __init__(self, name='unnamed', color='grey', material="default"):
        self.name = name
        self.color = color
        self.material = material
        # Subclasses append their geometric attribute names here so that
        # __str__ and generate_dict can discover them generically.
        self.geometry_attrs = []

    def __str__(self):
        attributes = ([self.__class__.__name__,
                       self.name,
                       'color:' + self.color,
                       'material:' + self.material] +
                      sorted([attr + ':{}'.format(getattr(self, attr)) for
                              attr in self.geometry_attrs]))
        return ' '.join(['{}'] * len(attributes)).format(*attributes)

    def __repr__(self):
        return self.__class__.__name__

    @property
    def name(self):
        """Returns the name attribute of the shape."""
        return self._name

    @name.setter
    def name(self, new_name):
        """Sets the name attribute of the shape; must be a str."""
        if not isinstance(new_name, str):
            raise TypeError("'name' should be a valid str object.")
        else:
            self._name = new_name

    @property
    def color(self):
        """Returns the color attribute of the shape."""
        return self._color

    @color.setter
    def color(self, new_color):
        """Sets the color attribute of the shape. This should be a valid
        three.js color keyword string, otherwise ValueError is raised."""
        if new_color not in THREE_COLORKEYWORDS:
            msg = "'color' should be a valid Three.js colors string:\n{}"
            raise ValueError(msg.format('\n'.join(THREE_COLORKEYWORDS)))
        else:
            self._color = new_color

    @property
    def material(self):
        """Returns the material attribute of the shape."""
        return self._material

    @material.setter
    def material(self, new_material):
        """Sets the material attribute of the shape, i.e. its shine,
        brightness, opacity etc.. The material should be a valid material
        from the listed MATERIALS (case insensitive), otherwise ValueError
        is raised."""
        if new_material.lower() not in MATERIALS:
            msg = "'material' is not valid. Choose from:\n{}"
            raise ValueError(msg.format('\n'.join(MATERIALS)))
        else:
            self._material = new_material

    def generate_dict(self, constant_map=None):
        """Returns a dictionary containing all the data associated with the
        Shape.

        Parameters
        ==========
        constant_map : dictionary, optional
            If any of the shape's geometry are defined as SymPy expressions,
            then this dictionary should map all SymPy Symbol's found in the
            expressions to floats.  Defaults to an empty mapping.

        Raises
        ======
        TypeError
            If a geometry attribute is an expression that cannot be reduced
            to a float with the supplied mapping.

        """
        # The default used to be the mutable literal ``{}``; a None
        # sentinel avoids sharing one dict object across all calls.
        if constant_map is None:
            constant_map = {}
        data_dict = {}
        data_dict['name'] = self.name
        data_dict['color'] = self.color
        data_dict['material'] = self.material
        data_dict['type'] = self.__repr__()
        for geom in self.geometry_attrs:
            atr = getattr(self, geom)
            try:
                data_dict[geom] = float(atr.subs(constant_map))
            except AttributeError:
                # not a SymPy expression
                data_dict[geom] = atr
            except TypeError:
                # can't convert expression to float
                raise TypeError('{} is an expression, you '.format(atr) +
                                'must provide a mapping to numerical values.')
        return data_dict
class Cube(Shape):
    """A cube, parameterized by the length of its edge.

    Parameters
    ==========
    length : float or SymPy expression
        The length of the cube.

    Examples
    ========

    >>> from pydy.viz.shapes import Cube
    >>> s = Cube(10.0)
    >>> s.name
    'unnamed'
    >>> s.length
    10.0
    >>> s.length = 12.0
    >>> s.length
    12.0
    >>> a = Cube(10.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """

    def __init__(self, length, **kwargs):
        super(Cube, self).__init__(**kwargs)
        # Register the geometry so Shape.generate_dict/__str__ pick it up.
        self.geometry_attrs.extend(['length'])
        self.length = length
class Cylinder(Shape):
    """A cylinder, parameterized by its length and radius.

    Parameters
    ==========
    length : float or SymPy expression
        The length of the cylinder.
    radius : float or SymPy expression
        The radius of the cylinder.

    Examples
    ========

    >>> from pydy.viz.shapes import Cylinder
    >>> s = Cylinder(10.0, 5.0)
    >>> s.length
    10.0
    >>> s.radius
    5.0
    >>> s.length = 12.0
    >>> s.radius = 6.0
    >>> a = Cylinder(10.0, 5.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """

    def __init__(self, length, radius, **kwargs):
        super(Cylinder, self).__init__(**kwargs)
        # Register the geometry so Shape.generate_dict/__str__ pick it up.
        self.geometry_attrs.extend(['length', 'radius'])
        self.length = length
        self.radius = radius
class Cone(Shape):
    """A cone, parameterized by its length and base radius.

    Parameters
    ==========
    length : float or SymPy expression
        The length of the cone.
    radius : float or SymPy expression
        The base radius of the cone.

    Examples
    ========

    >>> from pydy.viz.shapes import Cone
    >>> s = Cone(10.0, 5.0)
    >>> s.length
    10.0
    >>> s.radius
    5.0
    >>> s.length = 12.0
    >>> s.radius = 6.0
    >>> a = Cone(10.0, 5.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """

    def __init__(self, length, radius, **kwargs):
        super(Cone, self).__init__(**kwargs)
        # Register the geometry so Shape.generate_dict/__str__ pick it up.
        self.geometry_attrs.extend(['length', 'radius'])
        self.length = length
        self.radius = radius
class Sphere(Shape):
    """A sphere of a given radius.

    Parameters
    ==========
    radius : float or SymPy expression, optional
        The radius of the sphere (defaults to 10.0).

    Examples
    ========

    >>> from pydy.viz.shapes import Sphere
    >>> s = Sphere(10.0)
    >>> s.name
    'unnamed'
    >>> s.radius
    10.0
    >>> s.radius = 12.0
    >>> s.radius
    12.0
    >>> a = Sphere(10.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """

    def __init__(self, radius=10.0, **kwargs):
        super(Sphere, self).__init__(**kwargs)
        # Register the geometry so Shape.generate_dict/__str__ pick it up.
        self.geometry_attrs.extend(['radius'])
        self.radius = radius
class Circle(Sphere):
    """Instantiates a circle with a given radius.

    Behaves exactly like Sphere; the distinct class name is exposed via
    the 'type' entry of ``generate_dict`` so the renderer can treat it
    differently.

    Parameters
    ==========
    radius : float or SymPy Expression
        The radius of the circle.

    Examples
    ========

    >>> from pydy.viz.shapes import Circle
    >>> s = Circle(10.0)
    >>> s.name
    'unnamed'
    >>> s.radius
    10.0
    >>> s.radius = 12.0
    >>> s.radius
    12.0
    >>> a = Circle(10.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """
class Plane(Shape):
    """A rectangular plane of a given length and width.

    Parameters
    ==========
    length : float or SymPy expression, optional
        The length of the plane (defaults to 10.0).
    width : float or SymPy expression, optional
        The width of the plane (defaults to 5.0).

    Examples
    ========

    >>> from pydy.viz.shapes import Plane
    >>> s = Plane(10.0, 5.0)
    >>> s.length
    10.0
    >>> s.width
    5.0
    >>> s.length = 12.0
    >>> s.width = 6.0
    >>> a = Plane(10.0, 5.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """

    def __init__(self, length=10.0, width=5.0, **kwargs):
        super(Plane, self).__init__(**kwargs)
        # Register the geometry so Shape.generate_dict/__str__ pick it up.
        self.geometry_attrs.extend(['length', 'width'])
        self.length = length
        self.width = width
class Tetrahedron(Sphere):
    """Instantiates a Tetrahedron inscribed in a given radius circle.

    Behaves exactly like Sphere; the distinct class name is exposed via
    the 'type' entry of ``generate_dict`` so the renderer can treat it
    differently.

    Parameters
    ==========
    radius : float or SymPy expression
        The radius of the circum-scribing sphere around the tetrahedron.

    Examples
    ========

    >>> from pydy.viz.shapes import Tetrahedron
    >>> s = Tetrahedron(10.0)
    >>> s.name
    'unnamed'
    >>> s.radius
    10.0
    >>> s.radius = 12.0
    >>> s.radius
    12.0
    >>> a = Tetrahedron(10.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """
class Octahedron(Sphere):
    """Instantiates an Octahedron inscribed in a circle of the given
    radius.

    Behaves exactly like Sphere; the distinct class name is exposed via
    the 'type' entry of ``generate_dict`` so the renderer can treat it
    differently.

    Parameters
    ==========
    radius : float or SymPy expression
        The radius of the circum-scribing sphere around the octahedron.

    Examples
    ========

    >>> from pydy.viz.shapes import Octahedron
    >>> s = Octahedron(10.0)
    >>> s.name
    'unnamed'
    >>> s.radius
    10.0
    >>> s.radius = 12.0
    >>> s.radius
    12.0
    >>> a = Octahedron(10.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """
class Icosahedron(Sphere):
    """Instantiates an icosahedron inscribed in a sphere of the given
    radius.

    Behaves exactly like Sphere; the distinct class name is exposed via
    the 'type' entry of ``generate_dict`` so the renderer can treat it
    differently.

    Parameters
    ==========
    radius : float or a SymPy expression
        Radius of the circum-scribing sphere for the icosahedron.

    Examples
    ========

    >>> from pydy.viz.shapes import Icosahedron
    >>> s = Icosahedron(10)
    >>> s.name
    'unnamed'
    >>> s.radius
    10
    >>> s.radius = 12.0
    >>> s.radius
    12.0
    >>> a = Icosahedron(10.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """
class Torus(Shape):
    """A torus parameterized by its center-line radius and tube radius.

    Parameters
    ==========
    radius : float or SymPy expression
        The radius of the torus.
    tube_radius : float or SymPy expression
        The radius of the torus tube.

    Examples
    ========

    >>> from pydy.viz.shapes import Torus
    >>> s = Torus(10.0, 5.0)
    >>> s.radius
    10.0
    >>> s.tube_radius
    5.0
    >>> s.radius = 12.0
    >>> s.tube_radius = 6.0
    >>> a = Torus(10.0, 5.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """

    def __init__(self, radius, tube_radius, **kwargs):
        super(Torus, self).__init__(**kwargs)
        # Register the geometry so Shape.generate_dict/__str__ pick it up.
        self.geometry_attrs.extend(['radius', 'tube_radius'])
        self.radius = radius
        self.tube_radius = tube_radius

    # These are plain pass-through properties with no validation; they are
    # kept so subclasses may override the setters later.
    @property
    def radius(self):
        return self._radius

    @radius.setter
    def radius(self, value):
        self._radius = value

    @property
    def tube_radius(self):
        return self._tube_radius

    @tube_radius.setter
    def tube_radius(self, value):
        self._tube_radius = value
class TorusKnot(Torus):
    """Instantiates a torus knot with given radius and section radius.

    Behaves exactly like Torus; the distinct class name is exposed via
    the 'type' entry of ``generate_dict`` so the renderer can treat it
    differently.

    Parameters
    ==========
    radius : float or SymPy expression
        The radius of the torus knot.
    tube_radius : float or SymPy expression
        The radius of the torus knot tube.

    Examples
    ========

    >>> from pydy.viz.shapes import TorusKnot
    >>> s = TorusKnot(10.0, 5.0)
    >>> s.name
    'unnamed'
    >>> s.radius
    10.0
    >>> s.tube_radius
    5.0
    >>> s.radius = 12.0
    >>> s.tube_radius = 6.0
    >>> a = TorusKnot(10.0, 5.0, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'

    """
class Tube(Shape):
    """A tube of a given radius swept along a path of 3D points.

    Parameters
    ==========
    radius : float or SymPy expression
        The radius of the tube.
    points : array_like, shape(n, 3)
        An array of n (x, y, z) coordinates representing points that the
        tube's center line should follow.

    Examples
    ========

    >>> from pydy.viz.shapes import Tube
    >>> points = [[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]]
    >>> s = Tube(10.0, points)
    >>> s.name
    'unnamed'
    >>> s.radius = 14.0
    >>> s.radius
    14.0
    >>> a = Tube(12.0, points, name='my-shape2', color='red')
    >>> a.name
    'my-shape2'
    >>> a.color
    'red'
    >>> a.radius
    12.0

    """

    def __init__(self, radius, points, **kwargs):
        super(Tube, self).__init__(**kwargs)
        # Register the geometry so Shape.generate_dict/__str__ pick it up.
        self.geometry_attrs.extend(['radius', 'points'])
        self.radius = radius
        self.points = points

    @property
    def points(self):
        return self._points

    @points.setter
    def points(self, new_points):
        # Stored as an ndarray so downstream code can rely on array ops.
        self._points = np.asarray(new_points)
|
bsd-3-clause
|
ojengwa/oh-mainline
|
vendor/packages/oauthlib/oauthlib/oauth1/rfc5849/signature.py
|
36
|
22922
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of `section 3.4`_ of the spec.
Terminology:
* Client: software interfacing with an OAuth API
* Server: the API provider
* Resource Owner: the user who is granting authorization to the client
Steps for signing a request:
1. Collect parameters from the uri query, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
construct the base string
5. Pass the base string and any keys needed to a signing function
.. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
"""
from __future__ import absolute_import, unicode_literals
import binascii
import hashlib
import hmac
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from . import utils
from oauthlib.common import urldecode, extract_params, safe_string_equals
from oauthlib.common import bytes_type, unicode_type
def construct_base_string(http_method, base_string_uri,
                          normalized_encoded_request_parameters):
    """**String Construction**

    Build the OAuth 1.0 signature base string per `section 3.4.1.1`_ of
    the spec: the uppercased HTTP method, the encoded base string URI and
    the encoded normalized request parameters, joined by "&" (ASCII 38).

    A custom HTTP method MUST also be encoded (Section 3.6), which
    ``utils.escape`` takes care of.

    .. _`section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
    """
    return '&'.join([
        # 1. The HTTP request method in uppercase, encoded.
        utils.escape(http_method.upper()),
        # 3. The base string URI (Section 3.4.1.2), encoded.
        utils.escape(base_string_uri),
        # 5. The normalized request parameters (Section 3.4.1.3.2), encoded.
        utils.escape(normalized_encoded_request_parameters),
    ])
def normalize_base_string_uri(uri, host=None):
    """**Base String URI**

    Normalize *uri* per `section 3.4.1.2`_ of the spec: lowercase scheme
    and authority, default the path to "/", drop query and fragment, and
    strip the port when it is the scheme's default (80 for http, 443 for
    https).

    The *host* argument overrides the netloc part of the uri argument.

    .. _`section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2
    """
    if not isinstance(uri, unicode_type):
        raise ValueError('uri must be a unicode object.')

    # FIXME: urlparse does not support unicode
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)

    # The request resource URI must be absolute (RFC3986).
    if not scheme or not netloc:
        raise ValueError('uri must include a scheme and netloc')

    # Per RFC 2616 section 5.1.2 the absolute path cannot be empty; if none
    # is present in the original URI, it MUST be given as "/".
    path = path or '/'

    # 1. The scheme and host MUST be in lowercase.
    # 2. The host and port MUST match the HTTP "Host" header, so an
    #    explicitly supplied host wins over the one embedded in the uri.
    scheme = scheme.lower()
    netloc = (host if host is not None else netloc).lower()

    # 3. A port is kept only when it is not the scheme's default
    #    (RFC2616 / RFC2818).
    if ':' in netloc:
        hostname, port = netloc.split(':', 1)
        if (scheme, port) in (('http', '80'), ('https', '443')):
            netloc = hostname

    return urlparse.urlunparse((scheme, netloc, path, params, '', ''))
# ** Request Parameters **
#
# Per `section 3.4.1.3`_ of the spec.
#
# In order to guarantee a consistent and reproducible representation of
# the request parameters, the parameters are collected and decoded to
# their original decoded form. They are then sorted and encoded in a
# particular manner that is often different from their original
# encoding scheme, and concatenated into a single string.
#
# .. _`section 3.4.1.3`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3
def collect_parameters(uri_query='', body=None, headers=None,
                       exclude_oauth_signature=True, with_realm=False):
    """**Parameter Sources**

    Collect the request parameters from the uri query, the OAuth
    ``Authorization`` header and the request entity-body, per
    `section 3.4.1.3.1`_ of the spec.

    Parameters starting with `oauth_` will be unescaped.

    Body parameters must be supplied as a dict, a list of 2-tuples, or a
    formencoded query string.

    Headers must be supplied as a dict.

    Note that per the spec a "+" in a form-encoded value MUST be decoded
    as a space, and the ``oauth_signature`` parameter is excluded unless
    *exclude_oauth_signature* is False.

    .. _`section 3.4.1.3.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
    """
    # ``body`` historically defaulted to the mutable literal ``[]``; a None
    # sentinel avoids the shared-mutable-default pitfall.
    if body is None:
        body = []
    headers = headers or {}
    params = []

    # * The query component of the HTTP request URI, parsed as an
    #   "application/x-www-form-urlencoded" string (RFC3986, Section 3.4;
    #   W3C.REC-html40-19980424, Section 17.13.4).
    if uri_query:
        params.extend(urldecode(uri_query))

    # * The OAuth HTTP "Authorization" header field (Section 3.5.1) if
    #   present, excluding the "realm" parameter unless requested.
    if headers:
        headers_lower = dict((k.lower(), v) for k, v in headers.items())
        authorization_header = headers_lower.get('authorization')
        if authorization_header is not None:
            params.extend([i for i in utils.parse_authorization_header(
                authorization_header) if with_realm or i[0] != 'realm'])

    # * The HTTP request entity-body, when it is single-part, follows the
    #   "application/x-www-form-urlencoded" encoding and is declared with
    #   that Content-Type.
    # TODO: enforce header param inclusion conditions
    bodyparams = extract_params(body) or []
    params.extend(bodyparams)

    # Ensure all oauth params are unescaped.
    unescaped_params = []
    for k, v in params:
        if k.startswith('oauth_'):
            v = utils.unescape(v)
        unescaped_params.append((k, v))

    # The "oauth_signature" parameter MUST be excluded from the signature
    # base string if present.
    if exclude_oauth_signature:
        unescaped_params = [i for i in unescaped_params
                            if i[0] != 'oauth_signature']

    return unescaped_params
def normalize_parameters(params):
    """**Parameters Normalization**

    Normalize a list of (name, value) pairs into the single string required
    by `section 3.4.1.3.2`_ of the spec:

    1. each name and value is percent-encoded (Section 3.6),
    2. the pairs are sorted by name, then by value, in ascending byte
       order,
    3. each pair is joined with "=" (ASCII 61), even for empty values, and
    4. the pairs are concatenated with "&" (ASCII 38).

    For example ``[('a2', 'r b'), ('a3', '2 q'), ('c@', '')]`` becomes
    ``'a2=r%20b&a3=2%20q&c%40='``.

    .. _`section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    """
    # Encoding before sorting is deliberate: the spec orders the *encoded*
    # names and values by byte value.
    encoded = sorted((utils.escape(k), utils.escape(v)) for k, v in params)
    return '&'.join('{0}={1}'.format(k, v) for k, v in encoded)
def sign_hmac_sha1_with_client(base_string, client):
    """Sign *base_string* with HMAC-SHA1 using the client's secrets.

    Convenience wrapper that pulls the client shared-secret and the
    resource owner (token) shared-secret off the *client* object.
    """
    return sign_hmac_sha1(
        base_string, client.client_secret, client.resource_owner_secret)
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
    """**HMAC-SHA1**

    The "HMAC-SHA1" signature method uses the HMAC-SHA1 signature
    algorithm as defined in `RFC2104`_::

        digest = HMAC-SHA1 (key, text)

    Per `section 3.4.2`_ of the spec.

    .. _`RFC2104`: http://tools.ietf.org/html/rfc2104
    .. _`section 3.4.2`: http://tools.ietf.org/html/rfc5849#section-3.4.2
    """
    # Per `section 3.4.2`_, the HMAC key is the escaped client
    # shared-secret and the escaped token shared-secret joined by a
    # single "&" (ASCII 38), which MUST appear even when either secret
    # is empty.  The text is the signature base string itself.
    key = '&'.join((utils.escape(client_secret or ''),
                    utils.escape(resource_owner_secret or '')))

    # FIXME: HMAC does not support unicode!
    digest = hmac.new(key.encode('utf-8'),
                      base_string.encode('utf-8'),
                      hashlib.sha1)

    # The result octet string is base64-encoded per `RFC2045, Section
    # 6.8`_; b2a_base64 appends a trailing newline which is stripped.
    #
    # .. _`RFC2045, Section 6.8`: http://tools.ietf.org/html/rfc2045#section-6.8
    return binascii.b2a_base64(digest.digest())[:-1].decode('utf-8')
def sign_rsa_sha1(base_string, rsa_private_key):
    """**RSA-SHA1**

    Per `section 3.4.3`_ of the spec.

    The "RSA-SHA1" signature method uses the RSASSA-PKCS1-v1_5 signature
    algorithm as defined in `RFC3447, Section 8.2`_ (also known as
    PKCS#1), using SHA-1 as the hash function for EMSA-PKCS1-v1_5. To
    use this method, the client MUST have established client credentials
    with the server that included its RSA public key (in a manner that is
    beyond the scope of this specification).

    NOTE: this method requires the PyCrypto library.

    .. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3
    .. _`RFC3447, Section 8.2`: http://tools.ietf.org/html/rfc3447#section-8.2
    """
    # TODO: finish RSA documentation
    from Crypto.PublicKey import RSA
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA

    # PKCS1_v1_5 operates on bytes, so encode a text base string first.
    if isinstance(base_string, unicode_type):
        base_string = base_string.encode('utf-8')

    signer = PKCS1_v1_5.new(RSA.importKey(rsa_private_key))
    signature = signer.sign(SHA.new(base_string))
    # base64-encode and strip the trailing newline b2a_base64 appends.
    return binascii.b2a_base64(signature)[:-1].decode('utf-8')
def sign_rsa_sha1_with_client(base_string, client):
    """Sign *base_string* with RSA-SHA1 using the client's RSA key."""
    return sign_rsa_sha1(base_string, client.rsa_key)
def sign_plaintext(client_secret, resource_owner_secret):
    """Sign a request using plaintext.

    Per `section 3.4.4`_ of the spec.

    The "PLAINTEXT" method does not employ a signature algorithm. It
    MUST be used with a transport-layer mechanism such as TLS or SSL (or
    sent over a secure channel with equivalent protections). It does not
    utilize the signature base string or the "oauth_timestamp" and
    "oauth_nonce" parameters.

    .. _`section 3.4.4`: http://tools.ietf.org/html/rfc5849#section-3.4.4
    """
    # Per `section 3.4.4`_ the signature is the escaped client
    # shared-secret and the escaped token shared-secret joined by an
    # "&" (ASCII 38), included even when either secret is empty.
    parts = (utils.escape(client_secret or ''),
             utils.escape(resource_owner_secret or ''))
    return '&'.join(parts)
def sign_plaintext_with_client(base_string, client):
    """Sign with PLAINTEXT using the client's secrets.

    *base_string* is accepted for signature-method interface parity but
    is unused: PLAINTEXT does not involve the signature base string.
    """
    return sign_plaintext(client.client_secret, client.resource_owner_secret)
def verify_hmac_sha1(request, client_secret=None,
                     resource_owner_secret=None):
    """Verify a HMAC-SHA1 signature.

    Per `section 3.4`_ of the spec.

    .. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host
    item of the request argument's headers dict attribute will be
    ignored.

    .. _`RFC2616 section 5.2`: http://tools.ietf.org/html/rfc2616#section-5.2
    """
    # Recompute the expected signature from the request and compare it
    # to the provided one with a timing-attack-resistant comparison.
    base_string = construct_base_string(
        request.http_method,
        normalize_base_string_uri(request.uri),
        normalize_parameters(request.params))
    expected = sign_hmac_sha1(base_string, client_secret,
                              resource_owner_secret)
    return safe_string_equals(expected, request.signature)
def verify_rsa_sha1(request, rsa_public_key):
    """Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.

    Per `section 3.4.3`_ of the spec.

    Note this method requires the PyCrypto library.

    .. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host
    item of the request argument's headers dict attribute will be
    ignored.

    .. _`RFC2616 section 5.2`: http://tools.ietf.org/html/rfc2616#section-5.2
    """
    from Crypto.PublicKey import RSA
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA

    # Rebuild the signature base string from the request, then check the
    # provided base64 signature against it with the public key.
    message = construct_base_string(
        request.http_method,
        normalize_base_string_uri(request.uri),
        normalize_parameters(request.params))
    verifier = PKCS1_v1_5.new(RSA.importKey(rsa_public_key))
    signature = binascii.a2b_base64(request.signature.encode('utf-8'))
    return verifier.verify(SHA.new(message.encode('utf-8')), signature)
def verify_plaintext(request, client_secret=None, resource_owner_secret=None):
    """Verify a PLAINTEXT signature.

    Per `section 3.4`_ of the spec.

    .. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
    """
    # Recompute the expected plaintext signature and compare using a
    # timing-attack-resistant comparison.
    expected = sign_plaintext(client_secret, resource_owner_secret)
    return safe_string_equals(expected, request.signature)
|
agpl-3.0
|
jfergie/closure-linter
|
closure_linter/not_strict_test.py
|
129
|
2318
|
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
# Directory (relative to the package root) holding the test input files.
_RESOURCE_PREFIX = 'closure_linter/testdata'

# Configure the linter flags for a non-strict run before any test executes.
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
                                 'limited_doc_checks.js')

# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
    'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
    """Test suite to run a GJsLintTest for each of several files.

    If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
    testdata to test. Otherwise, _TEST_FILES is used.
    """

    def __init__(self, tests=()):
        unittest.TestSuite.__init__(self, tests)

        # Files named on the command line override the default list.
        test_files = sys.argv[1:] if sys.argv else []
        if not test_files:
            test_files = _TEST_FILES

        # One annotated-file test case per input file.
        for test_file in test_files:
            self.addTest(
                filetestcase.AnnotatedFileTestCase(
                    os.path.join(_RESOURCE_PREFIX, test_file),
                    runner.Run,
                    errors.ByName))
# Script entry point: run the suite defined above.
if __name__ == '__main__':
    # Don't let main parse args; it happens in the TestSuite.
    googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
|
apache-2.0
|
Omegaphora/external_chromium_org
|
build/android/findbugs_diff.py
|
57
|
1391
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs findbugs, and returns an error code if there are new warnings.
This runs findbugs with an additional flag to exclude known bugs.
To update the list of known bugs, do this:
findbugs_diff.py --rebaseline
Note that this is separate from findbugs_exclude.xml. The "exclude" file has
false positives that we do not plan to fix. The "known bugs" file has real
bugs that we *do* plan to fix (but haven't done so yet).
Other options
--only-analyze used to only analyze the classes you are interested in.
--release-build analyze the classes in the out/Release directory.
--findbugs-args used to pass in other findbugs options.
Run
$CHROME_SRC/third_party/findbugs/bin/findbugs -textui for details.
"""
import os
import sys
from pylib import constants
from pylib.utils import findbugs
def main():
    """Run findbugs over the Chromium classes and return its exit status.

    Fills in defaults for the filter directory and the analysis target
    when the caller did not supply them on the command line.
    """
    options, _ = findbugs.GetCommonParser().parse_args()

    # Default to the checked-in "known bugs" filter directory.
    if not options.base_dir:
        options.base_dir = os.path.join(
            constants.DIR_SOURCE_ROOT, 'build', 'android', 'findbugs_filter')

    # Restrict analysis to Chromium's own packages by default.
    if not options.only_analyze:
        options.only_analyze = 'org.chromium.-'

    return findbugs.Run(options)
# Script entry point: exit with findbugs' status code.
if __name__ == '__main__':
    sys.exit(main())
|
bsd-3-clause
|
premanandchandrasekar/boto
|
boto/emr/__init__.py
|
6
|
3243
|
# Copyright (c) 2010 Spotify AB
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This module provies an interface to the Elastic MapReduce (EMR)
service from AWS.
"""
from connection import EmrConnection
from step import Step, StreamingStep, JarStep
from bootstrap_action import BootstrapAction
from boto.regioninfo import RegionInfo
def regions():
    """
    Get all available regions for the Amazon Elastic MapReduce service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # Every EMR endpoint follows the same naming scheme
    # ("elasticmapreduce.<region>.amazonaws.com"), so the region list is
    # generated from the region names alone.
    region_names = [
        'us-east-1',
        'us-west-1',
        'us-west-2',
        'ap-northeast-1',
        'ap-southeast-1',
        'ap-southeast-2',
        'eu-west-1',
        'sa-east-1',
    ]
    return [RegionInfo(name=name,
                       endpoint='elasticmapreduce.%s.amazonaws.com' % name,
                       connection_cls=EmrConnection)
            for name in region_names]
def connect_to_region(region_name, **kw_params):
    """Return an EMR connection for *region_name*, or None if unknown.

    Extra keyword arguments are forwarded to the region's connect().
    """
    matches = [r for r in regions() if r.name == region_name]
    if matches:
        return matches[0].connect(**kw_params)
    return None
|
mit
|
pjmaker/python-tsi-tools
|
tags.py
|
1
|
32923
|
# tags.py -- map tags from one namespace to another
# Copyright 2016 Ben Elliston
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
tags = {
'G1_OIL_TOP_UP_PIML': None,
'G1_OIL_USED_IN_SERVICE_PIML': None,
'G2_OIL_TOP_UP_PIML': None,
'G2_OIL_USED_IN_SERVICE_PIML': None,
'G3_OIL_TOP_UP_PIML': None,
'G3_OIL_USED_IN_SERVICE_PIML': None,
'G4_OIL_TOP_UP_PIML': None,
'G4_OIL_USED_IN_SERVICE_PIML': None,
'STATION_MAX_DEMAND_PIML': None,
'F1_OUT_CNT_PIML': None,
'F1_OUT_DATE_PIML': None,
'F1_OUT_HOUR_CNT_PIML': None,
'F1_OUT_TIME_PIML': None,
'F2_OUT_CNT_PIML': None,
'F2_OUT_DATE_PIML': None,
'F2_OUT_HOUR_CNT_PIML': None,
'F2_OUT_TIME_PIML': None,
'F3_OUT_CNT_PIML': None,
'F3_OUT_DATE_PIML': None,
'F3_OUT_HOUR_CNT_PIML': None,
'F3_OUT_TIME_PIML': None,
'F4_OUT_CNT_PIML': None,
'F4_OUT_DATE_PIML': None,
'F4_OUT_HOUR_CNT_PIML': None,
'F4_OUT_TIME_PIML': None,
'Fed1AlarmSt': None,
'Fed1AutoMd': None,
'Fed1BlackBusSt': None,
'Fed1ClosedSt': None,
'Fed1ClsingSt': None,
'Fed1DigIOAl': None,
'Fed1EarthFaultAl': None,
'FED1FACT': None,
'Fed1FtCloseAl': None,
'Fed1FtOpenAl': None,
'Fed1HealthySt': None,
'Fed1I1Act': None,
'Fed1I2Act': None,
'Fed1I3Act': None,
'Fed1KwhTot': None,
'Fed1ManualMd': None,
'Fed1OpenedCnt': None,
'Fed1OpenedSt': None,
'Fed1OpeningSt': None,
'Fed1OvercurrentAl': None,
'Fed1Pact': None,
'Fed1PCloseSet': None,
'Fed1PDemAvr': None,
'Fed1PwrFctAct': None,
'Fed1PwrMonAl': None,
'Fed1QAct': None,
'Fed1ScadaMd': None,
'Fed1SefAl': None,
'Fed1ShedCnt': None,
'Fed1ToutTot': None,
'Fed1TripCnt': None,
'Fed1TrippedAl': None,
'Fed1U12Act': None,
'Fed1U1NAct': None,
'Fed1U23Act': None,
'Fed1U2NAct': None,
'Fed1U31Act': None,
'Fed1U3NAct': None,
'Fed2AlarmSt': None,
'Fed2AutoMd': None,
'Fed2BlackBusSt': None,
'Fed2ClosedSt': None,
'Fed2ClsingSt': None,
'Fed2DigIOAl': None,
'Fed2EarthFaultAl': None,
'Fed2Fact': None,
'Fed2FtCloseAl': None,
'Fed2FtOpenAl': None,
'Fed2HealthySt': None,
'Fed2I1Act': None,
'Fed2I2Act': None,
'Fed2I3Act': None,
'Fed2KwhTot': None,
'Fed2ManualMd': None,
'Fed2OpenedCnt': None,
'Fed2OpenedSt': None,
'Fed2OpeningSt': None,
'Fed2OvercurrentAl': None,
'Fed2Pact': None,
'Fed2PCloseSet': None,
'Fed2PDemAvr': None,
'Fed2PwrFctAct': None,
'Fed2PwrMonAl': None,
'Fed2QAct': None,
'Fed2ScadaMd': None,
'Fed2SefAl': None,
'Fed2ShedCnt': None,
'Fed2ToutTot': None,
'Fed2TripCnt': None,
'Fed2TrippedAl': None,
'Fed2U12Act': None,
'Fed2U1NAct': None,
'Fed2U23Act': None,
'Fed2U2NAct': None,
'Fed2U31Act': None,
'Fed2U3NAct': None,
'Fed3AlarmSt': None,
'Fed3AutoMd': None,
'Fed3BlackBusSt': None,
'Fed3ClosedSt': None,
'Fed3ClsingSt': None,
'Fed3DigIOAl': None,
'Fed3EarthFaultAl': None,
'Fed3Fact': None,
'Fed3FtCloseAl': None,
'Fed3FtOpenAl': None,
'Fed3HealthySt': None,
'Fed3I1Act': None,
'Fed3I2Act': None,
'Fed3I3Act': None,
'Fed3KwhTot': None,
'Fed3ManualMd': None,
'Fed3OpenedCnt': None,
'Fed3OpenedSt': None,
'Fed3OpeningSt': None,
'Fed3OvercurrentAl': None,
'Fed3Pact': None,
'Fed3PCloseSet': None,
'Fed3PDemAvr': None,
'Fed3PwrFctAct': None,
'Fed3PwrMonAl': None,
'Fed3QAct': None,
'Fed3ScadaMd': None,
'Fed3SefAl': None,
'Fed3ShedCnt': None,
'Fed3ToutTot': None,
'Fed3TripCnt': None,
'Fed3TrippedAl': None,
'Fed3U12Act': None,
'Fed3U1NAct': None,
'Fed3U23Act': None,
'Fed3U2NAct': None,
'Fed3U31Act': None,
'Fed3U3NAct': None,
'Fed4AlarmSt': None,
'Fed4AutoMd': None,
'Fed4BlackBusSt': None,
'Fed4ClosedSt': None,
'Fed4ClsingSt': None,
'Fed4DigIOAl': None,
'Fed4EarthFaultAl': None,
'Fed4Fact': None,
'Fed4FtCloseAl': None,
'Fed4FtOpenAl': None,
'Fed4HealthySt': None,
'Fed4I1Act': None,
'Fed4I2Act': None,
'Fed4I3Act': None,
'Fed4KwhTot': None,
'Fed4ManualMd': None,
'Fed4OpenedCnt': None,
'Fed4OpenedSt': None,
'Fed4OpeningSt': None,
'Fed4OvercurrentAl': None,
'Fed4Pact': None,
'Fed4PCloseSet': None,
'Fed4PDemAvr': None,
'Fed4PwrFctAct': None,
'Fed4PwrMonAl': None,
'Fed4QAct': None,
'Fed4ScadaMd': None,
'Fed4SefAl': None,
'Fed4ShedCnt': None,
'Fed4ToutTot': None,
'Fed4TripCnt': None,
'Fed4TrippedAl': None,
'Fed4U12Act': None,
'Fed4U1NAct': None,
'Fed4U23Act': None,
'Fed4U2NAct': None,
'Fed4U31Act': None,
'Fed4U3NAct': None,
'FedAllClosedSt': None,
'FedAllOpenSt': None,
'FedAutoClsdSt': None,
'FedBlackSt': None,
'FedClosingSt': None,
'FedDemandingSt': None,
'FedNoAvailSt': None,
'FedNonManualOpenSt': None,
'FedOpenSt': None,
'FedPact': None,
'FedRotationModeCloseSt': None,
'FedRotationModeOpenSt': None,
'FedRotationSt': None,
'FedRotationWaitSt': None,
'FedSheddingSt': None,
'FedShutdownSt': None,
'FUEL_DELIVERED_PIML': None,
'FUEL_TANK1_PIML': None,
'FUEL_TANK2_PIML': None,
'FUEL_TANK3_PIML': None,
'G1_FUEL_TOTAL_PIML': 'Fuelgen1_l',
'G1_KWH_PIML': None,
'G1_TOTAL_HOURS_RUN_PIML': 'Tgen1_h',
'G2_FUEL_TOTAL_PIML': 'Fuelgen2_l',
'G2_KWH_PIML': None,
'G2_TOTAL_HOURS_RUN_PIML': 'Tgen2_h',
'G3_FUEL_TOTAL_PIML': 'Fuelgen3_l',
'G3_KWH_PIML': None,
'G3_TOTAL_HOURS_RUN_PIML': 'Tgen3_h',
'G4_FUEL_TOTAL_PIML': 'Fuelgen4_l',
'G4_KWH_PIML': None,
'G4_TOTAL_HOURS_RUN_PIML': 'Tgen4_h',
'Gen1AIOutOfRange': None, # generator 1 analogue input out of range
'Gen1AlarmSt': None, # generator 1 alarm state
'Gen1Asymmetry': None, # generator 1 unbalanced load alarm
'Gen1AutoMd': None, # generator 1 auto mode
'Gen1BatUnderVolt': None, # generator 1 battery under voltage
'Gen1BlackSt': None, # generator 1 black state
'Gen1ClosedSt': None, # generator 1 closed state
'Gen1CloseFailGCB': None, # generator 1 generator circuit breaker failed to close
'Gen1CommY1Y6': None, #
'Gen1CoolDownSt': None, # generator 1 cool down state
'Gen1CritAl': None, # generator 1 critical alarm
'Gen1DigIOAl': None, # generator 1 digital I/O alarm
'Gen1ExhTempLeft': None, # generator 1 exhaust temperature left
'Gen1ExhTempRight': None, # generator 1 exhaust temperature right
'Gen1ExtOpenGCB': None, # generator 1 external generator circuit breaker open
'Gen1Fact': 'fgen1_Hz', # generator 1 actual frequency
'Gen1FirstStartMd': None, # generator 1 first start mode
'Gen1FtOpenAl': None, # generator 1 fail to open alarm
'Gen1FtStartAl': None, # generator 1 fail to start alarm
'Gen1FtStopAl': None, # generator 1 fail to stop alarm
'Gen1FtSyncAl': None, # generator 1 fail to sync alarm
'Gen1FuelConAct': None, # generator 1 fuel consumption
'Gen1FuelLitreCnt': None, # generator 1 fuel consumption counter
'Gen1GenFreq': None,
'Gen1GenLoadUnb': None,
'Gen1GenOverCur': None, # generator 1 generator overcurrent
'Gen1GenOverFreq': None, # generator 1 generator overfrequency
'Gen1GenOverload': None, # generator 1 generator overload
'Gen1GenOverSpd': None, # generator 1 generator overspeed
'Gen1GenRevPwr': None, # generator 1 generator reverse power
'Gen1GenUnderFreq': None, # generator 1 generator underfrequency
'Gen1GenUnderVolt': None, # generator 1 generator undervoltage
'Gen1HealthySt': None, # generator 1 healthy state
'Gen1I1Act': None, # generator 1 phase 1 actual current
'Gen1I2Act': None, # generator 1 phase 2 actual current
'Gen1I3Act': None, # generator 1 phase 3 actual current
'Gen1IAct': 'Igen1_A', # FIXME: this tag doesn't exist yet.
'Gen1InternalError7': None, # generator 1 internal error 7 (magic?)
'Gen1KwhTot': 'Egen1_kWh', # generator 1 kwh total
'Gen1LastStartMd': None, # generator 1 last start mode
'Gen1LdFctAct': None, # generator 1 actual load factor
'Gen1MainsOverFrq': None, # generator 1 mains overfrequency
'Gen1MainsOverVolt': None, # generator 1 mains overvoltage
'Gen1MainsUnderFrq': None, # generator 1 mains underfrequency
'Gen1MainsUnderVolt': None, # generator 1 mains undervoltage
'Gen1MainsVectJump': None, # generator 1 mains vector jump (wha?)
'Gen1MaintenanceCall': None,
'Gen1ManualMd': None, # generator 1 manual mode
'Gen1MCBFail': None, # generator 1 MCB failure
'Gen1NonCritAl': None, # generator 1 non-critical alarm
'Gen1OilPrAftAct': None, # generator 1 actual oil pressure after filter
'Gen1OilPrBefAct': None, # generator 1 actual oil pressure before filter
'Gen1OilTact': None, # generator 1 actual oil temperature
'Gen1OpenFailGCB': None, # generator 1 open fail generator circuit breaker
'Gen1OpenSt': None, # generator 1 open state
'Gen1Pact': 'Pgen1_kVA', # generator 1 actual power
'Gen1PderAct': None, # generator 1 actual derated power
'Gen1PreGlowSt': None, # generator 1 pre-glow state
'Gen1PsetSt': None, # generator 1 power setpoint state
'Gen1PwrFctAct': 'PFgen1', # generator 1 actual power factor
'Gen1PwrMonAl': None, # generator 1 power monitor alarm
'Gen1QAct': 'Qgen1_kVAr', # generator 1 actual reactive power
'Gen1RpmAct': None, # generator 1 actual RPM
'Gen1RunningTimeOut': None, # generator 1 generator running timeout
'Gen1RunSt': None, # generator 1 running state
'Gen1ScadaMd': None, # generator 1 SCADA mode
'Gen1StartCnt': None, # generator 1 total number of starts
'Gen1StartingSt': None, # generator 1 starting state
'Gen1StoppingFail': None, # generator 1 failed to stop alarm
'Gen1StoppingSt': None, # generator 1 stopping state
'Gen1StopSt': None, # generator 1 stop state
'Gen1SyncSt': None, # generator 1 synchronisation state
'Gen1SyncTimeExcd': None, # generator 1 sync time exceeded
'Gen1TempDeratedSt': None,
'Gen1ThermOverCur': None, # generator 1 thermal overcurrent
'Gen1TimeoutSwchOnBlackBus': None, #
'Gen1TimeTillNextService': None, # generator 1 time until next service
'Gen1TrunTot': None, # generator 1 total run time
'Gen1U12Act': None, # generator 1 phase 1 to phase 2 voltage
'Gen1U1NAct': None, # generator 1 phase 1 to neutral voltage
'Gen1U23Act': None, # generator 1 phase 2 to phase 3 voltage
'Gen1U2NAct': None, # generator 1 phase 2 to neutral voltage
'Gen1U31Act': None, # generator 1 phase 3 to phase 1 voltage
'Gen1U3NAct': None, # generator 1 phase 3 to neutral voltage
'Gen1UnloadSt': None, # generator 1 unload state
'Gen1WarmUp': None, # generator 1 warm-up state
'Gen1WarnAl': None, # generator 1 warning alarm
'Gen1WatchdogPwr': None,
'Gen1WaterTinAct': None, # generator 1 actual water temp in
'Gen1WaterToutAct': None, # generator 1 actual water temp out
'Gen1WrongStart': None,
'Gen2AIOutOfRange': None, # generator 2 analogue input out of range
'Gen2AlarmSt': None, # generator 2 alarm state
'Gen2Asymmetry': None, # generator 2 unbalanced load alarm
'Gen2AutoMd': None, # generator 2 auto mode
'Gen2BatUnderVolt': None, # generator 2 battery under voltage
'Gen2BlackSt': None, # generator 2 black state
'Gen2ClosedSt': None, # generator 2 closed state
'Gen2CloseFailGCB': None, # generator 2 generator circuit breaker failed to close
'Gen2CommY1Y6': None, #
'Gen2CoolDownSt': None, # generator 2 cool down state
'Gen2CritAl': None, # generator 2 critical alarm
'Gen2DigIOAl': None, # generator 2 digital I/O alarm
'Gen2ExhTempLeft': None, # generator 2 exhaust temperature left
'Gen2ExhTempRight': None, # generator 2 exhaust temperature right
'Gen2ExtOpenGCB': None, # generator 2 external generator circuit breaker open
'Gen2Fact': 'fgen2_Hz', # generator 2 actual frequency
'Gen2FirstStartMd': None, # generator 2 first start mode
'Gen2FtOpenAl': None, # generator 2 fail to open alarm
'Gen2FtStartAl': None, # generator 2 fail to start alarm
'Gen2FtStopAl': None, # generator 2 fail to stop alarm
'Gen2FtSyncAl': None, # generator 2 fail to sync alarm
'Gen2FuelConAct': None, # generator 2 fuel consumption
'Gen2FuelLitreCnt': None, # generator 2 fuel consumption counter
'Gen2GenFreq': None,
'Gen2GenLoadUnb': None,
'Gen2GenOverCur': None, # generator 2 generator overcurrent
'Gen2GenOverFreq': None, # generator 2 generator overfrequency
'Gen2GenOverload': None, # generator 2 generator overload
'Gen2GenOverSpd': None, # generator 2 generator overspeed
'Gen2GenRevPwr': None, # generator 2 generator reverse power
'Gen2GenUnderFreq': None, # generator 2 generator underfrequency
'Gen2GenUnderVolt': None, # generator 2 generator undervoltage
'Gen2HealthySt': None, # generator 2 healthy state
'Gen2I1Act': None, # generator 2 phase 1 actual current
'Gen2I2Act': None, # generator 2 phase 2 actual current
'Gen2I3Act': None, # generator 2 phase 3 actual current
'Gen2IAct': 'Igen2_A', # FIXME: this tag doesn't exist yet.
'Gen2InternalError7': None, # generator 2 internal error 7 (magic?)
'Gen2KwhTot': 'Egen2_kWh', # generator 2 kwh total
'Gen2LastStartMd': None, # generator 2 last start mode
'Gen2LdFctAct': None, # generator 2 actual load factor
'Gen2MainsOverFrq': None, # generator 2 mains overfrequency
'Gen2MainsOverVolt': None, # generator 2 mains overvoltage
'Gen2MainsUnderFrq': None, # generator 2 mains underfrequency
'Gen2MainsUnderVolt': None, # generator 2 mains undervoltage
'Gen2MainsVectJump': None, # generator 2 mains vector jump (wha?)
'Gen2MaintenanceCall': None,
'Gen2ManualMd': None, # generator 2 manual mode
'Gen2MCBFail': None, # generator 2 MCB failure
'Gen2NonCritAl': None, # generator 2 non-critical alarm
'Gen2OilPrAftAct': None, # generator 2 actual oil pressure after filter
'Gen2OilPrBefAct': None, # generator 2 actual oil pressure before filter
'Gen2OilTact': None, # generator 2 actual oil temperature
'Gen2OpenFailGCB': None, # generator 2 open fail generator circuit breaker
'Gen2OpenSt': None, # generator 2 open state
'Gen2Pact': 'Pgen2_kVA', # generator 2 actual power
'Gen2PderAct': None, # generator 2 actual derated power
'Gen2PreGlowSt': None, # generator 2 pre-glow state
'Gen2PsetSt': None, # generator 2 power setpoint state
'Gen2PwrFctAct': 'PFgen2', # generator 2 actual power factor
'Gen2PwrMonAl': None, # generator 2 power monitor alarm
'Gen2QAct': 'Qgen2_kVAr', # generator 2 actual reactive power
'Gen2RpmAct': None, # generator 2 actual RPM
'Gen2RunningTimeOut': None, # generator 2 generator running timeout
'Gen2RunSt': None, # generator 2 running state
'Gen2ScadaMd': None, # generator 2 SCADA mode
'Gen2StartCnt': None, # generator 2 total number of starts
'Gen2StartingSt': None, # generator 2 starting state
'Gen2StoppingFail': None, # generator 2 failed to stop alarm
'Gen2StoppingSt': None, # generator 2 stopping state
'Gen2StopSt': None, # generator 2 stop state
'Gen2SyncSt': None, # generator 2 synchronisation state
'Gen2SyncTimeExcd': None, # generator 2 sync time exceeded
'Gen2TempDeratedSt': None, #
'Gen2ThermOverCur': None, # generator 2 thermal overcurrent
'Gen2TimeoutSwchOnBlackBus': None, #
'Gen2TimeTillNextService': None, # generator 2 time until next service
'Gen2TrunTot': None, # generator 2 total run time
'Gen2U12Act': None, # generator 2 phase 1 to phase 2 voltage
'Gen2U1NAct': None, # generator 2 phase 1 to neutral voltage
'Gen2U23Act': None, # generator 2 phase 2 to phase 3 voltage
'Gen2U2NAct': None, # generator 2 phase 2 to neutral voltage
'Gen2U31Act': None, # generator 2 phase 3 to phase 1 voltage
'Gen2U3NAct': None, # generator 2 phase 3 to neutral voltage
'Gen2UnloadSt': None, # generator 2 unload state
'Gen2WarmUp': None, # generator 2 warm-up state
'Gen2WarnAl': None, # generator 2 warning alarm
'Gen2WatchdogPwr': None,
'Gen2WaterTinAct': None, # generator 2 actual water temp in
'Gen2WaterToutAct': None, # generator 2 actual water temp out
'Gen2WrongStart': None,
'Gen3AIOutOfRange': None, # generator 3 analogue input out of range
'Gen3AlarmSt': None, # generator 3 alarm state
'Gen3Asymmetry': None, # generator 3 unbalanced load alarm
'Gen3AutoMd': None, # generator 3 auto mode
'Gen3BatUnderVolt': None, # generator 3 battery under voltage
'Gen3BlackSt': None, # generator 3 black state
'Gen3ClosedSt': None, # generator 3 closed state
'Gen3CloseFailGCB': None, # generator 3 generator circuit breaker failed to close
'Gen3CommY1Y6': None,
'Gen3CoolDownSt': None, # generator 3 cool down state
'Gen3CritAl': None, # generator 3 critical alarm
'Gen3DigIOAl': None, # generator 3 digital I/O alarm
'Gen3ExhTempLeft': None, # generator 3 exhaust temperature left
'Gen3ExhTempRight': None, # generator 3 exhaust temperature right
'Gen3ExtOpenGCB': None, # generator 3 external generator circuit breaker open
'Gen3Fact': 'fgen3_Hz', # generator 3 actual frequency
'Gen3FirstStartMd': None, # generator 3 first start mode
'Gen3FtOpenAl': None, # generator 3 fail to open alarm
'Gen3FtStartAl': None, # generator 3 fail to start alarm
'Gen3FtStopAl': None, # generator 3 fail to stop alarm
'Gen3FtSyncAl': None, # generator 3 fail to sync alarm
'Gen3FuelConAct': None, # generator 3 fuel consumption
'Gen3FuelLitreCnt': None, # generator 3 fuel consumption counter
'Gen3GenFreq': None,
'Gen3GenLoadUnb': None,
'Gen3GenOverCur': None, # generator 3 generator overcurrent
'Gen3GenOverFreq': None, # generator 3 generator overfrequency
'Gen3GenOverload': None, # generator 3 generator overload
'Gen3GenOverSpd': None, # generator 3 generator overspeed
'Gen3GenRevPwr': None, # generator 3 generator reverse power
'Gen3GenUnderFreq': None, # generator 3 generator underfrequency
'Gen3GenUnderVolt': None, # generator 3 generator undervoltage
'Gen3HealthySt': None, # generator 3 healthy state
'Gen3I1Act': None, # generator 3 phase 1 actual current
'Gen3I2Act': None, # generator 3 phase 2 actual current
'Gen3I3Act': None, # generator 3 phase 3 actual current
'Gen3IAct': 'Igen3_A', # FIXME: this tag doesn't exist yet.
'Gen3InternalError7': None, # generator 3 internal error 7 (magic?)
'Gen3KwhTot': 'Egen3_kWh', # generator 3 kwh total
'Gen3LastStartMd': None, # generator 3 last start mode
'Gen3LdFctAct': None, # generator 3 actual load factor
'Gen3MainsOverFrq': None, # generator 3 mains overfrequency
'Gen3MainsOverVolt': None, # generator 3 mains overvoltage
'Gen3MainsUnderFrq': None, # generator 3 mains underfrequency
'Gen3MainsUnderVolt': None, # generator 3 mains undervoltage
'Gen3MainsVectJump': None, # generator 3 mains vector jump (wha?)
'Gen3MaintenanceCall': None,
'Gen3ManualMd': None, # generator 3 manual mode
'Gen3MCBFail': None, # generator 3 MCB failure
'Gen3NonCritAl': None, # generator 3 non-critical alarm
'Gen3OilPrAftAct': None, # generator 3 actual oil pressure after filter
'Gen3OilPrBefAct': None, # generator 3 actual oil pressure before filter
'Gen3OilTact': None, # generator 3 actual oil temperature
'Gen3OpenFailGCB': None, # generator 3 open fail generator circuit breaker
'Gen3OpenSt': None, # generator 3 open state
'Gen3Pact': 'Pgen3_kVA', # generator 3 actual power
'Gen3PderAct': None, # generator 3 actual derated power
'Gen3PreGlowSt': None, # generator 3 pre-glow state
'Gen3PsetSt': None, # generator 3 power setpoint state
'Gen3PwrFctAct': 'PFgen3', # generator 3 actual power factor
'Gen3PwrMonAl': None, # generator 3 power monitor alarm
'Gen3QAct': 'Qgen3_kVAr', # generator 3 actual reactive power
'Gen3RpmAct': None, # generator 3 actual RPM
'Gen3RunningTimeOut': None, # generator 3 generator running timeout
'Gen3RunSt': None, # generator 3 running state
'Gen3ScadaMd': None, # generator 3 SCADA mode
'Gen3StartCnt': None, # generator 3 total number of starts
'Gen3StartingSt': None, # generator 3 starting state
'Gen3StoppingFail': None, # generator 3 failed to stop alarm
'Gen3StoppingSt': None, # generator 3 stopping state
'Gen3StopSt': None, # generator 3 stop state
'Gen3SyncSt': None, # generator 3 synchronisation state
'Gen3SyncTimeExcd': None, # generator 3 sync time exceeded
'Gen3TempDeratedSt': None,
'Gen3ThermOverCur': None, # generator 3 thermal overcurrent
'Gen3TimeoutSwchOnBlackBus': None, #
'Gen3TimeTillNextService': None, # generator 3 time until next service
'Gen3TrunTot': None, # generator 3 total run time
'Gen3U12Act': None, # generator 3 phase 1 to phase 2 voltage
'Gen3U1NAct': None, # generator 3 phase 1 to neutral voltage
'Gen3U23Act': None, # generator 3 phase 2 to phase 3 voltage
'Gen3U2NAct': None, # generator 3 phase 2 to neutral voltage
'Gen3U31Act': None, # generator 3 phase 3 to phase 1 voltage
'Gen3U3NAct': None, # generator 3 phase 3 to neutral voltage
'Gen3UnloadSt': None, # generator 3 unload state
'Gen3WarmUp': None, # generator 3 warm-up state
'Gen3WarnAl': None, # generator 3 warning alarm
'Gen3WatchdogPwr': None,
'Gen3WaterTinAct': None, # generator 3 actual water temp in
'Gen3WaterToutAct': None, # generator 3 actual water temp out
'Gen3WrongStart': None,
'Gen4AIOutOfRange': None, # generator 4 analogue input out of range
'Gen4AlarmSt': None, # generator 4 alarm state
'Gen4Asymmetry': None, # generator 4 unbalanced load alarm
'Gen4AutoMd': None, # generator 4 auto mode
'Gen4BatUnderVolt': None, # generator 4 battery under voltage
'Gen4BlackSt': None, # generator 4 black state
'Gen4ClosedSt': None, # generator 4 closed state
'Gen4CloseFailGCB': None, # generator 4 generator circuit breaker failed to close
'Gen4CommY1Y6': None, #
'Gen4CoolDownSt': None, # generator 4 cool down state
'Gen4CritAl': None, # generator 4 critical alarm
'Gen4DigIOAl': None, # generator 4 digital I/O alarm
'Gen4ExhTempLeft': None, # generator 4 exhaust temperature left
'Gen4ExhTempRight': None, # generator 4 exhaust temperature right
'Gen4ExtOpenGCB': None, # generator 4 external generator circuit breaker open
'Gen4Fact': 'fgen4_Hz', # generator 4 actual frequency
'Gen4FirstStartMd': None, # generator 4 first start mode
'Gen4FtOpenAl': None, # generator 4 fail to open alarm
'Gen4FtStartAl': None, # generator 4 fail to start alarm
'Gen4FtStopAl': None, # generator 4 fail to stop alarm
'Gen4FtSyncAl': None, # generator 4 fail to sync alarm
'Gen4FuelConAct': None, # generator 4 fuel consumption
'Gen4FuelLitreCnt': None, # generator 4 fuel consumption counter
'Gen4GenFreq': None,
'Gen4GenLoadUnb': None,
'Gen4GenOverCur': None, # generator 4 generator overcurrent
'Gen4GenOverFreq': None, # generator 4 generator overfrequency
'Gen4GenOverload': None, # generator 4 generator overload
'Gen4GenOverSpd': None, # generator 4 generator overspeed
'Gen4GenRevPwr': None, # generator 4 generator reverse power
'Gen4GenUnderFreq': None, # generator 4 generator underfrequency
'Gen4GenUnderVolt': None, # generator 4 generator undervoltage
'Gen4HealthySt': None, # generator 4 healthy state
'Gen4I1Act': None, # generator 4 phase 1 actual current
'Gen4I2Act': None, # generator 4 phase 2 actual current
'Gen4I3Act': None, # generator 4 phase 3 actual current
'Gen4IAct': 'Igen4_A', # FIXME: this tag doesn't exist yet.
'Gen4InternalError7': None, # generator 4 internal error 7 (magic?)
'Gen4KwhTot': 'Egen4_kWh', # generator 4 kwh total
'Gen4LastStartMd': None, # generator 4 last start mode
'Gen4LdFctAct': None, # generator 4 actual load factor
'Gen4MainsOverFrq': None, # generator 4 mains overfrequency
'Gen4MainsOverVolt': None, # generator 4 mains overvoltage
'Gen4MainsUnderFrq': None, # generator 4 mains underfrequency
'Gen4MainsUnderVolt': None, # generator 4 mains undervoltage
'Gen4MainsVectJump': None, # generator 4 mains vector jump (wha?)
'Gen4MaintenanceCall': None,
'Gen4ManualMd': None, # generator 4 manual mode
'Gen4MCBFail': None, # generator 4 MCB failure
'Gen4NonCritAl': None, # generator 4 non-critical alarm
'Gen4OilPrAftAct': None, # generator 4 actual oil pressure after filter
'Gen4OilPrBefAct': None, # generator 4 actual oil pressure before filter
'Gen4OilTact': None, # generator 4 actual oil temperature
'Gen4OpenFailGCB': None, # generator 4 open fail generator circuit breaker
'Gen4OpenSt': None, # generator 4 open state
'Gen4Pact': 'Pgen4_kVA', # generator 4 actual power
'Gen4PderAct': None, # generator 4 actual derated power
'Gen4PreGlowSt': None, # generator 4 pre-glow state
'Gen4PsetSt': None, # generator 4 power setpoint state
'Gen4PwrFctAct': 'PFgen4', # generator 4 actual power factor
'Gen4PwrMonAl': None, # generator 4 power monitor alarm
'Gen4QAct': 'Qgen4_kVAr', # generator 4 actual reactive power
'Gen4RpmAct': None, # generator 4 actual RPM
'Gen4RunningTimeOut': None, # generator 4 generator running timeout
'Gen4RunSt': None, # generator 4 running state
'Gen4ScadaMd': None, # generator 4 SCADA mode
'Gen4StartCnt': None, # generator 4 total number of starts
'Gen4StartingSt': None, # generator 4 starting state
'Gen4StoppingFail': None, # generator 4 failed to stop alarm
'Gen4StoppingSt': None, # generator 4 stopping state
'Gen4StopSt': None, # generator 4 stop state
'Gen4SyncSt': None, # generator 4 synchronisation state
'Gen4SyncTimeExcd': None, # generator 4 sync time exceeded
'Gen4TempDeratedSt': None,
'Gen4ThermOverCur': None, # generator 4 thermal overcurrent
'Gen4TimeoutSwchOnBlackBus': None, #
'Gen4TimeTillNextService': None, # generator 4 time until next service
'Gen4TrunTot': None, # generator 4 total run time
'Gen4U12Act': None, # generator 4 phase 1 to phase 2 voltage
'Gen4U1NAct': None, # generator 4 phase 1 to neutral voltage
'Gen4U23Act': None, # generator 4 phase 2 to phase 3 voltage
'Gen4U2NAct': None, # generator 4 phase 2 to neutral voltage
'Gen4U31Act': None, # generator 4 phase 3 to phase 1 voltage
'Gen4U3NAct': None, # generator 4 phase 3 to neutral voltage
'Gen4UnloadSt': None, # generator 4 unload state
'Gen4WarmUp': None, # generator 4 warm-up state
'Gen4WarnAl': None, # generator 4 warning alarm
'Gen4WatchdogPwr': None,
'Gen4WaterTinAct': None, # generator 4 actual water temp in
'Gen4WaterToutAct': None, # generator 4 actual water temp out
'Gen4WrongStart': None,
'GenActCfgPwr': None,
'GenActCfgSetsGen1': None,
'GenActCfgSetsGen2': None,
'GenActCfgSetsGen3': None,
'GenActCfgSetsGen4': None,
'GenActCfgSetsGen5': None,
'GenActCfgSetsGen6': None,
'GenActCfgSetsGen7': None,
'GenActCfgSetsGen8': None,
'GenBlackSt': None,
'GenCfgOnlSt': None,
'GenNoAvailSt': None,
'GenNonManStopSt': None,
'GenPact': None,
'GenPcfgSet': None,
'GenPwrUpSt': None,
'GenRunSt': None,
'GenShutdownSt': None,
'GenStopSt': None,
'GenSWDownSt': None,
'GenSWUpSt': None,
'GenTransSt': None,
'OIL_STOCK_PIML': None,
'PSetP': None,
'PSetQ': None,
'PvAvailP': None,
'PvMgrPvStRegCriticalAl': None,
'PvMgrPvStRegNoAvailSt': None,
'PvMgrPvStRegNonCriticalAl': None,
'PvMgrPvStRegPwrUpSt': None,
'PvMgrPvStRegRunSt': None,
'PvMgrPvStRegShutdownSt': None,
'PvMgrPvStRegStopSt': None,
'PvMgrPvStRegTransSt': None,
'PVP': 'Ppv_kW',
'PVQ': 'Qpv_kVAr',
'PvStReg': None,
'SkyCam1_10mEstPct': None,
'SkyCam1_10mOk': None,
'SkyCam1_2m10Ok': None,
'SkyCam1_2m1Ok': None,
'SkyCam1_2m2Ok': None,
'SkyCam1_2m3Ok': None,
'SkyCam1_2m4Ok': None,
'SkyCam1_2m5Ok': None,
'SkyCam1_2m6Ok': None,
'SkyCam1_2m7Ok': None,
'SkyCam1_2m8Ok': None,
'SkyCam1_2m9Ok': None,
'SkyCam1_2mEstPct': None,
'SkyCam1_2mOk': None,
'SkyCam1_30mEstPct': None,
'SkyCam1_30mOk': None,
'SkyCam1_Alarm': None,
'SkyCam1_CloudPct': None,
'SkyCam1_SupplierSpec': None,
'SkyCam1_Watchdog': None,
'SkyCam2_10mEstPct': None,
'SkyCam2_10mOk': None,
'SkyCam2_2m10Ok': None,
'SkyCam2_2m1Ok': None,
'SkyCam2_2m2Ok': None,
'SkyCam2_2m3Ok': None,
'SkyCam2_2m4Ok': None,
'SkyCam2_2m5Ok': None,
'SkyCam2_2m6Ok': None,
'SkyCam2_2m7Ok': None,
'SkyCam2_2m8Ok': None,
'SkyCam2_2m9Ok': None,
'SkyCam2_2mEstPct': None,
'SkyCam2_2mOk': None,
'SkyCam2_30mEstPct': None,
'SkyCam2_30mOk': None,
'SkyCam2_Alarm': None,
'SkyCam2_CloudPct': None,
'SkyCam2_SupplierSpec': None,
'SkyCam2_Watchdog': None,
'SkyCam3_10mEstPct': None,
'SkyCam3_10mOk': None,
'SkyCam3_2m10Ok': None,
'SkyCam3_2m1Ok': None,
'SkyCam3_2m2Ok': None,
'SkyCam3_2m3Ok': None,
'SkyCam3_2m4Ok': None,
'SkyCam3_2m5Ok': None,
'SkyCam3_2m6Ok': None,
'SkyCam3_2m7Ok': None,
'SkyCam3_2m8Ok': None,
'SkyCam3_2m9Ok': None,
'SkyCam3_2mEstPct': None,
'SkyCam3_2mOk': None,
'SkyCam3_30mEstPct': None,
'SkyCam3_30mOk': None,
'SkyCam3_Alarm': None,
'SkyCam3_CloudPct': None,
'SkyCam3_SupplierSpec': None,
'SkyCam3_Watchdog': None,
'StatBattChrgFail': None,
'StatBlackAl': None,
'StatEmrgStopAl': None,
'StatFact': None,
'StatFireAl': None,
'StatIntruderAl': None,
'StatLackOfCapAl': None,
'StatNoFedAl': None,
'StatNoGenAl': None,
'StatOverLoadAl': None,
'StatPact': None,
'StatPcloseAct': None,
'StatPconsAct': None,
'StatPllpAct': None,
'StatPowerMonAl': None,
'StatPspinAct': None,
'StatPTotalAct': None,
'StatPwrFctAct': None,
'StatPwrSupplyFailAl': None,
'StatPwrUpSt': None,
'StatQact': None,
'StatRunSt': None,
'StatShutdownSt': None,
'StatStatBlackCnt': None,
'StatStatKwhTot': None,
'StatStatMaxDemTot': None,
'StatStatTblackCnt': None,
'StatStopSt': None,
'StatTempSensAl': None,
'StatUact': None,
'StatUnderFAl': None,
'SYSTIMACT': None,
'SysTimeAct': None,
#
# Weather station data
#
'StatWindSpd': 'vwind_m/s',
'StatWindDir': 'dwind_deg',
'StatAmbTemp': 'Tamb_degC',
'PvCellTemp': 'Tcell_degC',
'StatRainfall': 'Rain_mm',
'StatRelHum': 'Hum_%',
'StatGHI': 'Gghi_W/m2'
}
def transform(pitag):
    """
    Translate a PI tag name into its output column name via ``tags``.

    Raises ValueError both for names absent from the table and for names
    present but mapped to None.

    >>> transform('foobar')
    Traceback (most recent call last):
    ...
    ValueError: unknown tag foobar
    >>> transform('StatQact')
    Traceback (most recent call last):
    ...
    ValueError: StatQact has no mapping
    >>> transform('Gen1Pact')
    'Pgen1_kVA'
    """
    if pitag not in tags:
        raise ValueError('unknown tag %s' % pitag)
    mapped = tags[pitag]
    if mapped is None:
        raise ValueError('%s has no mapping' % pitag)
    return mapped
|
bsd-3-clause
|
happy56/kivy
|
examples/widgets/lists/list_cascade_images.py
|
3
|
4936
|
from kivy.adapters.dictadapter import DictAdapter
from kivy.uix.selectableview import SelectableView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.listview import ListView, ListItemButton
from kivy.lang import Builder
from kivy.factory import Factory
from fixtures import fruit_categories, fruit_data
from fruit_detail_view import FruitImageDetailView
# This is a copy of list_cascade.py with image thumbnails added to the list
# item views and a larger image shown in the detail view for the selected
# fruit. It uses the kv template method for providing the list item view to
# the listview showing the list of fruits for a selected category.
Factory.register('SelectableView', cls=SelectableView)
Factory.register('ListItemButton', cls=ListItemButton)
# [TODO] Problem: Had to add index here, to get it from ctx. Might need a
# "selection_template" to do this for the dev? Or is this
# the task of the dev to know and follow this need to
# code for index?
Builder.load_string('''
[ThumbnailedListItem@SelectableView+BoxLayout]:
index: ctx.index
fruit_name: ctx.text
size_hint_y: ctx.size_hint_y
height: ctx.height
Image
source: "fruit_images/{0}.32.jpg".format(ctx.text)
ListItemButton:
index: ctx.index
text: ctx.text
''')
# A custom adapter is needed here, because we must transform the selected
# fruit category into the list of fruit keys for that category.
#
class FruitsDictAdapter(DictAdapter):
    """DictAdapter whose keys track the currently selected fruit category."""

    def fruit_category_changed(self, fruit_categories_adapter, *args):
        """Selection-change callback: repopulate with the fruits of the
        newly selected category, or clear when nothing is selected."""
        selection = fruit_categories_adapter.selection
        if not selection:
            self.data = {}
            return
        category = fruit_categories[str(selection[0])]
        self.sorted_keys = category['fruits']
class CascadingView(GridLayout):
    '''Implementation of a cascading style display, with a scrollable list
    of fruit categories on the left, a list of thumbnailed fruit items for the
    selected category in the middle, and a detail view on the right that shows
    a larger fruit image with data.

    See list_cascade_dict.py for the same example without images.
    '''

    def __init__(self, **kwargs):
        # Three columns: categories | fruit list | detail view.
        kwargs['cols'] = 3
        kwargs['size_hint'] = (1.0, 1.0)
        super(CascadingView, self).__init__(**kwargs)

        # Converts one data record into constructor kwargs for a plain
        # ListItemButton row.
        list_item_args_converter = \
                lambda row_index, rec: {'text': rec['name'],
                                        'size_hint_y': None,
                                        'height': 25}

        # Fruit categories list on the left:
        #
        categories = sorted(fruit_categories.keys())
        fruit_categories_list_adapter = \
            DictAdapter(
                    sorted_keys=categories,
                    data=fruit_categories,
                    args_converter=list_item_args_converter,
                    selection_mode='single',
                    # first category is auto-selected on startup
                    allow_empty_selection=False,
                    cls=ListItemButton)
        fruit_categories_list_view = \
            ListView(adapter=fruit_categories_list_adapter,
                     size_hint=(.2, 1.0))
        self.add_widget(fruit_categories_list_view)

        # Fruits, for a given category, in the middle:
        #
        # Same record-to-kwargs conversion, but taller rows to make room
        # for the 32px thumbnail image.
        image_list_item_args_converter = \
                lambda row_index, rec: {'text': rec['name'],
                                        'size_hint_y': None,
                                        'height': 32}
        fruits_list_adapter = \
            FruitsDictAdapter(
                    sorted_keys=fruit_categories[categories[0]]['fruits'],
                    data=fruit_data,
                    args_converter=image_list_item_args_converter,
                    selection_mode='single',
                    allow_empty_selection=False,
                    # kv template registered at module level above
                    template='ThumbnailedListItem')
        fruits_list_view = \
            ListView(adapter=fruits_list_adapter,
                     size_hint=(.2, 1.0))

        # Cascade: category selection drives the fruit list's contents.
        fruit_categories_list_adapter.bind(
            on_selection_change=fruits_list_adapter.fruit_category_changed)

        self.add_widget(fruits_list_view)

        # Detail view, for a given fruit, on the right:
        #
        # Seed the detail view from the auto-selected first fruit.
        detail_view = FruitImageDetailView(
            fruit_name=fruits_list_adapter.selection[0].fruit_name,
            size_hint=(.6, 1.0))

        # Cascade: fruit selection drives the detail view.
        fruits_list_adapter.bind(
            on_selection_change=detail_view.fruit_changed)
        self.add_widget(detail_view)
if __name__ == '__main__':
    from kivy.base import runTouchApp

    # All fruit categories will be shown in the left list (first column),
    # and the first category will be auto-selected -- Melons. So, the
    # middle list starts out showing the melon fruits.
    runTouchApp(CascadingView(width=800))
|
lgpl-3.0
|
aquavitae/aafigure
|
aafigure/aafigure.py
|
1
|
46035
|
#!/usr/bin/env python
"""\
ASCII art to image converter.
This is the main module that contains the parser.
See svg.py and aa.py for output modules, that can render the parsed structure.
(C) 2006-2009 Chris Liechti <[email protected]>
This is open source software under the BSD license. See LICENSE.txt for more
details.
"""
import codecs
from .error import UnsupportedFormatError
from .shapes import *
from unicodedata import east_asian_width
import sys
# One text-grid cell maps to NOMINAL_SIZE output units (x additionally
# scaled by the aspect ratio -- see AsciiArtImage.left() and friends).
NOMINAL_SIZE = 2

# Classification tags recorded per character cell once it has been
# consumed by a recognized shape.
CLASS_LINE = 'line'
CLASS_STRING = 'str'
CLASS_RECTANGLE = 'rect'
CLASS_JOIN = 'join'
CLASS_FIXED = 'fixed'

# Default rendering options; output modules and callers may override.
DEFAULT_OPTIONS = dict(
    background = '#ffffff',
    foreground = '#000000',
    line_width = 2.0,
    scale = 1.0,
    aspect = 1.0,
    format = 'svg',        # see svg.py / aa.py for the output backends
    debug = False,
    textual = False,
    proportional = False,
    encoding = 'utf-8',
    widechars = 'F,W',     # east_asian_width classes rendered 2 cells wide
)

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class AsciiArtImage:
    """This class hold a ASCII art figure and has methods to parse it.
    The resulting list of shapes is also stored here.

    The image is parsed in 2 steps:

    1. horizontal string detection.
    2. generic shape detection.

    Each character that is used in a shape or string is tagged. So that
    further searches don't include it again (e.g. text in a string touching
    a fill), respectively can use it correctly (e.g. join characters when
    two or more lines hit).
    """

    # Characters that delimit quoted text (see recognize()).
    QUOTATION_CHARACTERS = list('"\'`')
def __init__(self, text, aspect_ratio=1, textual=False, widechars='F,W'):
    """Take a ASCII art figure and store it, prepare for ``recognize``"""
    self.aspect_ratio = float(aspect_ratio)
    self.textual = textual
    # XXX TODO tab expansion
    # Display width (in cells) per east_asian_width class: 2 for the
    # classes named in ``widechars``, 1 for everything else.
    wide_classes = set(widechars.split(','))
    charwidths = {key: (2 if key in wide_classes else 1)
                  for key in ('F', 'H', 'W', 'Na', 'A', 'N')}
    # Detect size of input image, store as list of lines.
    self.image = []
    max_x = 0
    for line in text.splitlines():
        widths = [charwidths[east_asian_width(c)] for c in line]
        # extend length by 1 for each wide glyph
        max_x = max(max_x, sum(widths))
        # pad a space after each wide glyph so one string index equals
        # one display cell
        self.image.append(''.join(c + ' '*(w - 1) for c, w in zip(line, widths)))
    self.width = max_x
    self.height = len(self.image)
    # make sure it's rectangular (extend short lines to max width)
    self.image = [line.ljust(max_x) for line in self.image]
    # initialize other data structures
    self.classification = [[None]*self.width for _ in range(self.height)]
    self.shapes = []
    self.nominal_size = NOMINAL_SIZE
def __str__(self):
"""Return the original image"""
return '\n'.join([self.image[y] for y in range(self.height)])
def get(self, x, y):
"""Get character from image. Gives no error for access out of
bounds, just returns a space. This simplifies the scanner
functions.
"""
if 0 <= x < self.width and 0 <= y < self.height:
return self.image[y][x]
else:
return ' '
def tag(self, coordinates, classification):
"""Tag coordinates as used, store classification"""
for x, y in coordinates:
self.classification[y][x] = classification
def cls(self, x, y):
    """get tag at coordinate"""
    # NOTE(review): negative coordinates wrap around via Python indexing
    # rather than returning 'outside' -- presumably callers only pass
    # non-negative values; verify before relying on this.
    try:
        return self.classification[y][x]
    except IndexError:
        return 'outside'

# Coordinate conversion and shifting: map grid cells to output units.
# x is scaled by the aspect ratio, y is not (see NOMINAL_SIZE).
def left(self, x):
    # left edge of cell column x
    return x*NOMINAL_SIZE*self.aspect_ratio

def hcenter(self, x):
    # horizontal center of cell column x
    return (x + 0.5)*NOMINAL_SIZE*self.aspect_ratio

def right(self, x):
    # right edge of cell column x
    return (x + 1)*NOMINAL_SIZE*self.aspect_ratio

def top(self, y):
    # top edge of cell row y
    return y*NOMINAL_SIZE

def vcenter(self, y):
    # vertical center of cell row y
    return (y + 0.5)*NOMINAL_SIZE

def bottom(self, y):
    # bottom edge of cell row y
    return (y + 1)*NOMINAL_SIZE
def recognize(self):
    """
    Try to convert ASCII art to vector graphics. The result is stored in
    ``self.shapes``.

    Three passes over the grid, each skipping cells already classified
    by an earlier pass: (1) quoted text, (2) lines / fills / fixed
    symbols, (3) remaining non-space characters as short strings.
    """
    # XXX search for symbols
    #~ #search for long strings
    #~ for y in range(self.height):
    #~     for x in range(self.width):
    #~         character = self.image[y][x]
    #~         if self.classification[y][x] is None:
    #~             if character.isalnum():
    #~                 self.shapes.extend(
    #~                     self._follow_horizontal_string(x, y)
    #~                 )
    # search for quoted texts
    for y in range(self.height):
        for x in range(self.width):
            #if not yet classified, check for a line
            character = self.image[y][x]
            if character in self.QUOTATION_CHARACTERS and self.classification[y][x] is None:
                self.shapes.extend(
                    self._follow_horizontal_string(x, y, quoted=True))
    # search for standard shapes
    for y in range(self.height):
        for x in range(self.width):
            #if not yet classified, check for a line
            character = self.image[y][x]
            if self.classification[y][x] is None:
                if character == '-':
                    self.shapes.extend(self._follow_horizontal_line(x, y))
                elif character == '|':
                    self.shapes.extend(self._follow_vertical_line(x, y))
                elif character == '_':
                    # bottom-aligned horizontal line
                    self.shapes.extend(self._follow_lower_horizontal_line(x, y))
                elif character == '~':
                    # top-aligned horizontal line
                    self.shapes.extend(self._follow_upper_horizontal_line(x, y))
                elif character == '=':
                    self.shapes.extend(self._follow_horizontal_line(x, y, thick=True))
                elif character in '\\/':
                    self.shapes.extend(self._follow_rounded_edge(x, y))
                elif character == '+':
                    self.shapes.extend(self._plus_joiner(x, y))
                elif character in self.FIXED_CHARACTERS:
                    self.shapes.extend(self.get_fixed_character(character)(x, y))
                    self.tag([(x, y)], CLASS_FIXED)
                elif character in self.FILL_CHARACTERS:
                    # a fill only starts when the same character repeats in
                    # a neighboring cell (below only, in textual mode, so
                    # single fill letters can still be read as text)
                    if self.textual:
                        if self.get(x, y+1) == character:
                            self.shapes.extend(self._follow_fill(character, x, y))
                    else:
                        if (self.get(x+1, y) == character or self.get(x, y+1) == character):
                            self.shapes.extend(self._follow_fill(character, x, y))
    # search for short strings too
    for y in range(self.height):
        for x in range(self.width):
            character = self.image[y][x]
            if self.classification[y][x] is None:
                if character != ' ':
                    self.shapes.extend(self._follow_horizontal_string(x, y, accept_anything=True))
# - - - - - - - - - helper function for some shapes - - - - - - - - -
# Arrow drawing functions return the (new) starting point of the line and a
# list of shapes that draw the arrow. The line itself is not included in
# the list of shapes. The starting point is p1, possibly modified to match
# the shape of the arrow head.
#
# Use complex numbers as 2D vectors as that means easy transformations like
# scaling, rotation and translation

# - - - - - - - - - arrows - - - - - - - - -
def _standard_arrow(self, p1, p2):
    """-->

    return a possibly modified starting point and a list of shapes
    """
    # unit vector pointing from p2 towards the tip at p1
    direction_vector = p1 - p2
    direction_vector /= abs(direction_vector)
    return p1, [
        Line(p1, p1-direction_vector*1.5+direction_vector*0.5j),
        Line(p1, p1-direction_vector*1.5+direction_vector*-0.5j)
    ]

def _reversed_arrow(self, p1, p2):
    """--<"""
    direction_vector = p1 - p2
    direction_vector /= abs(direction_vector)
    # line is shortened by 2 units so it starts behind the head
    return p1-direction_vector*2, [
        Line(p1-direction_vector*2.0, p1+direction_vector*(-0.5+0.5j)),
        Line(p1-direction_vector*2.0, p1+direction_vector*(-0.5-0.5j))
    ]

def _circle_head(self, p1, p2, radius=0.5):
    """--o"""
    direction_vector = p1 - p2
    direction_vector /= abs(direction_vector)
    # circle centered one unit short of p1; line stops at the center
    return p1-direction_vector, [Circle(p1-direction_vector, radius)]

def _large_circle_head(self, p1, p2):
    """--O"""
    return self._circle_head(p1, p2, radius=0.9)

def _rectangular_head(self, p1, p2):
    """--#"""
    direction_vector = p1 - p2
    direction_vector /= abs(direction_vector)
    #~ return p1-direction_vector*1.414, [
    #~     Rectangle(p1-direction_vector-direction_vector*(0.707+0.707j),
    #~               p1-direction_vector+direction_vector*(0.707+0.707j))
    #~ ]
    # a square (drawn as 4 lines, rotated 45 deg via the 0.707 factors)
    return p1-direction_vector*1.707, [
        Line(p1-direction_vector-direction_vector*(0.707+0.707j),
             p1-direction_vector-direction_vector*(0.707-0.707j)),
        Line(p1-direction_vector+direction_vector*(0.707+0.707j),
             p1-direction_vector+direction_vector*(0.707-0.707j)),
        Line(p1-direction_vector-direction_vector*(0.707+0.707j),
             p1-direction_vector+direction_vector*(0.707-0.707j)),
        Line(p1-direction_vector-direction_vector*(0.707-0.707j),
             p1-direction_vector+direction_vector*(0.707+0.707j)),
    ]

# the same character can mean a different thing, depending from where the
# line is coming. this table maps line direction (dx,dy) and the arrow
# character to a arrow drawing function
ARROW_TYPES = [
    #chr  dx  dy  arrow type
    ('>',  1,  0, '_standard_arrow'),
    ('<', -1,  0, '_standard_arrow'),
    ('^',  0, -1, '_standard_arrow'),
    ('A',  0, -1, '_standard_arrow'),
    ('V',  0,  1, '_standard_arrow'),
    ('v',  0,  1, '_standard_arrow'),
    ('>', -1,  0, '_reversed_arrow'),
    ('<',  1,  0, '_reversed_arrow'),
    ('^',  0,  1, '_reversed_arrow'),
    ('V',  0, -1, '_reversed_arrow'),
    ('v',  0, -1, '_reversed_arrow'),
    ('o',  1,  0, '_circle_head'),
    ('o', -1,  0, '_circle_head'),
    ('o',  0, -1, '_circle_head'),
    ('o',  0,  1, '_circle_head'),
    ('O',  1,  0, '_large_circle_head'),
    ('O', -1,  0, '_large_circle_head'),
    ('O',  0, -1, '_large_circle_head'),
    ('O',  0,  1, '_large_circle_head'),
    ('#',  1,  0, '_rectangular_head'),
    ('#', -1,  0, '_rectangular_head'),
    ('#',  0, -1, '_rectangular_head'),
    ('#',  0,  1, '_rectangular_head'),
]

# every character that may terminate a line as an arrow head
ARROW_HEADS = list('<>AVv^oO#')
def get_arrow(self, character, dx, dy):
"""return arrow drawing function or None"""
for head, ddx, ddy, function_name in self.ARROW_TYPES:
if character == head and dx == ddx and dy == ddy:
return getattr(self, function_name)
# - - - - - - - - - fills - - - - - - - - -
# Fill functions return a list of shapes. Each one if covering one cell
# size.

# Diagonal hatches: n strokes per cell, left- or right-leaning, and
# crossed variants combining both directions.
def _hatch_left(self, x, y):
    return self._n_hatch_diagonal(x, y, 1, True)

def _hatch_right(self, x, y):
    return self._n_hatch_diagonal(x, y, 1, False)

def _cross_hatch(self, x, y):
    return self._n_hatch_diagonal(x, y, 1, True) + \
           self._n_hatch_diagonal(x, y, 1, False)

def _double_hatch_left(self, x, y):
    return self._n_hatch_diagonal(x, y, 2, True)

def _double_hatch_right(self, x, y):
    return self._n_hatch_diagonal(x, y, 2, False)

def _double_cross_hatch(self, x, y):
    return self._n_hatch_diagonal(x, y, 2, True) + \
           self._n_hatch_diagonal(x, y, 2, False)

def _triple_hatch_left(self, x, y):
    return self._n_hatch_diagonal(x, y, 3, True)

def _triple_hatch_right(self, x, y):
    return self._n_hatch_diagonal(x, y, 3, False)

def _triple_cross_hatch(self, x, y):
    return self._n_hatch_diagonal(x, y, 3, True) + \
           self._n_hatch_diagonal(x, y, 3, False)

def _n_hatch_diagonal(self, x, y, n, left=False):
    """hatch generator function: n diagonal strokes across cell (x, y)."""
    d = 1/float(n)
    result = []
    if left:
        for i in range(n):
            result.append(Line(
                Point(self.left(x), self.top(y+d*i)),
                Point(self.right(x-d*i), self.bottom(y))
            ))
            # NOTE: the second stroke below reuses ``i`` after this loop
        if n:
            # complementary stroke for the corner; relies on ``i`` still
            # holding the last loop value (guarded by ``if n``)
            result.append(Line(
                Point(self.right(x-d*i), self.top(y)),
                Point(self.right(x), self.top(y+d*i))
            ))
    else:
        for i in range(n):
            result.append(Line(Point(self.left(x), self.top(y+d*i)), Point(self.left(x+d*i), self.top(y))))
        if n:
            # complementary corner stroke, same post-loop ``i`` reuse
            result.append(Line(Point(self.left(x+d*i), self.bottom(y)), Point(self.right(x), self.top(y+d*i))))
    return result

# Straight hatches: n vertical and/or horizontal strokes per cell.
def _hatch_v(self, x, y):
    return self._n_hatch_straight(x, y, 1, True)

def _hatch_h(self, x, y):
    return self._n_hatch_straight(x, y, 1, False)

def _hv_hatch(self, x, y):
    return self._n_hatch_straight(x, y, 1, True) + \
           self._n_hatch_straight(x, y, 1, False)

def _double_hatch_v(self, x, y):
    return self._n_hatch_straight(x, y, 2, True)

def _double_hatch_h(self, x, y):
    return self._n_hatch_straight(x, y, 2, False)

def _double_hv_hatch(self, x, y):
    return self._n_hatch_straight(x, y, 2, True) + \
           self._n_hatch_straight(x, y, 2, False)

def _triple_hatch_v(self, x, y):
    return self._n_hatch_straight(x, y, 3, True)

def _triple_hatch_h(self, x, y):
    return self._n_hatch_straight(x, y, 3, False)

def _triple_hv_hatch(self, x, y):
    return self._n_hatch_straight(x, y, 3, True) + \
           self._n_hatch_straight(x, y, 3, False)

def _n_hatch_straight(self, x, y, n, vertical=False):
    """hatch generator function: n evenly offset straight strokes."""
    d = 1/float(n)
    offset = 1.0/(n+1)
    result = []
    if vertical:
        for i in range(n):
            i = i + offset
            result.append(Line(
                Point(self.left(x+d*i), self.top(y)),
                Point(self.left(x+d*i), self.bottom(y))
            ))
        #~ if n:
        #~     result.append(Line(Point(self.right(x-d*i), self.top(y)), Point(self.right(x), self.top(y+d*i))))
    else:
        for i in range(n):
            i = i + offset
            result.append(Line(
                Point(self.left(x), self.top(y+d*i)),
                Point(self.right(x), self.top(y+d*i))
            ))
        #~ if n:
        #~     result.append(Line(Point(self.left(x+d*i), self.bottom(y)), Point(self.right(x), self.top(y+d*i))))
    return result
def _fill_trail(self, x, y):
    # two short diagonal "footprint" strokes per cell
    return [
        Line(
            Point(self.left(x+0.707), self.top(y)),
            Point(self.right(x), self.bottom(y-0.707))
        ),
        Line(
            Point(self.left(x), self.top(y+0.707)),
            Point(self.right(x-0.707), self.bottom(y))
        )
    ]

def _fill_foreground(self, x, y):
    # solid cell
    return [
        Rectangle(
            Point(self.left(x), self.top(y)),
            Point(self.right(x), self.bottom(y))
        )
    ]

def _fill_background(self, x, y):
    # intentionally draws nothing (area is only consumed/classified)
    return []

def _fill_small_circle(self, x, y):
    return [
        Circle(Point(self.left(x+0.5), self.top(y+0.5)), 0.2)
    ]

def _fill_medium_circle(self, x, y):
    return [
        Circle(Point(self.left(x+0.5), self.top(y+0.5)), 0.4)
    ]

def _fill_large_circle(self, x, y):
    return [
        Circle(Point(self.left(x+0.5), self.top(y+0.5)), 0.9)
    ]

def _fill_qmark(self, x, y):
    return [
        Label(Point(self.left(x), self.bottom(y)), '?')
    ]

def _fill_triangles(self, x, y):
    # NOTE(review): second line starts at x+0.7 while the first ends at
    # x+0.75 -- looks like a typo leaving a small gap in the triangle;
    # confirm against rendered output before changing.
    return [
        Line(Point(self.left(x+0.5), self.top(y+0.2)), Point(self.left(x+0.75), self.top(y+0.807))),
        Line(Point(self.left(x+0.7), self.top(y+0.807)), Point(self.left(x+0.25), self.top(y+0.807))),
        Line(Point(self.left(x+0.25), self.top(y+0.807)), Point(self.left(x+0.5), self.top(y+0.2))),
    ]

# Maps the (upper case) fill character to its drawing method.
FILL_TYPES = [
    ('A', '_hatch_left'),
    ('B', '_hatch_right'),
    ('C', '_cross_hatch'),
    ('D', '_double_hatch_left'),
    ('E', '_double_hatch_right'),
    ('F', '_double_cross_hatch'),
    ('G', '_triple_hatch_left'),
    ('H', '_triple_hatch_right'),
    ('I', '_triple_cross_hatch'),
    ('J', '_hatch_v'),
    ('K', '_hatch_h'),
    ('L', '_hv_hatch'),
    ('M', '_double_hatch_v'),
    ('N', '_double_hatch_h'),
    ('O', '_double_hv_hatch'),
    ('P', '_triple_hatch_v'),
    ('Q', '_triple_hatch_h'),
    ('R', '_triple_hv_hatch'),
    ('S', '_fill_qmark'),
    ('T', '_fill_trail'),
    ('U', '_fill_small_circle'),
    ('V', '_fill_medium_circle'),
    ('W', '_fill_large_circle'),
    ('X', '_fill_foreground'),
    ('Y', '_fill_triangles'),
    ('Z', '_fill_background'),
]

# Both cases are accepted as fill characters; upper case additionally
# draws a border (see _follow_fill).
FILL_CHARACTERS = ''.join([t+t.lower() for (t, f) in FILL_TYPES])
def get_fill(self, character):
"""return fill function"""
for head, function_name in self.FILL_TYPES:
if character == head:
return getattr(self, function_name)
raise ValueError('no such fill type')
# - - - - - - - - - fixed characters and their shapes - - - - - - - - -
def _open_triangle_left(self, x, y):
    # '{' -- two lines forming a '<' opening to the right
    return [
        Line(
            Point(self.left(x), self.vcenter(y)),
            Point(self.right(x), self.top(y))
        ),
        Line(
            Point(self.left(x), self.vcenter(y)),
            Point(self.right(x), self.bottom(y))
        )
    ]

def _open_triangle_right(self, x, y):
    # '}' -- two lines forming a '>' opening to the left
    return [
        Line(
            Point(self.right(x), self.vcenter(y)),
            Point(self.left(x), self.top(y))
        ),
        Line(
            Point(self.right(x), self.vcenter(y)),
            Point(self.left(x), self.bottom(y))
        )
    ]

def _circle(self, x, y):
    # '*' -- circle filling the cell
    return [
        Circle(Point(self.hcenter(x), self.vcenter(y)), NOMINAL_SIZE/2.0)
    ]

# Characters that always map to a fixed shape, regardless of context.
FIXED_TYPES = [
    ('{', '_open_triangle_left'),
    ('}', '_open_triangle_right'),
    ('*', '_circle'),
]
FIXED_CHARACTERS = ''.join([t for (t, f) in FIXED_TYPES])
def get_fixed_character(self, character):
"""return fill function"""
for head, function_name in self.FIXED_TYPES:
if character == head:
return getattr(self, function_name)
raise ValueError('no such character')
# - - - - - - - - - helper function for shape recognition - - - - - - - - -
def _follow_vertical_line(self, x, y):
    """find a vertical line with optional arrow heads"""
    # follow line to the bottom
    _, end_y, line_end_style = self._follow_line(x, y, dy=1, line_character='|')
    # follow line to the top
    _, start_y, line_start_style = self._follow_line(x, y, dy=-1, line_character='|')
    # if a '+' follows a line, then the line is stretched to hit the '+' center
    start_y_fix = end_y_fix = 0
    if self.get(x, start_y - 1) == '+':
        start_y_fix = -0.5
    if self.get(x, end_y + 1) == '+':
        end_y_fix = 0.5
    # tag characters as used (not the arrow heads)
    self.tag([(x, y) for y in range(start_y, end_y + 1)], CLASS_LINE)
    # return the new shape object with arrows etc.
    p1 = complex(self.hcenter(x), self.top(start_y + start_y_fix))
    p2 = complex(self.hcenter(x), self.bottom(end_y + end_y_fix))
    shapes = []
    # the arrow drawing methods may move the end point so the line does
    # not pierce the head (see arrow helpers above)
    if line_start_style:
        p1, arrow_shapes = line_start_style(p1, p2)
        shapes.extend(arrow_shapes)
    if line_end_style:
        p2, arrow_shapes = line_end_style(p2, p1)
        shapes.extend(arrow_shapes)
    shapes.append(Line(p1, p2))
    return group(shapes)

def _follow_horizontal_line(self, x, y, thick=False):
    """find a horizontal line with optional arrow heads"""
    # '=' draws a thick line, '-' a normal one
    if thick:
        line_character = '='
    else:
        line_character = '-'
    # follow line to the right
    end_x, _, line_end_style = self._follow_line(x, y, dx=1, line_character=line_character)
    # follow line to the left
    start_x, _, line_start_style = self._follow_line(x, y, dx=-1, line_character=line_character)
    # stretch the line to the center of an adjoining '+'
    start_x_fix = end_x_fix = 0
    if self.get(start_x - 1, y) == '+':
        start_x_fix = -0.5
    if self.get(end_x + 1, y) == '+':
        end_x_fix = 0.5
    self.tag([(x, y) for x in range(start_x, end_x+1)], CLASS_LINE)
    # return the new shape object with arrows etc.
    p1 = complex(self.left(start_x + start_x_fix), self.vcenter(y))
    p2 = complex(self.right(end_x + end_x_fix), self.vcenter(y))
    shapes = []
    if line_start_style:
        p1, arrow_shapes = line_start_style(p1, p2)
        shapes.extend(arrow_shapes)
    if line_end_style:
        p2, arrow_shapes = line_end_style(p2, p1)
        shapes.extend(arrow_shapes)
    shapes.append(Line(p1, p2, thick=thick))
    return group(shapes)

def _follow_lower_horizontal_line(self, x, y):
    """find a horizontal line, the line is aligned to the bottom and a bit
    wider, so that it can be used for shapes like this:

           ___
        __|   |___
    """
    # follow line to the right (no arrow heads on '_' lines)
    end_x, _, line_end_style = self._follow_line(x, y, dx=1, line_character='_', arrows=False)
    # follow line to the left
    start_x, _, line_start_style = self._follow_line(x, y, dx=-1, line_character='_', arrows=False)
    self.tag([(x, y) for x in range(start_x, end_x+1)], CLASS_LINE)
    # widened by half a cell on each side so it meets adjoining verticals
    p1 = complex(self.hcenter(start_x-1), self.bottom(y))
    p2 = complex(self.hcenter(end_x+1), self.bottom(y))
    return [Line(p1, p2)]

def _follow_upper_horizontal_line(self, x, y):
    """find a horizontal line, the line is aligned to the top and a bit
    wider, so that it can be used for shapes like this:

        |~~~|
      ~~     ~~~
    """
    # follow line to the right (no arrow heads on '~' lines)
    end_x, _, line_end_style = self._follow_line(x, y, dx=1, line_character='~', arrows=False)
    # follow line to the left
    start_x, _, line_start_style = self._follow_line(x, y, dx=-1, line_character='~', arrows=False)
    self.tag([(x, y) for x in range(start_x, end_x+1)], CLASS_LINE)
    # widened by half a cell on each side so it meets adjoining verticals
    p1 = complex(self.hcenter(start_x-1), self.top(y))
    p2 = complex(self.hcenter(end_x+1), self.top(y))
    return [Line(p1, p2)]

def _follow_line(self, x, y, dx=0, dy=0, line_character=None, arrows=True):
    """helper function for all the line functions

    Walk from (x, y) in direction (dx, dy) while *line_character*
    continues; optionally detect an arrow head one step beyond the end.
    Returns (last_x, last_y, arrow_method_or_None).
    """
    # follow line in the given direction
    while 0 <= x < self.width and 0<= y < self.height and self.get(x+dx, y+dy) == line_character:
        x += dx
        y += dy
    if arrows:
        # check for arrow head
        following_character = self.get(x + dx, y + dy)
        if following_character in self.ARROW_HEADS:
            line_end_style = self.get_arrow(following_character, dx, dy)
            if line_end_style:
                # consume the head cell too
                x += dx
                y += dy
        else:
            line_end_style = None
    else:
        line_end_style = None
    return x, y, line_end_style

def _plus_joiner(self, x, y):
    """adjacent '+' signs are connected with a line from center to center
    required for images like these:

        +---+   The box should be closed on all sides
        |   +--->   and the arrow start should touch the box
        +---+
    """
    result = []
    #~ for dx, dy in ((1,0), (-1,0), (0,1), (0,-1)):
    # looking right and down is sufficient as the scan is done from left to
    # right, top to bottom
    for dx, dy in ((1, 0), (0, 1)):
        if self.get(x + dx, y + dy) == '+':
            result.append(Line(
                Point(self.hcenter(x), self.vcenter(y)),
                Point(self.hcenter(x + dx), self.vcenter(y + dy))
            ))
    self.tag([(x, y)], CLASS_JOIN)
    return result
def _follow_fill(self, character, start_x, start_y):
"""fill shapes like the ones below with a pattern. when the character is
upper case, draw a border too.
XXX aaa BB
XXX a
"""
fill = self.get_fill(character.upper())
border = character.isupper()
result = []
# flood fill algorithm, searching for similar characters
coordinates = []
to_scan = [(start_x, start_y)]
while to_scan:
x, y = to_scan.pop()
if self.cls(x, y) is None:
if self.get(x, y) == character:
result.extend(fill(x, y))
self.tag([(x, y)], CLASS_RECTANGLE)
if self.get(x + 1, y) == character:
if self.cls(x + 1, y) is None:
to_scan.append((x + 1, y))
elif border:
result.append(Line(
Point(self.right(x), self.top(y)),
Point(self.right(x), self.bottom(y))))
if self.get(x - 1, y) == character:
if self.cls(x - 1, y) is None:
to_scan.append((x - 1, y))
elif border:
result.append(Line(
Point(self.left(x), self.top(y)),
Point(self.left(x), self.bottom(y))))
if self.get(x, y + 1) == character:
if self.cls(x, y + 1) is None:
to_scan.append((x, y + 1))
elif border:
result.append(Line(
Point(self.left(x), self.bottom(y)),
Point(self.right(x), self.bottom(y))))
if self.get(x, y - 1) == character:
if self.cls(x, y - 1) is None:
to_scan.append((x, y - 1))
elif border:
result.append(Line(
Point(self.left(x), self.top(y)),
Point(self.right(x), self.top(y))))
return group(result)
    def _follow_horizontal_string(self, start_x, y, accept_anything=False, quoted=False):
        """find a string. may contain single spaces, but the detection is
        aborted after more than one space.

            Text one "Text two"

        :param start_x: column of the first character (the opening quote
            itself when ``quoted`` is true)
        :param y: row to scan
        :param accept_anything: take every non-space character as text;
            otherwise only alphanumeric characters (and single spaces) are
            accepted
        :param quoted: treat the character at ``start_x`` as the quotation
            character and scan until it re-appears
        :returns: a list with one Label, or an empty list when there is no
            string at this position
        """
        # follow line from left to right
        if quoted:
            quotation_character = self.get(start_x, y)
            x = start_x + 1
        else:
            quotation_character = None
            x = start_x
        text = []
        if self.get(x, y) != ' ':
            text.append(self.get(x, y))
            self.tag([(x, y)], CLASS_STRING)
            # NOTE(review): despite its name this flag is True while the
            # *previous* character was not a space; two consecutive spaces
            # end an unquoted string
            is_first_space = True
            while 0 <= x + 1 < self.width and self.cls(x + 1, y) is None:
                if not quoted:
                    # stop at the second consecutive space
                    if self.get(x + 1, y) == ' ' and not is_first_space:
                        break
                    if not accept_anything and not self.get(x + 1, y).isalnum():
                        break
                x += 1
                character = self.get(x, y)
                if character == quotation_character:
                    # closing quote: tag it but do not include it in the text
                    self.tag([(x, y)], CLASS_STRING)
                    break
                text.append(character)
                if character == ' ':
                    is_first_space = False
                else:
                    is_first_space = True
            if text[-1] == ' ':
                # drop the trailing space consumed while looking ahead
                del text[-1]
                x -= 1
            self.tag([(x, y) for x in range(start_x, x + 1)], CLASS_STRING)
            return [Label(
                Point(self.left(start_x), self.bottom(y)),
                ''.join(text)
            )]
        else:
            return []
def _follow_rounded_edge(self, x, y):
"""check for rounded edges:
/- | -\- | and also \ / etc.
| -/ | \- - |
"""
result = []
if self.get(x, y) == '/':
# rounded rectangles
if (self.get(x + 1, y) == '-' and self.get(x, y + 1) == '|'):
# upper left corner
result.append(Arc(
Point(self.hcenter(x), self.bottom(y)), 90,
Point(self.right(x), self.vcenter(y)), 180
))
if self.get(x - 1, y) == '-' and self.get(x, y - 1) == '|':
# lower right corner
result.append(Arc(
Point(self.hcenter(x), self.top(y)), -90,
Point(self.left(x), self.vcenter(y)), 0
))
if not result:
# if used as diagonal line
p1 = p2 = None
a1 = a2 = 0
arc = c1 = c2 = False
if self.get(x + 1, y - 1) == '|':
p1 = Point(self.hcenter(x + 1), self.top(y))
a1 = -90
arc = c1 = True
elif self.get(x + 1, y - 1) == '+':
p1 = Point(self.hcenter(x + 1), self.vcenter(y - 1))
a1 = -135
elif self.get(x + 1, y - 1) == '-':
p1 = Point(self.right(x), self.vcenter(y - 1))
a1 = 180
arc = c1 = True
elif self.get(x + 1, y - 1) == '/':
p1 = Point(self.right(x), self.top(y))
a1 = -135
c1 = True
elif self.get(x + 1, y) == '|':
p1 = Point(self.hcenter(x + 1), self.top(y))
elif self.get(x, y - 1) == '-':
p1 = Point(self.right(x), self.vcenter(y - 1))
if self.get(x - 1, y + 1) == '|':
p2 = Point(self.hcenter(x - 1), self.top(y + 1))
a2 = 90
arc = c2 = True
elif self.get(x - 1, y + 1) == '+':
p2 = Point(self.hcenter(x - 1), self.vcenter(y + 1))
a2 = 45
elif self.get(x - 1, y + 1) == '-':
p2 = Point(self.left(x), self.vcenter(y + 1))
a2 = 0
arc = c2 = True
elif self.get(x - 1, y + 1) == '/':
p2 = Point(self.left(x), self.bottom(y))
a2 = 45
c2 = True
elif self.get(x - 1, y) == '|':
p2 = Point(self.hcenter(x - 1), self.bottom(y))
elif self.get(x, y + 1) == '-':
p2 = Point(self.left(x), self.vcenter(y + 1))
if p1 or p2:
if not p1:
p1 = Point(self.right(x), self.top(y))
if not p2:
p2 = Point(self.left(x), self.bottom(y))
if arc:
result.append(Arc(p1, a1, p2, a2, c1, c2))
else:
result.append(Line(p1, p2))
else: # '\'
# rounded rectangles
if self.get(x-1, y) == '-' and self.get(x, y + 1) == '|':
# upper right corner
result.append(Arc(
Point(self.hcenter(x), self.bottom(y)), 90,
Point(self.left(x), self.vcenter(y)), 0
))
if self.get(x+1, y) == '-' and self.get(x, y - 1) == '|':
# lower left corner
result.append(Arc(
Point(self.hcenter(x), self.top(y)), -90,
Point(self.right(x), self.vcenter(y)), 180
))
if not result:
# if used as diagonal line
p1 = p2 = None
a1 = a2 = 0
arc = c1 = c2 = False
if self.get(x - 1, y - 1) == '|':
p1 = Point(self.hcenter(x-1), self.top(y))
a1 = -90
arc = c1 = True
elif self.get(x - 1, y - 1) == '+':
p1 = Point(self.hcenter(x-1), self.vcenter(y - 1))
a1 = -45
elif self.get(x - 1, y - 1) == '-':
p1 = Point(self.left(x), self.vcenter(y-1))
a1 = 0
arc = c1 = True
elif self.get(x - 1, y - 1) == '\\':
p1 = Point(self.left(x), self.top(y))
a1 = -45
c1 = True
elif self.get(x - 1, y) == '|':
p1 = Point(self.hcenter(x-1), self.top(y))
elif self.get(x, y - 1) == '-':
p1 = Point(self.left(x), self.hcenter(y - 1))
if self.get(x + 1, y + 1) == '|':
p2 = Point(self.hcenter(x+1), self.top(y + 1))
a2 = 90
arc = c2 = True
elif self.get(x + 1, y + 1) == '+':
p2 = Point(self.hcenter(x+1), self.vcenter(y + 1))
a2 = 135
elif self.get(x + 1, y + 1) == '-':
p2 = Point(self.right(x), self.vcenter(y + 1))
a2 = 180
arc = c2 = True
elif self.get(x + 1, y + 1) == '\\':
p2 = Point(self.right(x), self.bottom(y))
a2 = 135
c2 = True
elif self.get(x + 1, y) == '|':
p2 = Point(self.hcenter(x+1), self.bottom(y))
elif self.get(x, y + 1) == '-':
p2 = Point(self.right(x), self.vcenter(y + 1))
if p1 or p2:
if not p1:
p1 = Point(self.left(x), self.top(y))
if not p2:
p2 = Point(self.right(x), self.bottom(y))
if arc:
result.append(Arc(p1, a1, p2, a2, c1, c2))
else:
result.append(Line(p1, p2))
if result:
self.tag([(x, y)], CLASS_JOIN)
return group(result)
def process(input, visitor_class, options=None):
    """\
    Parse ``input`` and render it with the given visitor class.

    :param input: string or file like object with the image as text.
    :param visitor_class: class object used to render the resulting image.
    :param options: dictionary containing the settings; defaults are used
        when ``None`` is given.
    :returns: the instantiated ``visitor_class``; the image has already
        been processed with it.
    :exception: can raise an ``UnsupportedFormatError`` exception when the
        specified format is not supported.
    """
    # layer the caller's settings over a copy of the defaults
    effective = DEFAULT_OPTIONS.copy()
    if options is not None:
        effective.update(options)
    options = effective
    # fill color falls back to the foreground color
    if options.get('fill') is None:
        options['fill'] = options['foreground']
    # file like objects are read; anything else is assumed to be a string
    if hasattr(input, 'read'):
        input = input.read()
    if options['debug']:
        sys.stderr.write('%r\n' % (input,))
    image = AsciiArtImage(input, options['aspect'], options['textual'], options['widechars'])
    if options['debug']:
        sys.stderr.write('%s\n' % (image,))
    image.recognize()
    visitor = visitor_class(options)
    visitor.visit_image(image)
    return visitor
def render(input, output=None, options=None):
    """
    Render an ASCII art figure to a file or file-like.

    :param input: If ``input`` is a basestring subclass (str or unicode), the
        text contained in ``input`` is rendered. If ``input`` is a file-like
        object, the text to render is taken using ``input.read()``.
    :param output: If no ``output`` is specified, the resulting rendered image
        is returned as a string. If output is a basestring subclass, a file
        with the name of ``output`` contents is created and the rendered image
        is saved there. If ``output`` is a file-like object, ``output.write()``
        is used to save the rendered image.
    :param options: A dictionary containing the settings. When ``None`` is
        given, defaults are used.
    :returns: This function returns a tuple ``(visitor, output)``, where
        ``visitor`` is visitor instance that rendered the image and ``output``
        is the image as requested by the ``output`` parameter (a ``str`` if it
        was ``None``, or a file-like object otherwise, which you should
        ``close()`` if needed).
    :exception: This function can raise an ``UnsupportedFormatError`` exception
        if the specified format is not supported.
    """
    if options is None:
        options = {}
    close_output = False
    if output is None:
        import io
        options['file_like'] = io.StringIO()
    elif isinstance(output, str):
        # BUGFIX: this used the Python 2 only ``file()`` builtin, which does
        # not exist on Python 3; ``open()`` works on both
        options['file_like'] = open(output, 'wb')
        close_output = True
    else:
        options['file_like'] = output
    try:
        # late import of visitor classes to not cause any import errors for
        # unsupported backends (this would happen when a library a backend
        # depends on is not installed)
        fmt = options['format'].lower()
        if fmt == 'svg':
            from . import svg
            visitor_class = svg.SVGOutputVisitor
        elif fmt == 'pdf':
            from . import pdf
            visitor_class = pdf.PDFOutputVisitor
        elif fmt == 'ascii':
            from . import aa
            visitor_class = aa.AsciiOutputVisitor
        else:
            # for all other formats, it may be a bitmap type. let
            # PIL decide if it can write a file of that type.
            from . import pil
            visitor_class = pil.PILOutputVisitor
        # now render and output the image
        visitor = process(input, visitor_class, options)
    finally:
        if close_output:
            options['file_like'].close()
    return (visitor, options['file_like'])
def main():
    """Command line entry point: parse the arguments, then render stdin or
    a file to the requested output format."""
    import sys
    import optparse
    import os.path

    parser = optparse.OptionParser(
        usage = "%prog [options] [file]",
        version = """\
%prog 0.5

Copyright (C) 2006-2010 aafigure-team

Redistribution and use in source and binary forms, with or without
modification, are permitted under the terms of the BSD License.

THIS SOFTWARE IS PROVIDED BY THE AAFIGURE-TEAM ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AAFIGURE-TEAM BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""",
        description = "ASCII art to image (SVG, PNG, JPEG, PDF and more) converter."
    )
    parser.add_option("-e", "--encoding",
        dest = "encoding",
        action = "store",
        help = "character encoding of input text",
        default = DEFAULT_OPTIONS['encoding'],
    )
    parser.add_option("-w", "--wide-chars",
        dest = "widechars",
        action = "store",
        help = "unicode properties to be treated as wide glyph (e.g. 'F,W,A')",
        default = DEFAULT_OPTIONS['widechars'],
    )
    parser.add_option("-o", "--output",
        dest = "output",
        metavar = "FILE",
        help = "write output to FILE"
    )
    parser.add_option("-t", "--type",
        dest = "format",
        help = "filetype: png, jpg, svg (by default autodetect from filename)",
        default = None,
    )
    parser.add_option("-D", "--debug",
        dest = "debug",
        action = "store_true",
        help = "enable debug outputs",
        default = DEFAULT_OPTIONS['debug'],
    )
    parser.add_option("-T", "--textual",
        dest = "textual",
        action = "store_true",
        help = "disable horizontal fill detection",
        default = DEFAULT_OPTIONS['textual'],
    )
    parser.add_option("-s", "--scale",
        dest = "scale",
        action = "store",
        type = 'float',
        help = "set scale",
        default = DEFAULT_OPTIONS['scale'],
    )
    parser.add_option("-a", "--aspect",
        dest = "aspect",
        action = "store",
        type = 'float',
        help = "set aspect ratio",
        default = DEFAULT_OPTIONS['aspect'],
    )
    parser.add_option("-l", "--linewidth",
        dest = "line_width",
        action = "store",
        type = 'float',
        help = "set width, svg only",
        default = DEFAULT_OPTIONS['line_width'],
    )
    parser.add_option("--proportional",
        dest = "proportional",
        action = "store_true",
        help = "use proportional font instead of fixed width",
        default = DEFAULT_OPTIONS['proportional'],
    )
    parser.add_option("-f", "--foreground",
        dest = "foreground",
        action = "store",
        help = "foreground color default=%default",
        default = DEFAULT_OPTIONS['foreground'],
    )
    parser.add_option("-x", "--fill",
        dest = "fill",
        action = "store",
        help = "foreground color default=foreground",
        default = None,
    )
    parser.add_option("-b", "--background",
        dest = "background",
        action = "store",
        help = "foreground color default=%default",
        default = DEFAULT_OPTIONS['background'],
    )
    parser.add_option("-O", "--option",
        dest = "_extra_options",
        action = "append",
        help = "pass special options to backends (expert user)",
    )

    (options, args) = parser.parse_args()

    if len(args) > 1:
        parser.error("too many arguments")

    if options.format is None:
        if options.output is None:
            parser.error("Please specify output format with --type")
        else:
            options.format = os.path.splitext(options.output)[1][1:]

    if args:
        # BUGFIX: ``file()`` only exists on Python 2; open the file in
        # binary mode so codecs.getreader does the decoding
        _input = open(args[0], 'rb')
    else:
        # on Python 3 stdin is a text stream; its ``buffer`` attribute is
        # the underlying byte stream that codecs.getreader expects
        _input = getattr(sys.stdin, 'buffer', sys.stdin)
    input = codecs.getreader(options.encoding)(_input)
    if options.output is None:
        output = sys.stdout
    else:
        # BUGFIX: was the Python 2 only ``file()`` builtin
        output = open(options.output, 'wb')

    # explicit copying of parameters to the options dictionary
    options_dict = {}
    for key in ('widechars', 'textual', 'proportional',
                'line_width', 'aspect', 'scale',
                'format', 'debug'):
        options_dict[key] = getattr(options, key)
    # ensure all color parameters start with a '#'
    # this is for the convenience of the user as typing the shell comment
    # character isn't for everyone ;-)
    for color in ('foreground', 'background', 'fill'):
        value = getattr(options, color)
        if value is not None:
            if value[0] != '#':
                options_dict[color] = '#%s' % value
            else:
                options_dict[color] = value
    # copy extra options
    if options._extra_options is not None:
        for keyvalue in options._extra_options:
            try:
                key, value = keyvalue.split('=')
            except ValueError:
                parser.error('--option must be in the format <key>=<value> (not %r)' % (keyvalue,))
            options_dict[key] = value

    if options.debug:
        sys.stderr.write('options=%r\n' % (options_dict,))

    try:
        (visitor, output) = render(input, output, options_dict)
        output.close()
    except UnsupportedFormatError as e:
        print("ERROR: Can't output format '%s': %s" % (options.format, e))
# when executed as a script (not imported), run the command line tool
if __name__ == '__main__':
    main()
|
bsd-3-clause
|
Adel-Magebinary/odoo
|
addons/account/wizard/account_report_general_journal.py
|
378
|
1697
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_general_journal(osv.osv_memory):
    """Wizard that collects the settings for and launches the General
    Journal report."""
    _inherit = "account.common.journal.report"
    _name = 'account.general.journal'
    _description = 'Account General Journal'
    _columns = {
        'journal_ids': fields.many2many('account.journal', 'account_general_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }

    def _print_report(self, cr, uid, ids, data, context=None):
        # merge the common wizard values into the report data, then hand
        # rendering off to the report engine
        report_data = self.pre_print_report(cr, uid, ids, data, context=context)
        report_obj = self.pool['report']
        return report_obj.get_action(cr, uid, [], 'account.report_generaljournal', data=report_data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jcoady9/python-for-android
|
python3-alpha/python3-src/Lib/encodings/iso2022_jp_1.py
|
816
|
1061
|
#
# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# the actual codec implementation lives in the C module _codecs_iso2022
codec = _codecs_iso2022.getcodec('iso2022_jp_1')
class Codec(codecs.Codec):
    # stateless encode/decode delegated to the C codec implementation
    encode = codec.encode
    decode = codec.decode
# incremental encoder: feeds chunks through the shared C codec object
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
# incremental decoder: feeds chunks through the shared C codec object
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
# stream reader: multibyte-aware file-like decoding wrapper
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
# stream writer: multibyte-aware file-like encoding wrapper
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register this codec."""
    return codecs.CodecInfo(
        name='iso2022_jp_1',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
apache-2.0
|
StephenChusang/py-faster-rcnn-tracker
|
lib/datasetfactory/ILSVRC.py
|
1
|
5021
|
import os
import os.path as osp
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
# from utils.cython_bbox import bbox_overlaps
years = {'2013': '2013',
'2014': ['0000', '0001', '0002', '0003', '0004', '0005', '0006']}
name = 'ILSVRC'
_MAX_TRAIN_NUM = 20000
def prepare_train_text(dataset):
    """Build ``sysnets.txt`` and ``train.txt`` for the ILSVRC DET dataset.

    Scans the 2013 and 2014 training annotation directories, collects the
    class names found in the XML files into ``sysnets.txt`` (one
    ``<name> <index>`` pair per line, with ``__background__`` at index 0)
    and writes a random sample of at most ``_MAX_TRAIN_NUM`` image
    identifiers to ``train.txt``.

    Fixes over the previous version: Python 3 compatible ``print()``,
    files opened in text mode via ``with`` (no leaked handles), and class
    collection via a set instead of an O(n^2) list-membership loop.

    :param dataset: root directory of the ILSVRC dataset
    """
    # First step: get images' parent directories
    ann_root = osp.join(dataset, 'Annotations')
    _root_2013 = name + years['2013'] + '_train'
    _2013 = osp.join(ann_root, 'DET', 'train', _root_2013)
    dirs_2013 = [osp.join('DET', 'train', _root_2013, dir) for dir in os.listdir(_2013)]
    dirs_2014 = [osp.join('DET', 'train', name + '2014' + '_train_' + sub) for sub in years['2014']]
    dirs = dirs_2013 + dirs_2014
    # Second step: get all the xml file paths
    xmls = []
    for _dir in dirs:
        xmls += [osp.join(_dir, xml) for xml in os.listdir(osp.join(ann_root, _dir))]
    print('There are {} xml files.'.format(len(xmls)))
    # Third step: parse xml files and assign class labels.
    # If 'sysnets.txt' exists we skip this part since it is time-consuming.
    if not os.path.exists(osp.join(dataset, 'sysnets.txt')):
        class_names = set()
        for xml in xmls:
            tree = ET.parse(osp.join(ann_root, xml))
            for obj in tree.findall('object'):
                class_names.add(obj.find('name').text.strip())
        # sorted class list with __background__ inserted at index 0
        classes = ['__background__'] + sorted(class_names)
        with open(osp.join(dataset, 'sysnets.txt'), 'w') as sysnets:
            for ind, _class in enumerate(classes):
                sysnets.write(_class + ' ' + str(ind) + '\n')
    else:
        print('sysnets.txt exists and skip building sysnets.txt')
    # Fourth step: write train.txt with a random subset of the image ids
    with open(osp.join(dataset, 'train.txt'), 'w') as train_txt:
        xmls = np.random.permutation(xmls)[: _MAX_TRAIN_NUM]
        for ix, xml in enumerate(xmls):
            train_txt.write(osp.splitext(xml)[0] + '\n')
            if (ix + 1) % 1000 == 0:
                print('Processed {} files'.format(ix + 1))
def load_annotation(num_classes, xml, class_indexes):
    """Parse one PASCAL-style XML annotation file into a roidb entry.

    :param num_classes: total number of classes (width of the overlaps matrix)
    :param xml: path of the annotation XML file
    :param class_indexes: mapping from lower-cased class name to class index
    :returns: dict with 'boxes', 'gt_classes', 'gt_overlaps' (sparse),
        'flipped' and 'seg_areas' entries
    """
    objs = ET.parse(xml).findall('object')
    num_objs = len(objs)
    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, num_classes), dtype=np.float32)
    # "Seg" area for pascal is just the box area
    seg_areas = np.zeros((num_objs), dtype=np.float32)
    # fill one row per annotated object
    for ix, obj in enumerate(objs):
        bbox = obj.find('bndbox')
        # make pixel indexes 0-based
        coords = [float(bbox.find(tag).text) - 1
                  for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        x1, y1, x2, y2 = coords
        cls = class_indexes[obj.find('name').text.lower().strip()]
        boxes[ix, :] = coords
        gt_classes[ix] = cls
        overlaps[ix, cls] = 1.0
        seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': scipy.sparse.csr_matrix(overlaps),
            'flipped': False,
            'seg_areas': seg_areas}
def _load_data(dataset, class_indexes):
    """Load the roidb for every image listed in ``train.txt``.

    Fixes over the previous version: Python 3 compatible ``print()`` and
    text-mode read (the file was written as text but opened as 'rb').

    :param dataset: root directory of the ILSVRC dataset
    :param class_indexes: mapping from class name to class index
    :returns: list of roidb entries, each with its 'image' path filled in
    """
    train_txt = osp.join(dataset, 'train.txt')
    with open(train_txt, 'r') as f:
        train_datas = [line.strip('\n') for line in f.readlines()]
    print('Totally {} training files'.format(len(train_datas)))
    image = [osp.join(dataset, 'Data', td) + '.JPEG' for td in train_datas]
    annotations = [osp.join(dataset, 'Annotations', td) + '.xml' for td in train_datas]
    roidb = [load_annotation(len(class_indexes), xml, class_indexes) for xml in annotations]
    # add image path to each entry
    for ind, entry in enumerate(roidb):
        entry['image'] = image[ind]
    return roidb
def _load_class_labels(dataset):
sysnets = osp.join(dataset, 'sysnets.txt')
sysnets = open(sysnets, 'rb')
clabels = [clabel.strip('\n') for clabel in sysnets.readlines()]
class_labels = {}
class_indexes = {}
for clabel in clabels:
clabel = clabel.split()
class_labels[int(clabel[1])] = clabel[0]
class_indexes[clabel[0]] = int(clabel[1])
return class_labels, class_indexes
def ILSVRC_handler(dataset):
    """Dataset entry point: load the class mappings and the roidb.

    :param dataset: config dict whose 'dataset' key is the ILSVRC root dir
    :returns: tuple ``(class_labels, roidb)``
    """
    root = dataset['dataset']
    class_labels, class_indexes = _load_class_labels(root)
    return class_labels, _load_data(root, class_indexes)
if __name__ == '__main__':
    # manual invocation: regenerate the train list for the default layout
    dataset = osp.join('data', 'ILSVRC2015')
    prepare_train_text(dataset)
|
mit
|
scott-maddox/fdint
|
scripts/gen_test_scfd.py
|
1
|
5280
|
# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
'''
Uses numerical integration to calculate accurate values to test against.
This should only be run after `python setup.py build_ext --inplace`.
'''
import os
import sys
import fdint
tests_dir = os.path.join(os.path.dirname(__file__), '../fdint/tests/')
import warnings
import numpy
from numpy import exp, sqrt
from scipy.integrate import quad
INV_SQRT_PI_2 = 1.1283791670955126 # 2/sqrt(pi)
def quad_nonparabolic(phi, alpha):
    """Numerically evaluate the nonparabolic FD integral at (phi, alpha).

    Returns ``(value, abs_error_estimate)`` from ``scipy.integrate.quad``.
    """
    def integrand(x):
        return sqrt(x * (1 + alpha * x)) * (1 + 2 * alpha * x) / (1. + exp(x - phi)) * INV_SQRT_PI_2
    value, abserr = quad(integrand, 0, numpy.inf, epsabs=1e-300, epsrel=1e-13, limit=100)
    return value, abserr
# phis = numpy.array([-50,-3,-2,-1,0,1,4,5,7,10,15,20,30,40,50], dtype=float)
phis = numpy.linspace(-50, 50, 101, dtype=float)
def write_header(f, modname, dependencies=''):
    """Write the boilerplate header of a generated test module to *f*.

    The header contains the license, an auto-generation notice, the
    imports, optional extra ``dependencies`` text, and the
    ``unittest.TestCase`` subclass with its two relative-tolerance
    assertion helpers.
    """
    f.write("""# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.

# This file was generated by `scripts/gen_test_scfd.py`.
# Do not edit this file directly, or your changes will be lost.
'''
Tests the `{modname}` module.
'''

# Make sure we import the local package
import os
import sys
sys.path.insert(0,
    os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from fdint import {modname}

import unittest
import numpy
import warnings
""".format(modname=modname))
    f.write(dependencies)
    f.write('\n')
    f.write('class Test_{modname}(unittest.TestCase):\n'
            ''.format(modname=modname.upper()))
    # relative-tolerance assertion helpers used by every generated test
    f.write('''
    def assert_rtol(self, a, b, rtol):
        assert rtol >= 0
        rerr = abs(a-b)/a
        if rerr > rtol:
            self.fail('Outside of relative tolerance of {}: {}'
                      ''.format(rtol, rerr))
''')
    f.write('''
    def assert_all_rtol(self, a, b, rtol):
        assert (rtol >= 0).all()
        a = numpy.array(a)
        b = numpy.array(b)
        rtol = numpy.array(rtol)
        rerr = abs(a-b)/a
        if (rerr > rtol).all():
            self.fail('Outside of relative tolerance of {}: {}'
                      ''.format(rtol, rerr))
''')
##################
# Test scfd module
# For every (alpha, phi) pair: compute an accurate reference value by
# numerical integration, compare it with the fast implementation to pick a
# relative tolerance, and emit a scalar, a vector and a buffered-vector
# test case into fdint/tests/test_scfd.py.
modname='scfd'
alphas = numpy.linspace(0., 0.15, 5)
fpath = os.path.join(tests_dir, 'test_{modname}.py'.format(modname=modname))
with open(fpath, 'w') as f:
    mod = getattr(fdint, modname)
    write_header(f, modname)
    fname = 'nonparabolic'
    # scalar
    i = 0
    for alpha in alphas:
        for phi in phis:
            i += 1
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                true_nu, aerr_est = quad_nonparabolic(phi, alpha)
                nu = getattr(mod,'{fname}'.format(fname=fname))(phi, alpha)
            # tolerance derived from both the implementation error and the
            # integration error estimate
            aerr = abs(nu-true_nu)
            rtol = max(abs(2*aerr/true_nu), abs(2*aerr_est/true_nu))
            # parameter regions known to trigger warnings in the
            # implementation get a warnings-suppressing test body
            suppress_warnings = (rtol > 2e-7 or
                                 alpha >= 0.075 and phi >= 10 or
                                 alpha >= 0.15 and phi >= 5 or
                                 phi >= 40)

            # scalar
            f.write('\n')
            f.write('    def test_{fname}_{i}(self):\n'.format(fname=fname,i=i))
            f.write('        phi = {}\n'.format(phi))
            f.write('        alpha = {}\n'.format(alpha))
            if suppress_warnings:
                f.write('        with warnings.catch_warnings():\n')
                f.write('            warnings.simplefilter("ignore")\n    ')
            f.write('        nu = {modname}.{fname}(phi, alpha)\n'
                    .format(modname=modname, fname=fname))
            f.write('        true_nu = {}\n'
                    .format(true_nu))
            f.write('        rtol = {:.0e}\n'.format(rtol))
            f.write('        self.assert_rtol(nu, true_nu, rtol)\n')

            # vector
            f.write('        vphi = numpy.zeros(2); vphi.fill(phi)\n')
            f.write('        valpha = numpy.zeros(2); valpha.fill(alpha)\n')
            if suppress_warnings:
                f.write('        with warnings.catch_warnings():\n')
                f.write('            warnings.simplefilter("ignore")\n    ')
            f.write('        vnu = {modname}.{fname}(vphi, valpha)\n'
                    .format(modname=modname, fname=fname))
            f.write('        vtrue_nu = numpy.zeros(2); vtrue_nu.fill(true_nu)\n')
            f.write('        vrtol = numpy.zeros(2); vrtol.fill(rtol)\n')
            f.write('        self.assert_all_rtol(vnu, vtrue_nu, vrtol)\n')

            # buffered vector
            if suppress_warnings:
                f.write('        with warnings.catch_warnings():\n')
                f.write('            warnings.simplefilter("ignore")\n    ')
            f.write('        vnu = numpy.zeros(2); {modname}.{fname}(vphi, valpha, vnu)\n'
                    .format(modname=modname, fname=fname))
            f.write('        self.assert_all_rtol(vnu, vtrue_nu, vrtol)\n')

    f.write('\n')
    f.write('if __name__ == "__main__":\n')
    f.write('    unittest.main()')
|
bsd-3-clause
|
abhijeet9920/python_project
|
develop/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py
|
2931
|
2318
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
    """Table-driven state machine used by the charset probers.

    ``sm`` is a model dict with 'classTable', 'classFactor', 'stateTable',
    'charLenTable' and 'name' entries; input bytes are fed one at a time
    through :meth:`next_state`.
    """
    def __init__(self, sm):
        self._mModel = sm
        self._mCurrentBytePos = 0
        self._mCurrentCharLen = 0
        self.reset()

    def reset(self):
        """Return the machine to its start state."""
        self._mCurrentState = eStart

    def next_state(self, c):
        """Feed one byte and return the machine's new state."""
        # classify the byte; PY3K: c may already be an int, wrap_ord copes
        byte_class = self._mModel['classTable'][wrap_ord(c)]
        if self._mCurrentState == eStart:
            # a fresh character starts here: remember its expected length
            self._mCurrentBytePos = 0
            self._mCurrentCharLen = self._mModel['charLenTable'][byte_class]
        # look up the transition for (current state, byte class)
        transition = self._mCurrentState * self._mModel['classFactor'] + byte_class
        self._mCurrentState = self._mModel['stateTable'][transition]
        self._mCurrentBytePos += 1
        return self._mCurrentState

    def get_current_charlen(self):
        return self._mCurrentCharLen

    def get_coding_state_machine(self):
        return self._mModel['name']
|
mit
|
Johnetordoff/osf.io
|
admin/osf_groups/views.py
|
6
|
2534
|
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse
from django.views.generic import FormView, ListView
from osf.models import OSFGroup
from admin.osf_groups.forms import OSFGroupSearchForm
from admin.base.views import GuidView
from admin.osf_groups.serializers import serialize_group
class OSFGroupsView(PermissionRequiredMixin, GuidView):
    """Detail page for one OSF group, restricted to authorized admins."""
    template_name = 'osf_groups/osf_groups.html'
    context_object_name = 'group'
    permission_required = 'osf.view_group'
    raise_exception = True

    def get_object(self, queryset=None):
        # look the group up by its guid from the URL and serialize it
        group_id = self.kwargs.get('id')
        return serialize_group(OSFGroup.objects.get(_id=group_id))
class OSFGroupsFormView(PermissionRequiredMixin, FormView):
    """Search form: redirects to the group detail page (lookup by guid) or
    to the filtered list view (lookup by name)."""
    template_name = 'osf_groups/search.html'
    object_type = 'osf_group'
    permission_required = 'osf.view_group'
    raise_exception = True
    form_class = OSFGroupSearchForm

    def __init__(self):
        self.redirect_url = None
        super(OSFGroupsFormView, self).__init__()

    def form_valid(self, form):
        # guard with ``or ''`` so a missing form field does not raise
        # AttributeError on .strip()
        id = (form.data.get('id') or '').strip()
        name = (form.data.get('name') or '').strip()
        # default: back to the search page when nothing was entered
        self.redirect_url = reverse('osf_groups:search')
        if id:
            self.redirect_url = reverse('osf_groups:osf_group', kwargs={'id': id})
        elif name:
            self.redirect_url = reverse('osf_groups:osf_groups_list',) + '?name={}'.format(name)
        return super(OSFGroupsFormView, self).form_valid(form)

    @property
    def success_url(self):
        # FormView redirects here after a valid submission
        return self.redirect_url
class OSFGroupsListView(PermissionRequiredMixin, ListView):
    """Paginated list of OSF groups, optionally filtered by name."""
    template_name = 'osf_groups/osf_groups_list.html'
    paginate_by = 10
    paginate_orphans = 1
    permission_required = 'osf.view_group'
    raise_exception = True

    def get_queryset(self):
        # ?name=<substring> narrows the list (case-insensitive)
        name = self.request.GET.get('name')
        if not name:
            return OSFGroup.objects.all()
        return OSFGroup.objects.filter(name__icontains=name)

    def get_context_data(self, **kwargs):
        groups = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(groups)
        paginator, page, groups, is_paginated = self.paginate_queryset(groups, page_size)
        return {
            'groups': [serialize_group(group) for group in groups],
            'page': page,
        }
|
apache-2.0
|
ahmadio/edx-platform
|
lms/djangoapps/certificates/api.py
|
23
|
14808
|
"""Certificates API
This is a Python API for generating certificates asynchronously.
Other Django apps should use the API functions defined in this module
rather than importing Django models directly.
"""
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from eventtracking import tracker
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.django import modulestore
from util.organizations_helpers import get_course_organizations
from certificates.models import (
CertificateStatuses,
certificate_status_for_student,
CertificateGenerationCourseSetting,
CertificateGenerationConfiguration,
ExampleCertificateSet,
GeneratedCertificate,
CertificateTemplate,
)
from certificates.queue import XQueueCertInterface
log = logging.getLogger("edx.certificate")
def get_certificates_for_user(username):
    """
    Retrieve certificate information for a particular user.

    Arguments:
        username (unicode): The identifier of the user.

    Returns: list of dicts (one per certificate), ordered by course id,
        with keys: username, course_key, type, status, grade, created,
        modified, download_url.
    """
    certificates = []
    user_certs = GeneratedCertificate.objects.filter(
        user__username=username
    ).order_by("course_id")
    for cert in user_certs:
        # NOTE: the download URL is not currently being set for webview
        # certificates.  In the future, we can update this to construct a
        # URL to the webview certificate for courses that have this
        # feature enabled.
        if cert.status == CertificateStatuses.downloadable:
            download_url = cert.download_url
        else:
            download_url = None
        certificates.append({
            "username": username,
            "course_key": cert.course_id,
            "type": cert.mode,
            "status": cert.status,
            "grade": cert.grade,
            "created": cert.created_date,
            "modified": cert.modified_date,
            "download_url": download_url,
        })
    return certificates
def generate_user_certificates(student, course_key, course=None, insecure=False, generation_mode='batch',
                               forced_grade=None):
    """
    Queue an add-cert request for the given student and course.

    A new record tracks the generation task; if an error occurs while
    adding the certificate to the queue, the task is left with status
    'error'.  Emits an `edx.certificate.created` analytics event when the
    request is accepted.

    Args:
        student (User)
        course_key (CourseKey)

    Keyword Arguments:
        course (Course): optionally provide the course object; loaded when
            omitted.
        insecure (bool): when True, talk to the queue over plain HTTP.
        generation_mode (str): who requested generation -- 'batch' for a
            django command, 'self' when the student initiated the request.
        forced_grade (str): when present, replaces the grade and grading
            is skipped.
    """
    queue_interface = XQueueCertInterface()
    if insecure:
        queue_interface.use_https = False
    wants_pdf = not has_html_certificates_enabled(course_key, course)
    status, cert = queue_interface.add_cert(
        student,
        course_key,
        course=course,
        generate_pdf=wants_pdf,
        forced_grade=forced_grade,
    )
    if status in (CertificateStatuses.generating, CertificateStatuses.downloadable):
        emit_certificate_event('created', student, course_key, course, {
            'user_id': student.id,
            'course_id': unicode(course_key),
            'certificate_id': cert.verify_uuid,
            'enrollment_mode': cert.mode,
            'generation_mode': generation_mode,
        })
    return status
def regenerate_user_certificates(student, course_key, course=None,
                                 forced_grade=None, template_file=None, insecure=False):
    """
    Queue a regen-cert request for the given student and course.

    A new record tracks the generation task; if an error occurs while
    adding the certificate to the queue, the task is left with status
    'error'.

    Args:
        student (User)
        course_key (CourseKey)

    Keyword Arguments:
        course (Course): optionally provide the course object; loaded when
            omitted.
        forced_grade: grade string (e.g. "Distinction") used instead of
            running grading.
        template_file: the template file used to render this certificate.
        insecure (bool): when True, talk to the queue over plain HTTP.
    """
    queue_interface = XQueueCertInterface()
    if insecure:
        queue_interface.use_https = False
    wants_pdf = not has_html_certificates_enabled(course_key, course)
    return queue_interface.regen_cert(
        student,
        course_key,
        course=course,
        forced_grade=forced_grade,
        template_file=template_file,
        generate_pdf=wants_pdf,
    )
def certificate_downloadable_status(student, course_key):
    """
    Check the student's existing certificate against a given course.

    If the status is neither generating nor downloadable (nor error), the
    caller may show the "generate certificate" button.

    Args:
        student (User): logged-in user
        course_key (CourseKey): ID associated with the course

    Returns:
        dict with keys 'is_downloadable', 'is_generating' and
        'download_url' (the URL when a certificate is downloadable,
        otherwise None).
    """
    current_status = certificate_status_for_student(student, course_key)
    # An 'error' status is presented to the user as "generating": on the
    # back-end those errors are monitored and the task re-submitted.
    response_data = {
        'is_downloadable': False,
        'is_generating': current_status['status'] in (CertificateStatuses.generating,
                                                      CertificateStatuses.error),
        'download_url': None,
    }
    if current_status['status'] == CertificateStatuses.downloadable:
        response_data['is_downloadable'] = True
        response_data['download_url'] = current_status['download_url']
    return response_data
def set_cert_generation_enabled(course_key, is_enabled):
    """Enable or disable self-generated certificates for a course.

    Two "switches" control whether self-generated certificates are enabled
    for a course: the global feature flag, and the per-course setting this
    function toggles.  The per-course flag should be enabled *only* after
    example certificates have been generated successfully, to avoid
    configuration errors (e.g. no template installed on the workers); the
    instructor-dashboard UI enforces that constraint.

    Arguments:
        course_key (CourseKey): The course identifier.

    Keyword Arguments:
        is_enabled (boolean): enable/disable self-generated certificates
            for this course.
    """
    CertificateGenerationCourseSetting.set_enabled_for_course(course_key, is_enabled)
    cert_event_type = 'enabled' if is_enabled else 'disabled'
    tracker.emit('edx.certificate.generation.{}'.format(cert_event_type), {
        'course_id': unicode(course_key),
    })
    if is_enabled:
        log.info(u"Enabled self-generated certificates for course '%s'.", unicode(course_key))
    else:
        log.info(u"Disabled self-generated certificates for course '%s'.", unicode(course_key))
def cert_generation_enabled(course_key):
    """Check whether certificate generation is enabled for a course.

    Both switches must be on: the global self-generated-certificates
    feature, and the per-course setting.

    Arguments:
        course_key (CourseKey): The course identifier.

    Returns:
        boolean: Whether self-generated certificates are enabled for the
        course.
    """
    if not CertificateGenerationConfiguration.current().enabled:
        return False
    return CertificateGenerationCourseSetting.is_enabled_for_course(course_key)
def generate_example_certificates(course_key):
    """Generate example certificates for a course.

    Example certificates validate that certificates are configured
    correctly; staff can inspect them before enabling self-generated
    certificates for students.  One example is queued per certificate
    type the course offers (e.g. verified and honor).  Errors raised
    while starting the generation job are recorded in the database and
    can be retrieved with `example_certificates_status()`.

    Arguments:
        course_key (CourseKey): The course identifier.

    Returns:
        None
    """
    queue_interface = XQueueCertInterface()
    example_certs = ExampleCertificateSet.create_example_set(course_key)
    for example_cert in example_certs:
        queue_interface.add_example_cert(example_cert)
def has_html_certificates_enabled(course_key, course=None):
    """
    Determine if a course has html certificates enabled.

    Arguments:
        course_key (CourseKey|str): A course key or a string representation
            of one.
        course (CourseDescriptor|CourseOverview): Optionally the course
            itself; when omitted it is loaded from CourseOverview.

    Returns:
        bool: True when the CERTIFICATES_HTML_VIEW feature flag is on AND
        the course opted in via ``cert_html_view_enabled``; False
        otherwise, including when the key cannot be parsed or the course
        cannot be loaded.
    """
    try:
        if not isinstance(course_key, CourseKey):
            course_key = CourseKey.from_string(course_key)
        course = course if course else CourseOverview.get_from_id(course_key)
        return bool(
            settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False) and
            course.cert_html_view_enabled
        )
    # Deliberately best-effort: any lookup/parsing failure means "not
    # enabled".  Narrowed from a bare except so that KeyboardInterrupt
    # and SystemExit are no longer swallowed.
    except Exception:  # pylint: disable=broad-except
        return False
def example_certificates_status(course_key):
    """Report the status of example certificates for a course.

    Only the *latest* example-certificate task is inspected, since that
    is what matters for enabling/disabling self-generated certificates.

    Arguments:
        course_key (CourseKey): The course identifier.

    Returns:
        list of status dicts, for example::

            [
                {
                    'description': 'honor',
                    'status': 'success',
                    'download_url': 'http://www.example.com/abcd/honor_cert.pdf'
                },
                {
                    'description': 'verified',
                    'status': 'error',
                    'error_reason': 'No template found!'
                }
            ]
    """
    return ExampleCertificateSet.latest_status(course_key)
def get_certificate_url(user_id, course_id):
    """
    Return the URL at which the user's certificate for the course can be
    viewed (webview) or downloaded (PDF); empty string when none exists.
    """
    url = ""
    if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
        url = reverse(
            'certificates:html_view',
            kwargs={
                "user_id": str(user_id),
                "course_id": unicode(course_id),
            }
        )
    else:
        try:
            if isinstance(course_id, basestring):
                course_id = CourseKey.from_string(course_id)
            user_certificate = GeneratedCertificate.objects.get(
                user=user_id,
                course_id=course_id
            )
            url = user_certificate.download_url
        except GeneratedCertificate.DoesNotExist:
            # BUG FIX: was '%d', which raises during logging when user_id
            # is a string -- the webview branch above explicitly supports
            # string ids via str(user_id).
            log.critical(
                'Unable to lookup certificate\n'
                'user id: %s\n'
                'course: %s', user_id, unicode(course_id)
            )
    return url
def get_active_web_certificate(course, is_preview_mode=None):
    """
    Retrieve the active web certificate configuration for the specified
    course.

    Returns the first configuration dict flagged ``is_active`` (or simply
    the first one when ``is_preview_mode`` is truthy), else None.
    """
    # BUG FIX: the getattr default used to be the *string* '{}', which has
    # no .get() and raised AttributeError for courses without a
    # `certificates` attribute.  Also guard against an explicit None.
    certificates = getattr(course, 'certificates', {}) or {}
    configurations = certificates.get('certificates', [])
    for config in configurations:
        if config.get('is_active') or is_preview_mode:
            return config
    return None
def get_certificate_template(course_key, mode):
    """
    Retrieve the custom certificate template for a course_key and mode.

    Candidate filters are tried from most to least specific:
    org + course + mode, then org + mode, then org only, then mode only.

    Returns:
        The template text of the first matching active template, or None.
    """
    org_id = None
    # Fetch the organization of the course (first one wins, as before).
    course_organization = get_course_organizations(course_key)
    if course_organization:
        org_id = course_organization[0]['id']

    # Most-specific first; a candidate is skipped when any of its
    # criteria is missing (mirrors the original org_id/mode guards).
    search_criteria = [
        {'organization_id': org_id, 'course_key': course_key, 'mode': mode},
        {'organization_id': org_id, 'mode': mode},
        {'organization_id': org_id},
        {'mode': mode},
    ]
    for criteria in search_criteria:
        if not all(criteria.values()):
            continue
        templates = CertificateTemplate.objects.filter(is_active=True, **criteria)
        if templates:
            return templates[0].template
    return None
def emit_certificate_event(event_name, user, course_id, course=None, event_data=None):
    """
    Emit a certificate tracking event named ``edx.certificate.<event_name>``.

    Arguments:
        event_name (str): suffix of the event name (e.g. 'created').
        user (User): the certificate recipient.
        course_id (CourseKey): the course identifier.
        course: optionally the course; loaded from the modulestore when
            omitted.
        event_data (dict): extra payload merged with the standard fields
            (user_id, course_id, certificate_url); standard fields win.
    """
    event_name = '.'.join(['edx', 'certificate', event_name])
    if course is None:
        course = modulestore().get_course(course_id, depth=0)
    context = {
        'org_id': course.org,
        'course_id': unicode(course_id)
    }
    data = {
        'user_id': user.id,
        'course_id': unicode(course_id),
        'certificate_url': get_certificate_url(user.id, course_id)
    }
    # BUG FIX: build a copy instead of mutating the caller's event_data
    # dict in place (a surprising side effect for callers reusing it).
    payload = dict(event_data) if event_data else {}
    payload.update(data)
    with tracker.get_tracker().context(event_name, context):
        tracker.emit(event_name, payload)
|
agpl-3.0
|
thezawad/flexx
|
make/copyright.py
|
21
|
2104
|
""" Update all copyright notices to the current year.
Does a search for a specific copyright notice of last year and replaces
it with a version for this year. Other copyright mentionings are listed,
but left unmodified.
If an argument is given, use that as the name of the copyright holder,
otherwise use the name specifief in `make/__init__.py`.
"""
import os
import time
from make import ROOT_DIR, NAME
def copyright(name=''):
    """Update all copyright notices from last year to the current year.

    Walks ROOT_DIR looking for ``# Copyright (c) <last year>, <name>.`` in
    ``.py`` files and rewrites it for the current year.  Files already up
    to date are counted; other copyright mentions are reported but left
    unmodified.

    Arguments:
        name (str): copyright holder; defaults to '<NAME> Development Team'.
    """
    # Initialize
    if not name:
        name = '%s Development Team' % NAME
    TEMPLATE = "# Copyright (c) %i, %s."
    CURYEAR = int(time.strftime('%Y'))
    OLDTEXT = TEMPLATE % (CURYEAR - 1, name)
    NEWTEXT = TEMPLATE % (CURYEAR, name)
    # Initialize counts
    count_ok, count_replaced = 0, 0
    print('looking for: ' + OLDTEXT)
    # Processing the whole root directory
    for dirpath, dirnames, filenames in os.walk(ROOT_DIR):
        # Check if we should skip this directory
        reldirpath = os.path.relpath(dirpath, ROOT_DIR)
        if reldirpath[0] in '._' or reldirpath.endswith('__pycache__'):
            continue
        if os.path.split(reldirpath)[0] in ('build', 'dist'):
            continue
        # Process files
        for fname in filenames:
            if not fname.endswith('.py'):
                continue
            # Open and check.  BUG FIX: use context managers so file
            # handles are closed deterministically (the originals relied
            # on garbage collection to close them).
            filename = os.path.join(dirpath, fname)
            with open(filename, 'rt') as fh:
                text = fh.read()
            if NEWTEXT in text:
                count_ok += 1
            elif OLDTEXT in text:
                text = text.replace(OLDTEXT, NEWTEXT)
                with open(filename, 'wt') as fh:
                    fh.write(text)
                print(
                    '  Update copyright year in %s/%s' %
                    (reldirpath, fname))
                count_replaced += 1
            elif 'copyright' in text[:200].lower():
                print(
                    '  Unknown copyright mentioned in %s/%s' %
                    (reldirpath, fname))
    # Report
    print('Replaced %i copyright statements' % count_replaced)
    print('Found %i copyright statements up to date' % count_ok)
|
bsd-2-clause
|
EBTCoin/EBT
|
contrib/bitrpc/bitrpc.py
|
2348
|
7835
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local bitcoind JSON-RPC endpoint; embed the credentials
# in the URL only when a password was configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
# First CLI argument selects the RPC command (case-insensitive).
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
mit
|
ashutoshvt/psi4
|
psi4/driver/qmmm.py
|
7
|
5473
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with classes to integrate MM charges into
a QM calculation.
"""
from psi4.driver import *
class Diffuse(object):
    """Fit diffuse auxiliary-basis (RI) charges to a molecule's SCF density.

    Typical workflow: fitScf() runs an SCF in the requested basis and then
    calls fitGeneral() to least-squares fit RI-basis charges to the alpha
    density; populateExtern() then pushes the fitted charges plus the
    nuclear point charges into a core.ExternalPotential.
    """
    def __init__(self, molecule, basisname, ribasisname):
        # Fit inputs.
        self.molecule = molecule
        self.basisname = basisname
        self.ribasisname = ribasisname
        # Populated by fitScf()/fitGeneral(); None until the fit has run.
        self.basis = None
        self.ribasis = None
        self.da = None
        self.Da = None
        self.wfn = None
    def __str__(self):
        """Return a human-readable summary of the fit inputs."""
        s = ' => Diffuse <=\n\n'
        s = s + ' ' + str(self.molecule) + '\n'
        s = s + ' ' + self.basisname + '\n'
        s = s + ' ' + self.ribasisname + '\n'
        s = s + '\n'
        return s
    def fitScf(self):
        """Function to run scf and fit a system of diffuse charges to
        resulting density.
        """
        # Remember which options the user had explicitly set, so both the
        # values and the "has changed" flags can be restored afterwards.
        basisChanged = core.has_option_changed("BASIS")
        ribasisChanged = core.has_option_changed("DF_BASIS_SCF")
        scftypeChanged = core.has_option_changed("SCF_TYPE")
        basis = core.get_option("BASIS")
        ribasis = core.get_option("DF_BASIS_SCF")
        scftype = core.get_global_option("SCF_TYPE")
        core.print_out(" => Diffuse SCF (Determines Da) <=\n\n")
        # Run the SCF with this object's basis/RI-basis under DF.
        core.set_global_option("BASIS", self.basisname)
        core.set_global_option("DF_BASIS_SCF", self.ribasisname)
        core.set_global_option("SCF_TYPE", "DF")
        E, ref = energy('scf', return_wfn=True, molecule=self.molecule)
        self.wfn = ref
        core.print_out("\n")
        self.fitGeneral()
        core.clean()
        # Restore the user's option values, then clear the "changed"
        # flags for any option the user had not touched originally.
        core.set_global_option("BASIS", basis)
        core.set_global_option("DF_BASIS_SCF", ribasis)
        core.set_global_option("SCF_TYPE", scftype)
        if not basisChanged:
            core.revoke_option_changed("BASIS")
        if not ribasisChanged:
            core.revoke_option_changed("DF_BASIS_SCF")
        if not scftypeChanged:
            core.revoke_option_changed("SCF_TYPE")
    def fitGeneral(self):
        """Function to perform a general fit of diffuse charges
        to wavefunction density.
        """
        core.print_out(" => Diffuse Charge Fitting (Determines da) <=\n\n")
        self.Da = self.wfn.Da()
        self.basis = self.wfn.basisset()
        parser = core.Gaussian94BasisSetParser()
        self.ribasis = core.BasisSet.construct(parser, self.molecule, "DF_BASIS_SCF")
        # Least-squares fit of RI-basis charges to the alpha density.
        fitter = core.DFChargeFitter()
        fitter.setPrimary(self.basis)
        fitter.setAuxiliary(self.ribasis)
        fitter.setD(self.Da)
        self.da = fitter.fit()
        # Scale alpha-density charges by 2 to account for both spins
        # (closed-shell assumption).
        self.da.scale(2.0)
    def populateExtern(self, extern):
        """Add this object's fitted diffuse charges and the molecule's
        nuclear point charges to the external potential *extern*."""
        # Electronic Part
        extern.addBasis(self.ribasis, self.da)
        # Nuclear Part
        for A in range(0, self.molecule.natom()):
            extern.addCharge(self.molecule.Z(A), self.molecule.x(A), self.molecule.y(A), self.molecule.z(A))
class QMMM(object):
    """Container for an MM environment: explicit point charges plus
    diffuse charge fields, flushable into a core.ExternalPotential."""

    def __init__(self):
        self.charges = []
        self.diffuses = []
        self.extern = core.ExternalPotential()

    def addDiffuse(self, diffuse):
        """Function to add a diffuse charge field *diffuse*."""
        self.diffuses.append(diffuse)

    def addChargeBohr(self, Q, x, y, z):
        """Function to add a point charge of magnitude *Q* at
        position (*x*, *y*, *z*) Bohr.
        """
        self.charges.append([Q, x, y, z])

    def addChargeAngstrom(self, Q, x, y, z):
        """Function to add a point charge of magnitude *Q* at
        position (*x*, *y*, *z*) Angstroms.
        """
        b2a = constants.bohr2angstroms
        self.charges.append([Q, x / b2a, y / b2a, z / b2a])

    def __str__(self):
        parts = [' ==> QMMM <==\n\n']
        parts.append(' => Charges (a.u.) <=\n\n')
        parts.append(' %11s %11s %11s %11s\n' % ('Z', 'x', 'y', 'z'))
        for charge in self.charges:
            parts.append(' %11.7f %11.3E %11.3E %11.3E\n' % tuple(charge))
        parts.append('\n')
        parts.append(' => Diffuses <=\n\n')
        parts.extend(str(diffuse) for diffuse in self.diffuses)
        return ''.join(parts)

    def populateExtern(self):
        """Function to define a charge field external to the
        molecule through point and diffuse charges.
        """
        # Charges
        for Q, x, y, z in self.charges:
            self.extern.addCharge(Q, x, y, z)
        # Diffuses
        for diffuse in self.diffuses:
            diffuse.populateExtern(self.extern)
|
lgpl-3.0
|
wgcv/SWW-Crashphone
|
lib/python2.7/site-packages/django/conf/locale/mk/formats.py
|
43
|
1744
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax, per the note above).
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
# Monday (0 = Sunday, 1 = Monday).
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',      # '25.10.2006', '25.10.06'
    '%d. %m. %Y', '%d. %m. %y',  # '25. 10. 2006', '25. 10. 06'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',         # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',      # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',            # '25.10.2006 14:30'
    '%d.%m.%Y',                  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',         # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',      # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',            # '25.10.06 14:30'
    '%d.%m.%y',                  # '25.10.06'
    '%d. %m. %Y %H:%M:%S',       # '25. 10. 2006 14:30:59'
    '%d. %m. %Y %H:%M:%S.%f',    # '25. 10. 2006 14:30:59.000200'
    '%d. %m. %Y %H:%M',          # '25. 10. 2006 14:30'
    '%d. %m. %Y',                # '25. 10. 2006'
    '%d. %m. %y %H:%M:%S',       # '25. 10. 06 14:30:59'
    '%d. %m. %y %H:%M:%S.%f',    # '25. 10. 06 14:30:59.000200'
    '%d. %m. %y %H:%M',          # '25. 10. 06 14:30'
    '%d. %m. %y',                # '25. 10. 06'
)
# Number formatting: e.g. '1.234.567,89'.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
apache-2.0
|
vnpy/vnpy
|
vnpy/api/rohon/rohon_constant.py
|
5
|
39659
|
THOST_FTDC_EXP_Normal = '0'
THOST_FTDC_EXP_GenOrderByTrade = '1'
THOST_FTDC_ICT_EID = '0'
THOST_FTDC_ICT_IDCard = '1'
THOST_FTDC_ICT_OfficerIDCard = '2'
THOST_FTDC_ICT_PoliceIDCard = '3'
THOST_FTDC_ICT_SoldierIDCard = '4'
THOST_FTDC_ICT_HouseholdRegister = '5'
THOST_FTDC_ICT_Passport = '6'
THOST_FTDC_ICT_TaiwanCompatriotIDCard = '7'
THOST_FTDC_ICT_HomeComingCard = '8'
THOST_FTDC_ICT_LicenseNo = '9'
THOST_FTDC_ICT_TaxNo = 'A'
THOST_FTDC_ICT_HMMainlandTravelPermit = 'B'
THOST_FTDC_ICT_TwMainlandTravelPermit = 'C'
THOST_FTDC_ICT_DrivingLicense = 'D'
THOST_FTDC_ICT_SocialID = 'F'
THOST_FTDC_ICT_LocalID = 'G'
THOST_FTDC_ICT_BusinessRegistration = 'H'
THOST_FTDC_ICT_HKMCIDCard = 'I'
THOST_FTDC_ICT_AccountsPermits = 'J'
THOST_FTDC_ICT_FrgPrmtRdCard = 'K'
THOST_FTDC_ICT_CptMngPrdLetter = 'L'
THOST_FTDC_ICT_UniformSocialCreditCode = 'N'
THOST_FTDC_ICT_CorporationCertNo = 'O'
THOST_FTDC_ICT_OtherCard = 'x'
THOST_FTDC_IR_All = '1'
THOST_FTDC_IR_Group = '2'
THOST_FTDC_IR_Single = '3'
THOST_FTDC_DR_All = '1'
THOST_FTDC_DR_Group = '2'
THOST_FTDC_DR_Single = '3'
THOST_FTDC_DS_Asynchronous = '1'
THOST_FTDC_DS_Synchronizing = '2'
THOST_FTDC_DS_Synchronized = '3'
THOST_FTDC_BDS_Synchronized = '1'
THOST_FTDC_BDS_Synchronizing = '2'
THOST_FTDC_ECS_NoConnection = '1'
THOST_FTDC_ECS_QryInstrumentSent = '2'
THOST_FTDC_ECS_GotInformation = '9'
THOST_FTDC_TCS_NotConnected = '1'
THOST_FTDC_TCS_Connected = '2'
THOST_FTDC_TCS_QryInstrumentSent = '3'
THOST_FTDC_TCS_SubPrivateFlow = '4'
THOST_FTDC_FC_DataAsync = '1'
THOST_FTDC_FC_ForceUserLogout = '2'
THOST_FTDC_FC_UserPasswordUpdate = '3'
THOST_FTDC_FC_BrokerPasswordUpdate = '4'
THOST_FTDC_FC_InvestorPasswordUpdate = '5'
THOST_FTDC_FC_OrderInsert = '6'
THOST_FTDC_FC_OrderAction = '7'
THOST_FTDC_FC_SyncSystemData = '8'
THOST_FTDC_FC_SyncBrokerData = '9'
THOST_FTDC_FC_BachSyncBrokerData = 'A'
THOST_FTDC_FC_SuperQuery = 'B'
THOST_FTDC_FC_ParkedOrderInsert = 'C'
THOST_FTDC_FC_ParkedOrderAction = 'D'
THOST_FTDC_FC_SyncOTP = 'E'
THOST_FTDC_FC_DeleteOrder = 'F'
THOST_FTDC_BFC_ForceUserLogout = '1'
THOST_FTDC_BFC_UserPasswordUpdate = '2'
THOST_FTDC_BFC_SyncBrokerData = '3'
THOST_FTDC_BFC_BachSyncBrokerData = '4'
THOST_FTDC_BFC_OrderInsert = '5'
THOST_FTDC_BFC_OrderAction = '6'
THOST_FTDC_BFC_AllQuery = '7'
THOST_FTDC_BFC_log = 'a'
THOST_FTDC_BFC_BaseQry = 'b'
THOST_FTDC_BFC_TradeQry = 'c'
THOST_FTDC_BFC_Trade = 'd'
THOST_FTDC_BFC_Virement = 'e'
THOST_FTDC_BFC_Risk = 'f'
THOST_FTDC_BFC_Session = 'g'
THOST_FTDC_BFC_RiskNoticeCtl = 'h'
THOST_FTDC_BFC_RiskNotice = 'i'
THOST_FTDC_BFC_BrokerDeposit = 'j'
THOST_FTDC_BFC_QueryFund = 'k'
THOST_FTDC_BFC_QueryOrder = 'l'
THOST_FTDC_BFC_QueryTrade = 'm'
THOST_FTDC_BFC_QueryPosition = 'n'
THOST_FTDC_BFC_QueryMarketData = 'o'
THOST_FTDC_BFC_QueryUserEvent = 'p'
THOST_FTDC_BFC_QueryRiskNotify = 'q'
THOST_FTDC_BFC_QueryFundChange = 'r'
THOST_FTDC_BFC_QueryInvestor = 's'
THOST_FTDC_BFC_QueryTradingCode = 't'
THOST_FTDC_BFC_ForceClose = 'u'
THOST_FTDC_BFC_PressTest = 'v'
THOST_FTDC_BFC_RemainCalc = 'w'
THOST_FTDC_BFC_NetPositionInd = 'x'
THOST_FTDC_BFC_RiskPredict = 'y'
THOST_FTDC_BFC_DataExport = 'z'
THOST_FTDC_BFC_RiskTargetSetup = 'A'
THOST_FTDC_BFC_MarketDataWarn = 'B'
THOST_FTDC_BFC_QryBizNotice = 'C'
THOST_FTDC_BFC_CfgBizNotice = 'D'
THOST_FTDC_BFC_SyncOTP = 'E'
THOST_FTDC_BFC_SendBizNotice = 'F'
THOST_FTDC_BFC_CfgRiskLevelStd = 'G'
THOST_FTDC_BFC_TbCommand = 'H'
THOST_FTDC_BFC_DeleteOrder = 'J'
THOST_FTDC_BFC_ParkedOrderInsert = 'K'
THOST_FTDC_BFC_ParkedOrderAction = 'L'
THOST_FTDC_BFC_ExecOrderNoCheck = 'M'
THOST_FTDC_BFC_Designate = 'N'
THOST_FTDC_BFC_StockDisposal = 'O'
THOST_FTDC_BFC_BrokerDepositWarn = 'Q'
THOST_FTDC_BFC_CoverWarn = 'S'
THOST_FTDC_BFC_PreExecOrder = 'T'
THOST_FTDC_BFC_ExecOrderRisk = 'P'
THOST_FTDC_BFC_PosiLimitWarn = 'U'
THOST_FTDC_BFC_QryPosiLimit = 'V'
THOST_FTDC_BFC_FBSign = 'W'
THOST_FTDC_BFC_FBAccount = 'X'
THOST_FTDC_OAS_Submitted = 'a'
THOST_FTDC_OAS_Accepted = 'b'
THOST_FTDC_OAS_Rejected = 'c'
THOST_FTDC_OST_AllTraded = '0'
THOST_FTDC_OST_PartTradedQueueing = '1'
THOST_FTDC_OST_PartTradedNotQueueing = '2'
THOST_FTDC_OST_NoTradeQueueing = '3'
THOST_FTDC_OST_NoTradeNotQueueing = '4'
THOST_FTDC_OST_Canceled = '5'
THOST_FTDC_OST_Unknown = 'a'
THOST_FTDC_OST_NotTouched = 'b'
THOST_FTDC_OST_Touched = 'c'
THOST_FTDC_OSS_InsertSubmitted = '0'
THOST_FTDC_OSS_CancelSubmitted = '1'
THOST_FTDC_OSS_ModifySubmitted = '2'
THOST_FTDC_OSS_Accepted = '3'
THOST_FTDC_OSS_InsertRejected = '4'
THOST_FTDC_OSS_CancelRejected = '5'
THOST_FTDC_OSS_ModifyRejected = '6'
THOST_FTDC_PSD_Today = '1'
THOST_FTDC_PSD_History = '2'
THOST_FTDC_PDT_UseHistory = '1'
THOST_FTDC_PDT_NoUseHistory = '2'
THOST_FTDC_ER_Broker = '1'
THOST_FTDC_ER_Host = '2'
THOST_FTDC_ER_Maker = '3'
THOST_FTDC_PC_Futures = '1'
THOST_FTDC_PC_Options = '2'
THOST_FTDC_PC_Combination = '3'
THOST_FTDC_PC_Spot = '4'
THOST_FTDC_PC_EFP = '5'
THOST_FTDC_PC_SpotOption = '6'
THOST_FTDC_PC_TAS = '7'
THOST_FTDC_PC_MI = 'I'
THOST_FTDC_APC_FutureSingle = '1'
THOST_FTDC_APC_OptionSingle = '2'
THOST_FTDC_APC_Futures = '3'
THOST_FTDC_APC_Options = '4'
THOST_FTDC_APC_TradingComb = '5'
THOST_FTDC_APC_UnTradingComb = '6'
THOST_FTDC_APC_AllTrading = '7'
THOST_FTDC_APC_All = '8'
THOST_FTDC_IP_NotStart = '0'
THOST_FTDC_IP_Started = '1'
THOST_FTDC_IP_Pause = '2'
THOST_FTDC_IP_Expired = '3'
THOST_FTDC_D_Buy = '0'
THOST_FTDC_D_Sell = '1'
THOST_FTDC_PT_Net = '1'
THOST_FTDC_PT_Gross = '2'
THOST_FTDC_PD_Net = '1'
THOST_FTDC_PD_Long = '2'
THOST_FTDC_PD_Short = '3'
THOST_FTDC_SS_NonActive = '1'
THOST_FTDC_SS_Startup = '2'
THOST_FTDC_SS_Operating = '3'
THOST_FTDC_SS_Settlement = '4'
THOST_FTDC_SS_SettlementFinished = '5'
THOST_FTDC_RA_Trade = '0'
THOST_FTDC_RA_Settlement = '1'
THOST_FTDC_HF_Speculation = '1'
THOST_FTDC_HF_Arbitrage = '2'
THOST_FTDC_HF_Hedge = '3'
THOST_FTDC_HF_MarketMaker = '5'
THOST_FTDC_HF_SpecHedge = '6'
THOST_FTDC_HF_HedgeSpec = '7'
THOST_FTDC_BHF_Speculation = '1'
THOST_FTDC_BHF_Arbitrage = '2'
THOST_FTDC_BHF_Hedge = '3'
THOST_FTDC_CIDT_Speculation = '1'
THOST_FTDC_CIDT_Arbitrage = '2'
THOST_FTDC_CIDT_Hedge = '3'
THOST_FTDC_CIDT_MarketMaker = '5'
THOST_FTDC_OPT_AnyPrice = '1'
THOST_FTDC_OPT_LimitPrice = '2'
THOST_FTDC_OPT_BestPrice = '3'
THOST_FTDC_OPT_LastPrice = '4'
THOST_FTDC_OPT_LastPricePlusOneTicks = '5'
THOST_FTDC_OPT_LastPricePlusTwoTicks = '6'
THOST_FTDC_OPT_LastPricePlusThreeTicks = '7'
THOST_FTDC_OPT_AskPrice1 = '8'
THOST_FTDC_OPT_AskPrice1PlusOneTicks = '9'
THOST_FTDC_OPT_AskPrice1PlusTwoTicks = 'A'
THOST_FTDC_OPT_AskPrice1PlusThreeTicks = 'B'
THOST_FTDC_OPT_BidPrice1 = 'C'
THOST_FTDC_OPT_BidPrice1PlusOneTicks = 'D'
THOST_FTDC_OPT_BidPrice1PlusTwoTicks = 'E'
THOST_FTDC_OPT_BidPrice1PlusThreeTicks = 'F'
THOST_FTDC_OPT_FiveLevelPrice = 'G'
THOST_FTDC_OF_Open = '0'
THOST_FTDC_OF_Close = '1'
THOST_FTDC_OF_ForceClose = '2'
THOST_FTDC_OF_CloseToday = '3'
THOST_FTDC_OF_CloseYesterday = '4'
THOST_FTDC_OF_ForceOff = '5'
THOST_FTDC_OF_LocalForceClose = '6'
THOST_FTDC_FCC_NotForceClose = '0'
THOST_FTDC_FCC_LackDeposit = '1'
THOST_FTDC_FCC_ClientOverPositionLimit = '2'
THOST_FTDC_FCC_MemberOverPositionLimit = '3'
THOST_FTDC_FCC_NotMultiple = '4'
THOST_FTDC_FCC_Violation = '5'
THOST_FTDC_FCC_Other = '6'
THOST_FTDC_FCC_PersonDeliv = '7'
THOST_FTDC_ORDT_Normal = '0'
THOST_FTDC_ORDT_DeriveFromQuote = '1'
THOST_FTDC_ORDT_DeriveFromCombination = '2'
THOST_FTDC_ORDT_Combination = '3'
THOST_FTDC_ORDT_ConditionalOrder = '4'
THOST_FTDC_ORDT_Swap = '5'
THOST_FTDC_ORDT_DeriveFromBlockTrade = '6'
THOST_FTDC_ORDT_DeriveFromEFPTrade = '7'
THOST_FTDC_TC_IOC = '1'
THOST_FTDC_TC_GFS = '2'
THOST_FTDC_TC_GFD = '3'
THOST_FTDC_TC_GTD = '4'
THOST_FTDC_TC_GTC = '5'
THOST_FTDC_TC_GFA = '6'
THOST_FTDC_VC_AV = '1'
THOST_FTDC_VC_MV = '2'
THOST_FTDC_VC_CV = '3'
THOST_FTDC_CC_Immediately = '1'
THOST_FTDC_CC_Touch = '2'
THOST_FTDC_CC_TouchProfit = '3'
THOST_FTDC_CC_ParkedOrder = '4'
THOST_FTDC_CC_LastPriceGreaterThanStopPrice = '5'
THOST_FTDC_CC_LastPriceGreaterEqualStopPrice = '6'
THOST_FTDC_CC_LastPriceLesserThanStopPrice = '7'
THOST_FTDC_CC_LastPriceLesserEqualStopPrice = '8'
THOST_FTDC_CC_AskPriceGreaterThanStopPrice = '9'
THOST_FTDC_CC_AskPriceGreaterEqualStopPrice = 'A'
THOST_FTDC_CC_AskPriceLesserThanStopPrice = 'B'
THOST_FTDC_CC_AskPriceLesserEqualStopPrice = 'C'
THOST_FTDC_CC_BidPriceGreaterThanStopPrice = 'D'
THOST_FTDC_CC_BidPriceGreaterEqualStopPrice = 'E'
THOST_FTDC_CC_BidPriceLesserThanStopPrice = 'F'
THOST_FTDC_CC_BidPriceLesserEqualStopPrice = 'H'
THOST_FTDC_AF_Delete = '0'
THOST_FTDC_AF_Modify = '3'
THOST_FTDC_TR_Allow = '0'
THOST_FTDC_TR_CloseOnly = '1'
THOST_FTDC_TR_Forbidden = '2'
THOST_FTDC_OSRC_Participant = '0'
THOST_FTDC_OSRC_Administrator = '1'
THOST_FTDC_TRDT_SplitCombination = '#'
THOST_FTDC_TRDT_Common = '0'
THOST_FTDC_TRDT_OptionsExecution = '1'
THOST_FTDC_TRDT_OTC = '2'
THOST_FTDC_TRDT_EFPDerived = '3'
THOST_FTDC_TRDT_CombinationDerived = '4'
THOST_FTDC_TRDT_BlockTrade = '5'
THOST_FTDC_SPOST_Common = '#'
THOST_FTDC_SPOST_Tas = '0'
THOST_FTDC_PSRC_LastPrice = '0'
THOST_FTDC_PSRC_Buy = '1'
THOST_FTDC_PSRC_Sell = '2'
THOST_FTDC_PSRC_OTC = '3'
THOST_FTDC_IS_BeforeTrading = '0'
THOST_FTDC_IS_NoTrading = '1'
THOST_FTDC_IS_Continous = '2'
THOST_FTDC_IS_AuctionOrdering = '3'
THOST_FTDC_IS_AuctionBalance = '4'
THOST_FTDC_IS_AuctionMatch = '5'
THOST_FTDC_IS_Closed = '6'
THOST_FTDC_IER_Automatic = '1'
THOST_FTDC_IER_Manual = '2'
THOST_FTDC_IER_Fuse = '3'
THOST_FTDC_BS_NoUpload = '1'
THOST_FTDC_BS_Uploaded = '2'
THOST_FTDC_BS_Failed = '3'
THOST_FTDC_RS_All = '1'
THOST_FTDC_RS_ByProduct = '2'
THOST_FTDC_RP_ByVolume = '1'
THOST_FTDC_RP_ByFeeOnHand = '2'
THOST_FTDC_RL_Level1 = '1'
THOST_FTDC_RL_Level2 = '2'
THOST_FTDC_RL_Level3 = '3'
THOST_FTDC_RL_Level4 = '4'
THOST_FTDC_RL_Level5 = '5'
THOST_FTDC_RL_Level6 = '6'
THOST_FTDC_RL_Level7 = '7'
THOST_FTDC_RL_Level8 = '8'
THOST_FTDC_RL_Level9 = '9'
THOST_FTDC_RSD_ByPeriod = '1'
THOST_FTDC_RSD_ByStandard = '2'
THOST_FTDC_MT_Out = '0'
THOST_FTDC_MT_In = '1'
THOST_FTDC_ISPI_MortgageRatio = '4'
THOST_FTDC_ISPI_MarginWay = '5'
THOST_FTDC_ISPI_BillDeposit = '9'
THOST_FTDC_ESPI_MortgageRatio = '1'
THOST_FTDC_ESPI_OtherFundItem = '2'
THOST_FTDC_ESPI_OtherFundImport = '3'
THOST_FTDC_ESPI_CFFEXMinPrepa = '6'
THOST_FTDC_ESPI_CZCESettlementType = '7'
THOST_FTDC_ESPI_ExchDelivFeeMode = '9'
THOST_FTDC_ESPI_DelivFeeMode = '0'
THOST_FTDC_ESPI_CZCEComMarginType = 'A'
THOST_FTDC_ESPI_DceComMarginType = 'B'
THOST_FTDC_ESPI_OptOutDisCountRate = 'a'
THOST_FTDC_ESPI_OptMiniGuarantee = 'b'
THOST_FTDC_SPI_InvestorIDMinLength = '1'
THOST_FTDC_SPI_AccountIDMinLength = '2'
THOST_FTDC_SPI_UserRightLogon = '3'
THOST_FTDC_SPI_SettlementBillTrade = '4'
THOST_FTDC_SPI_TradingCode = '5'
THOST_FTDC_SPI_CheckFund = '6'
THOST_FTDC_SPI_CommModelRight = '7'
THOST_FTDC_SPI_MarginModelRight = '9'
THOST_FTDC_SPI_IsStandardActive = '8'
THOST_FTDC_SPI_UploadSettlementFile = 'U'
THOST_FTDC_SPI_DownloadCSRCFile = 'D'
THOST_FTDC_SPI_SettlementBillFile = 'S'
THOST_FTDC_SPI_CSRCOthersFile = 'C'
THOST_FTDC_SPI_InvestorPhoto = 'P'
THOST_FTDC_SPI_CSRCData = 'R'
THOST_FTDC_SPI_InvestorPwdModel = 'I'
THOST_FTDC_SPI_CFFEXInvestorSettleFile = 'F'
THOST_FTDC_SPI_InvestorIDType = 'a'
THOST_FTDC_SPI_FreezeMaxReMain = 'r'
THOST_FTDC_SPI_IsSync = 'A'
THOST_FTDC_SPI_RelieveOpenLimit = 'O'
THOST_FTDC_SPI_IsStandardFreeze = 'X'
THOST_FTDC_SPI_CZCENormalProductHedge = 'B'
THOST_FTDC_TPID_EncryptionStandard = 'E'
THOST_FTDC_TPID_RiskMode = 'R'
THOST_FTDC_TPID_RiskModeGlobal = 'G'
THOST_FTDC_TPID_modeEncode = 'P'
THOST_FTDC_TPID_tickMode = 'T'
THOST_FTDC_TPID_SingleUserSessionMaxNum = 'S'
THOST_FTDC_TPID_LoginFailMaxNum = 'L'
THOST_FTDC_TPID_IsAuthForce = 'A'
THOST_FTDC_TPID_IsPosiFreeze = 'F'
THOST_FTDC_TPID_IsPosiLimit = 'M'
THOST_FTDC_TPID_ForQuoteTimeInterval = 'Q'
THOST_FTDC_TPID_IsFuturePosiLimit = 'B'
THOST_FTDC_TPID_IsFutureOrderFreq = 'C'
THOST_FTDC_TPID_IsExecOrderProfit = 'H'
THOST_FTDC_TPID_IsCheckBankAcc = 'I'
THOST_FTDC_TPID_PasswordDeadLine = 'J'
THOST_FTDC_TPID_IsStrongPassword = 'K'
THOST_FTDC_TPID_BalanceMorgage = 'a'
THOST_FTDC_TPID_MinPwdLen = 'O'
THOST_FTDC_TPID_LoginFailMaxNumForIP = 'U'
THOST_FTDC_TPID_PasswordPeriod = 'V'
THOST_FTDC_FI_SettlementFund = 'F'
THOST_FTDC_FI_Trade = 'T'
THOST_FTDC_FI_InvestorPosition = 'P'
THOST_FTDC_FI_SubEntryFund = 'O'
THOST_FTDC_FI_CZCECombinationPos = 'C'
THOST_FTDC_FI_CSRCData = 'R'
THOST_FTDC_FI_CZCEClose = 'L'
THOST_FTDC_FI_CZCENoClose = 'N'
THOST_FTDC_FI_PositionDtl = 'D'
THOST_FTDC_FI_OptionStrike = 'S'
THOST_FTDC_FI_SettlementPriceComparison = 'M'
THOST_FTDC_FI_NonTradePosChange = 'B'
THOST_FTDC_FUT_Settlement = '0'
THOST_FTDC_FUT_Check = '1'
THOST_FTDC_FFT_Txt = '0'
THOST_FTDC_FFT_Zip = '1'
THOST_FTDC_FFT_DBF = '2'
THOST_FTDC_FUS_SucceedUpload = '1'
THOST_FTDC_FUS_FailedUpload = '2'
THOST_FTDC_FUS_SucceedLoad = '3'
THOST_FTDC_FUS_PartSucceedLoad = '4'
THOST_FTDC_FUS_FailedLoad = '5'
THOST_FTDC_TD_Out = '0'
THOST_FTDC_TD_In = '1'
THOST_FTDC_SC_NoSpecialRule = '0'
THOST_FTDC_SC_NoSpringFestival = '1'
THOST_FTDC_IPT_LastSettlement = '1'
THOST_FTDC_IPT_LaseClose = '2'
THOST_FTDC_PLP_Active = '1'
THOST_FTDC_PLP_NonActive = '2'
THOST_FTDC_PLP_Canceled = '3'
THOST_FTDC_DM_CashDeliv = '1'
THOST_FTDC_DM_CommodityDeliv = '2'
THOST_FTDC_FIOT_FundIO = '1'
THOST_FTDC_FIOT_Transfer = '2'
THOST_FTDC_FIOT_SwapCurrency = '3'
THOST_FTDC_FT_Deposite = '1'
THOST_FTDC_FT_ItemFund = '2'
THOST_FTDC_FT_Company = '3'
THOST_FTDC_FT_InnerTransfer = '4'
THOST_FTDC_FD_In = '1'
THOST_FTDC_FD_Out = '2'
THOST_FTDC_FS_Record = '1'
THOST_FTDC_FS_Check = '2'
THOST_FTDC_FS_Charge = '3'
THOST_FTDC_PS_None = '1'
THOST_FTDC_PS_Publishing = '2'
THOST_FTDC_PS_Published = '3'
THOST_FTDC_ES_NonActive = '1'
THOST_FTDC_ES_Startup = '2'
THOST_FTDC_ES_Initialize = '3'
THOST_FTDC_ES_Initialized = '4'
THOST_FTDC_ES_Close = '5'
THOST_FTDC_ES_Closed = '6'
THOST_FTDC_ES_Settlement = '7'
THOST_FTDC_STS_Initialize = '0'
THOST_FTDC_STS_Settlementing = '1'
THOST_FTDC_STS_Settlemented = '2'
THOST_FTDC_STS_Finished = '3'
THOST_FTDC_CT_Person = '0'
THOST_FTDC_CT_Company = '1'
THOST_FTDC_CT_Fund = '2'
THOST_FTDC_CT_SpecialOrgan = '3'
THOST_FTDC_CT_Asset = '4'
THOST_FTDC_BT_Trade = '0'
THOST_FTDC_BT_TradeSettle = '1'
THOST_FTDC_FAS_Low = '1'
THOST_FTDC_FAS_Normal = '2'
THOST_FTDC_FAS_Focus = '3'
THOST_FTDC_FAS_Risk = '4'
THOST_FTDC_FAS_ByTrade = '1'
THOST_FTDC_FAS_ByDeliv = '2'
THOST_FTDC_FAS_None = '3'
THOST_FTDC_FAS_FixFee = '4'
THOST_FTDC_PWDT_Trade = '1'
THOST_FTDC_PWDT_Account = '2'
THOST_FTDC_AG_All = '1'
THOST_FTDC_AG_OnlyLost = '2'
THOST_FTDC_AG_OnlyGain = '3'
THOST_FTDC_AG_None = '4'
THOST_FTDC_ICP_Include = '0'
THOST_FTDC_ICP_NotInclude = '2'
THOST_FTDC_AWT_Enable = '0'
THOST_FTDC_AWT_Disable = '2'
THOST_FTDC_AWT_NoHoldEnable = '3'
THOST_FTDC_FPWD_UnCheck = '0'
THOST_FTDC_FPWD_Check = '1'
THOST_FTDC_TT_BankToFuture = '0'
THOST_FTDC_TT_FutureToBank = '1'
THOST_FTDC_TVF_Invalid = '0'
THOST_FTDC_TVF_Valid = '1'
THOST_FTDC_TVF_Reverse = '2'
THOST_FTDC_RN_CD = '0'
THOST_FTDC_RN_ZT = '1'
THOST_FTDC_RN_QT = '2'
THOST_FTDC_SEX_None = '0'
THOST_FTDC_SEX_Man = '1'
THOST_FTDC_SEX_Woman = '2'
THOST_FTDC_UT_Investor = '0'
THOST_FTDC_UT_Operator = '1'
THOST_FTDC_UT_SuperUser = '2'
THOST_FTDC_RATETYPE_MarginRate = '2'
THOST_FTDC_NOTETYPE_TradeSettleBill = '1'
THOST_FTDC_NOTETYPE_TradeSettleMonth = '2'
THOST_FTDC_NOTETYPE_CallMarginNotes = '3'
THOST_FTDC_NOTETYPE_ForceCloseNotes = '4'
THOST_FTDC_NOTETYPE_TradeNotes = '5'
THOST_FTDC_NOTETYPE_DelivNotes = '6'
THOST_FTDC_SBS_Day = '1'
THOST_FTDC_SBS_Volume = '2'
THOST_FTDC_ST_Day = '0'
THOST_FTDC_ST_Month = '1'
THOST_FTDC_URT_Logon = '1'
THOST_FTDC_URT_Transfer = '2'
THOST_FTDC_URT_EMail = '3'
THOST_FTDC_URT_Fax = '4'
THOST_FTDC_URT_ConditionOrder = '5'
THOST_FTDC_MPT_PreSettlementPrice = '1'
THOST_FTDC_MPT_SettlementPrice = '2'
THOST_FTDC_MPT_AveragePrice = '3'
THOST_FTDC_MPT_OpenPrice = '4'
THOST_FTDC_BGS_None = '0'
THOST_FTDC_BGS_NoGenerated = '1'
THOST_FTDC_BGS_Generated = '2'
THOST_FTDC_AT_HandlePositionAlgo = '1'
THOST_FTDC_AT_FindMarginRateAlgo = '2'
THOST_FTDC_HPA_Base = '1'
THOST_FTDC_HPA_DCE = '2'
THOST_FTDC_HPA_CZCE = '3'
THOST_FTDC_FMRA_Base = '1'
THOST_FTDC_FMRA_DCE = '2'
THOST_FTDC_FMRA_CZCE = '3'
THOST_FTDC_HTAA_Base = '1'
THOST_FTDC_HTAA_DCE = '2'
THOST_FTDC_HTAA_CZCE = '3'
THOST_FTDC_PST_Order = '1'
THOST_FTDC_PST_Open = '2'
THOST_FTDC_PST_Fund = '3'
THOST_FTDC_PST_Settlement = '4'
THOST_FTDC_PST_Company = '5'
THOST_FTDC_PST_Corporation = '6'
THOST_FTDC_PST_LinkMan = '7'
THOST_FTDC_PST_Ledger = '8'
THOST_FTDC_PST_Trustee = '9'
THOST_FTDC_PST_TrusteeCorporation = 'A'
THOST_FTDC_PST_TrusteeOpen = 'B'
THOST_FTDC_PST_TrusteeContact = 'C'
THOST_FTDC_PST_ForeignerRefer = 'D'
THOST_FTDC_PST_CorporationRefer = 'E'
THOST_FTDC_QIR_All = '1'
THOST_FTDC_QIR_Group = '2'
THOST_FTDC_QIR_Single = '3'
THOST_FTDC_IRS_Normal = '1'
THOST_FTDC_IRS_Warn = '2'
THOST_FTDC_IRS_Call = '3'
THOST_FTDC_IRS_Force = '4'
THOST_FTDC_IRS_Exception = '5'
THOST_FTDC_UET_Login = '1'
THOST_FTDC_UET_Logout = '2'
THOST_FTDC_UET_Trading = '3'
THOST_FTDC_UET_TradingError = '4'
THOST_FTDC_UET_UpdatePassword = '5'
THOST_FTDC_UET_Authenticate = '6'
THOST_FTDC_UET_SubmitSysInfo = '7'
THOST_FTDC_UET_Transfer = '8'
THOST_FTDC_UET_Other = '9'
THOST_FTDC_ICS_Close = '0'
THOST_FTDC_ICS_CloseToday = '1'
THOST_FTDC_SM_Non = '0'
THOST_FTDC_SM_Instrument = '1'
THOST_FTDC_SM_Product = '2'
THOST_FTDC_SM_Investor = '3'
THOST_FTDC_PAOS_NotSend = '1'
THOST_FTDC_PAOS_Send = '2'
THOST_FTDC_PAOS_Deleted = '3'
THOST_FTDC_VDS_Dealing = '1'
THOST_FTDC_VDS_DeaclSucceed = '2'
THOST_FTDC_ORGS_Standard = '0'
THOST_FTDC_ORGS_ESunny = '1'
THOST_FTDC_ORGS_KingStarV6 = '2'
THOST_FTDC_VTS_NaturalDeal = '0'
THOST_FTDC_VTS_SucceedEnd = '1'
THOST_FTDC_VTS_FailedEND = '2'
THOST_FTDC_VTS_Exception = '3'
THOST_FTDC_VTS_ManualDeal = '4'
THOST_FTDC_VTS_MesException = '5'
THOST_FTDC_VTS_SysException = '6'
THOST_FTDC_VBAT_BankBook = '1'
THOST_FTDC_VBAT_BankCard = '2'
THOST_FTDC_VBAT_CreditCard = '3'
THOST_FTDC_VMS_Natural = '0'
THOST_FTDC_VMS_Canceled = '9'
THOST_FTDC_VAA_NoAvailAbility = '0'
THOST_FTDC_VAA_AvailAbility = '1'
THOST_FTDC_VAA_Repeal = '2'
THOST_FTDC_VTC_BankBankToFuture = '102001'
THOST_FTDC_VTC_BankFutureToBank = '102002'
THOST_FTDC_VTC_FutureBankToFuture = '202001'
THOST_FTDC_VTC_FutureFutureToBank = '202002'
THOST_FTDC_GEN_Program = '0'
THOST_FTDC_GEN_HandWork = '1'
THOST_FTDC_CFMMCKK_REQUEST = 'R'
THOST_FTDC_CFMMCKK_AUTO = 'A'
THOST_FTDC_CFMMCKK_MANUAL = 'M'
THOST_FTDC_CFT_IDCard = '0'
THOST_FTDC_CFT_Passport = '1'
THOST_FTDC_CFT_OfficerIDCard = '2'
THOST_FTDC_CFT_SoldierIDCard = '3'
THOST_FTDC_CFT_HomeComingCard = '4'
THOST_FTDC_CFT_HouseholdRegister = '5'
THOST_FTDC_CFT_LicenseNo = '6'
THOST_FTDC_CFT_InstitutionCodeCard = '7'
THOST_FTDC_CFT_TempLicenseNo = '8'
THOST_FTDC_CFT_NoEnterpriseLicenseNo = '9'
THOST_FTDC_CFT_OtherCard = 'x'
THOST_FTDC_CFT_SuperDepAgree = 'a'
THOST_FTDC_FBC_Others = '0'
THOST_FTDC_FBC_TransferDetails = '1'
THOST_FTDC_FBC_CustAccStatus = '2'
THOST_FTDC_FBC_AccountTradeDetails = '3'
THOST_FTDC_FBC_FutureAccountChangeInfoDetails = '4'
THOST_FTDC_FBC_CustMoneyDetail = '5'
THOST_FTDC_FBC_CustCancelAccountInfo = '6'
THOST_FTDC_FBC_CustMoneyResult = '7'
THOST_FTDC_FBC_OthersExceptionResult = '8'
THOST_FTDC_FBC_CustInterestNetMoneyDetails = '9'
THOST_FTDC_FBC_CustMoneySendAndReceiveDetails = 'a'
THOST_FTDC_FBC_CorporationMoneyTotal = 'b'
THOST_FTDC_FBC_MainbodyMoneyTotal = 'c'
THOST_FTDC_FBC_MainPartMonitorData = 'd'
THOST_FTDC_FBC_PreparationMoney = 'e'
THOST_FTDC_FBC_BankMoneyMonitorData = 'f'
THOST_FTDC_CEC_Exchange = '1'
THOST_FTDC_CEC_Cash = '2'
THOST_FTDC_YNI_Yes = '0'
THOST_FTDC_YNI_No = '1'
THOST_FTDC_BLT_CurrentMoney = '0'
THOST_FTDC_BLT_UsableMoney = '1'
THOST_FTDC_BLT_FetchableMoney = '2'
THOST_FTDC_BLT_FreezeMoney = '3'
THOST_FTDC_GD_Unknown = '0'
THOST_FTDC_GD_Male = '1'
THOST_FTDC_GD_Female = '2'
THOST_FTDC_FPF_BEN = '0'
THOST_FTDC_FPF_OUR = '1'
THOST_FTDC_FPF_SHA = '2'
THOST_FTDC_PWKT_ExchangeKey = '0'
THOST_FTDC_PWKT_PassWordKey = '1'
THOST_FTDC_PWKT_MACKey = '2'
THOST_FTDC_PWKT_MessageKey = '3'
THOST_FTDC_PWT_Query = '0'
THOST_FTDC_PWT_Fetch = '1'
THOST_FTDC_PWT_Transfer = '2'
THOST_FTDC_PWT_Trade = '3'
THOST_FTDC_EM_NoEncry = '0'
THOST_FTDC_EM_DES = '1'
THOST_FTDC_EM_3DES = '2'
THOST_FTDC_BRF_BankNotNeedRepeal = '0'
THOST_FTDC_BRF_BankWaitingRepeal = '1'
THOST_FTDC_BRF_BankBeenRepealed = '2'
THOST_FTDC_BRORF_BrokerNotNeedRepeal = '0'
THOST_FTDC_BRORF_BrokerWaitingRepeal = '1'
THOST_FTDC_BRORF_BrokerBeenRepealed = '2'
THOST_FTDC_TS_Bank = '0'
THOST_FTDC_TS_Future = '1'
THOST_FTDC_TS_Store = '2'
THOST_FTDC_LF_Yes = '0'
THOST_FTDC_LF_No = '1'
THOST_FTDC_BAS_Normal = '0'
THOST_FTDC_BAS_Freeze = '1'
THOST_FTDC_BAS_ReportLoss = '2'
THOST_FTDC_MAS_Normal = '0'
THOST_FTDC_MAS_Cancel = '1'
THOST_FTDC_MSS_Point = '0'
THOST_FTDC_MSS_PrePoint = '1'
THOST_FTDC_MSS_CancelPoint = '2'
THOST_FTDC_SYT_FutureBankTransfer = '0'
THOST_FTDC_SYT_StockBankTransfer = '1'
THOST_FTDC_SYT_TheThirdPartStore = '2'
THOST_FTDC_TEF_NormalProcessing = '0'
THOST_FTDC_TEF_Success = '1'
THOST_FTDC_TEF_Failed = '2'
THOST_FTDC_TEF_Abnormal = '3'
THOST_FTDC_TEF_ManualProcessedForException = '4'
THOST_FTDC_TEF_CommuFailedNeedManualProcess = '5'
THOST_FTDC_TEF_SysErrorNeedManualProcess = '6'
THOST_FTDC_PSS_NotProcess = '0'
THOST_FTDC_PSS_StartProcess = '1'
THOST_FTDC_PSS_Finished = '2'
THOST_FTDC_CUSTT_Person = '0'
THOST_FTDC_CUSTT_Institution = '1'
THOST_FTDC_FBTTD_FromBankToFuture = '1'
THOST_FTDC_FBTTD_FromFutureToBank = '2'
THOST_FTDC_OOD_Open = '1'
THOST_FTDC_OOD_Destroy = '0'
THOST_FTDC_AVAF_Invalid = '0'
THOST_FTDC_AVAF_Valid = '1'
THOST_FTDC_AVAF_Repeal = '2'
THOST_FTDC_OT_Bank = '1'
THOST_FTDC_OT_Future = '2'
THOST_FTDC_OT_PlateForm = '9'
THOST_FTDC_OL_HeadQuarters = '1'
THOST_FTDC_OL_Branch = '2'
THOST_FTDC_PID_FutureProtocal = '0'
THOST_FTDC_PID_ICBCProtocal = '1'
THOST_FTDC_PID_ABCProtocal = '2'
THOST_FTDC_PID_CBCProtocal = '3'
THOST_FTDC_PID_CCBProtocal = '4'
THOST_FTDC_PID_BOCOMProtocal = '5'
THOST_FTDC_PID_FBTPlateFormProtocal = 'X'
THOST_FTDC_CM_ShortConnect = '0'
THOST_FTDC_CM_LongConnect = '1'
THOST_FTDC_SRM_ASync = '0'
THOST_FTDC_SRM_Sync = '1'
THOST_FTDC_BAT_BankBook = '1'
THOST_FTDC_BAT_SavingCard = '2'
THOST_FTDC_BAT_CreditCard = '3'
THOST_FTDC_FAT_BankBook = '1'
THOST_FTDC_FAT_SavingCard = '2'
THOST_FTDC_FAT_CreditCard = '3'
THOST_FTDC_OS_Ready = '0'
THOST_FTDC_OS_CheckIn = '1'
THOST_FTDC_OS_CheckOut = '2'
THOST_FTDC_OS_CheckFileArrived = '3'
THOST_FTDC_OS_CheckDetail = '4'
THOST_FTDC_OS_DayEndClean = '5'
THOST_FTDC_OS_Invalid = '9'
THOST_FTDC_CCBFM_ByAmount = '1'
THOST_FTDC_CCBFM_ByMonth = '2'
THOST_FTDC_CAPIT_Client = '1'
THOST_FTDC_CAPIT_Server = '2'
THOST_FTDC_CAPIT_UserApi = '3'
THOST_FTDC_LS_Connected = '1'
THOST_FTDC_LS_Disconnected = '2'
THOST_FTDC_BPWDF_NoCheck = '0'
THOST_FTDC_BPWDF_BlankCheck = '1'
THOST_FTDC_BPWDF_EncryptCheck = '2'
THOST_FTDC_SAT_AccountID = '1'
THOST_FTDC_SAT_CardID = '2'
THOST_FTDC_SAT_SHStockholderID = '3'
THOST_FTDC_SAT_SZStockholderID = '4'
THOST_FTDC_TRFS_Normal = '0'
THOST_FTDC_TRFS_Repealed = '1'
THOST_FTDC_SPTYPE_Broker = '0'
THOST_FTDC_SPTYPE_Bank = '1'
THOST_FTDC_REQRSP_Request = '0'
THOST_FTDC_REQRSP_Response = '1'
THOST_FTDC_FBTUET_SignIn = '0'
THOST_FTDC_FBTUET_FromBankToFuture = '1'
THOST_FTDC_FBTUET_FromFutureToBank = '2'
THOST_FTDC_FBTUET_OpenAccount = '3'
THOST_FTDC_FBTUET_CancelAccount = '4'
THOST_FTDC_FBTUET_ChangeAccount = '5'
THOST_FTDC_FBTUET_RepealFromBankToFuture = '6'
THOST_FTDC_FBTUET_RepealFromFutureToBank = '7'
THOST_FTDC_FBTUET_QueryBankAccount = '8'
THOST_FTDC_FBTUET_QueryFutureAccount = '9'
THOST_FTDC_FBTUET_SignOut = 'A'
THOST_FTDC_FBTUET_SyncKey = 'B'
THOST_FTDC_FBTUET_ReserveOpenAccount = 'C'
THOST_FTDC_FBTUET_CancelReserveOpenAccount = 'D'
THOST_FTDC_FBTUET_ReserveOpenAccountConfirm = 'E'
THOST_FTDC_FBTUET_Other = 'Z'
THOST_FTDC_DBOP_Insert = '0'
THOST_FTDC_DBOP_Update = '1'
THOST_FTDC_DBOP_Delete = '2'
THOST_FTDC_SYNF_Yes = '0'
THOST_FTDC_SYNF_No = '1'
THOST_FTDC_SYNT_OneOffSync = '0'
THOST_FTDC_SYNT_TimerSync = '1'
THOST_FTDC_SYNT_TimerFullSync = '2'
THOST_FTDC_FBEDIR_Settlement = '0'
THOST_FTDC_FBEDIR_Sale = '1'
THOST_FTDC_FBERES_Success = '0'
THOST_FTDC_FBERES_InsufficientBalance = '1'
THOST_FTDC_FBERES_UnknownTrading = '8'
THOST_FTDC_FBERES_Fail = 'x'
THOST_FTDC_FBEES_Normal = '0'
THOST_FTDC_FBEES_ReExchange = '1'
THOST_FTDC_FBEFG_DataPackage = '0'
THOST_FTDC_FBEFG_File = '1'
THOST_FTDC_FBEAT_NotTrade = '0'
THOST_FTDC_FBEAT_Trade = '1'
THOST_FTDC_FBEUET_SignIn = '0'
THOST_FTDC_FBEUET_Exchange = '1'
THOST_FTDC_FBEUET_ReExchange = '2'
THOST_FTDC_FBEUET_QueryBankAccount = '3'
THOST_FTDC_FBEUET_QueryExchDetial = '4'
THOST_FTDC_FBEUET_QueryExchSummary = '5'
THOST_FTDC_FBEUET_QueryExchRate = '6'
THOST_FTDC_FBEUET_CheckBankAccount = '7'
THOST_FTDC_FBEUET_SignOut = '8'
THOST_FTDC_FBEUET_Other = 'Z'
THOST_FTDC_FBERF_UnProcessed = '0'
THOST_FTDC_FBERF_WaitSend = '1'
THOST_FTDC_FBERF_SendSuccess = '2'
THOST_FTDC_FBERF_SendFailed = '3'
THOST_FTDC_FBERF_WaitReSend = '4'
THOST_FTDC_NC_NOERROR = '0'
THOST_FTDC_NC_Warn = '1'
THOST_FTDC_NC_Call = '2'
THOST_FTDC_NC_Force = '3'
THOST_FTDC_NC_CHUANCANG = '4'
THOST_FTDC_NC_Exception = '5'
THOST_FTDC_FCT_Manual = '0'
THOST_FTDC_FCT_Single = '1'
THOST_FTDC_FCT_Group = '2'
THOST_FTDC_RNM_System = '0'
THOST_FTDC_RNM_SMS = '1'
THOST_FTDC_RNM_EMail = '2'
THOST_FTDC_RNM_Manual = '3'
THOST_FTDC_RNS_NotGen = '0'
THOST_FTDC_RNS_Generated = '1'
THOST_FTDC_RNS_SendError = '2'
THOST_FTDC_RNS_SendOk = '3'
THOST_FTDC_RNS_Received = '4'
THOST_FTDC_RNS_Confirmed = '5'
THOST_FTDC_RUE_ExportData = '0'
THOST_FTDC_COST_LastPriceAsc = '0'
THOST_FTDC_COST_LastPriceDesc = '1'
THOST_FTDC_COST_AskPriceAsc = '2'
THOST_FTDC_COST_AskPriceDesc = '3'
THOST_FTDC_COST_BidPriceAsc = '4'
THOST_FTDC_COST_BidPriceDesc = '5'
THOST_FTDC_UOAST_NoSend = '0'
THOST_FTDC_UOAST_Sended = '1'
THOST_FTDC_UOAST_Generated = '2'
THOST_FTDC_UOAST_SendFail = '3'
THOST_FTDC_UOAST_Success = '4'
THOST_FTDC_UOAST_Fail = '5'
THOST_FTDC_UOAST_Cancel = '6'
THOST_FTDC_UOACS_NoApply = '1'
THOST_FTDC_UOACS_Submited = '2'
THOST_FTDC_UOACS_Sended = '3'
THOST_FTDC_UOACS_Success = '4'
THOST_FTDC_UOACS_Refuse = '5'
THOST_FTDC_UOACS_Cancel = '6'
THOST_FTDC_QT_Radio = '1'
THOST_FTDC_QT_Option = '2'
THOST_FTDC_QT_Blank = '3'
THOST_FTDC_BT_Request = '1'
THOST_FTDC_BT_Response = '2'
THOST_FTDC_BT_Notice = '3'
THOST_FTDC_CRC_Success = '0'
THOST_FTDC_CRC_Working = '1'
THOST_FTDC_CRC_InfoFail = '2'
THOST_FTDC_CRC_IDCardFail = '3'
THOST_FTDC_CRC_OtherFail = '4'
THOST_FTDC_CfMMCCT_All = '0'
THOST_FTDC_CfMMCCT_Person = '1'
THOST_FTDC_CfMMCCT_Company = '2'
THOST_FTDC_CfMMCCT_Other = '3'
THOST_FTDC_CfMMCCT_SpecialOrgan = '4'
THOST_FTDC_CfMMCCT_Asset = '5'
THOST_FTDC_EIDT_SHFE = 'S'
THOST_FTDC_EIDT_CZCE = 'Z'
THOST_FTDC_EIDT_DCE = 'D'
THOST_FTDC_EIDT_CFFEX = 'J'
THOST_FTDC_EIDT_INE = 'N'
THOST_FTDC_ECIDT_Hedge = '1'
THOST_FTDC_ECIDT_Arbitrage = '2'
THOST_FTDC_ECIDT_Speculation = '3'
THOST_FTDC_UF_NoUpdate = '0'
THOST_FTDC_UF_Success = '1'
THOST_FTDC_UF_Fail = '2'
THOST_FTDC_UF_TCSuccess = '3'
THOST_FTDC_UF_TCFail = '4'
THOST_FTDC_UF_Cancel = '5'
THOST_FTDC_AOID_OpenInvestor = '1'
THOST_FTDC_AOID_ModifyIDCard = '2'
THOST_FTDC_AOID_ModifyNoIDCard = '3'
THOST_FTDC_AOID_ApplyTradingCode = '4'
THOST_FTDC_AOID_CancelTradingCode = '5'
THOST_FTDC_AOID_CancelInvestor = '6'
THOST_FTDC_AOID_FreezeAccount = '8'
THOST_FTDC_AOID_ActiveFreezeAccount = '9'
THOST_FTDC_ASID_NoComplete = '1'
THOST_FTDC_ASID_Submited = '2'
THOST_FTDC_ASID_Checked = '3'
THOST_FTDC_ASID_Refused = '4'
THOST_FTDC_ASID_Deleted = '5'
THOST_FTDC_UOASM_ByAPI = '1'
THOST_FTDC_UOASM_ByFile = '2'
THOST_FTDC_EvM_ADD = '1'
THOST_FTDC_EvM_UPDATE = '2'
THOST_FTDC_EvM_DELETE = '3'
THOST_FTDC_EvM_CHECK = '4'
THOST_FTDC_EvM_COPY = '5'
THOST_FTDC_EvM_CANCEL = '6'
THOST_FTDC_EvM_Reverse = '7'
THOST_FTDC_UOAA_ASR = '1'
THOST_FTDC_UOAA_ASNR = '2'
THOST_FTDC_UOAA_NSAR = '3'
THOST_FTDC_UOAA_NSR = '4'
THOST_FTDC_EvM_InvestorGroupFlow = '1'
THOST_FTDC_EvM_InvestorRate = '2'
THOST_FTDC_EvM_InvestorCommRateModel = '3'
THOST_FTDC_CL_Zero = '0'
THOST_FTDC_CL_One = '1'
THOST_FTDC_CL_Two = '2'
THOST_FTDC_CHS_Init = '0'
THOST_FTDC_CHS_Checking = '1'
THOST_FTDC_CHS_Checked = '2'
THOST_FTDC_CHS_Refuse = '3'
THOST_FTDC_CHS_Cancel = '4'
THOST_FTDC_CHU_Unused = '0'
THOST_FTDC_CHU_Used = '1'
THOST_FTDC_CHU_Fail = '2'
THOST_FTDC_BAO_ByAccProperty = '0'
THOST_FTDC_BAO_ByFBTransfer = '1'
THOST_FTDC_MBTS_ByInstrument = '0'
THOST_FTDC_MBTS_ByDayInsPrc = '1'
THOST_FTDC_MBTS_ByDayIns = '2'
THOST_FTDC_FTC_BankLaunchBankToBroker = '102001'
THOST_FTDC_FTC_BrokerLaunchBankToBroker = '202001'
THOST_FTDC_FTC_BankLaunchBrokerToBank = '102002'
THOST_FTDC_FTC_BrokerLaunchBrokerToBank = '202002'
THOST_FTDC_OTP_NONE = '0'
THOST_FTDC_OTP_TOTP = '1'
THOST_FTDC_OTPS_Unused = '0'
THOST_FTDC_OTPS_Used = '1'
THOST_FTDC_OTPS_Disuse = '2'
THOST_FTDC_BUT_Investor = '1'
THOST_FTDC_BUT_BrokerUser = '2'
THOST_FTDC_FUTT_Commodity = '1'
THOST_FTDC_FUTT_Financial = '2'
THOST_FTDC_FET_Restriction = '0'
THOST_FTDC_FET_TodayRestriction = '1'
THOST_FTDC_FET_Transfer = '2'
THOST_FTDC_FET_Credit = '3'
THOST_FTDC_FET_InvestorWithdrawAlm = '4'
THOST_FTDC_FET_BankRestriction = '5'
THOST_FTDC_FET_Accountregister = '6'
THOST_FTDC_FET_ExchangeFundIO = '7'
THOST_FTDC_FET_InvestorFundIO = '8'
THOST_FTDC_AST_FBTransfer = '0'
THOST_FTDC_AST_ManualEntry = '1'
THOST_FTDC_CST_UnifyAccount = '0'
THOST_FTDC_CST_ManualEntry = '1'
THOST_FTDC_UR_All = '0'
THOST_FTDC_UR_Single = '1'
THOST_FTDC_BG_Investor = '2'
THOST_FTDC_BG_Group = '1'
THOST_FTDC_TSSM_Instrument = '1'
THOST_FTDC_TSSM_Product = '2'
THOST_FTDC_TSSM_Exchange = '3'
THOST_FTDC_ESM_Relative = '1'
THOST_FTDC_ESM_Typical = '2'
THOST_FTDC_RIR_All = '1'
THOST_FTDC_RIR_Model = '2'
THOST_FTDC_RIR_Single = '3'
THOST_FTDC_SDS_Initialize = '0'
THOST_FTDC_SDS_Settlementing = '1'
THOST_FTDC_SDS_Settlemented = '2'
THOST_FTDC_TSRC_NORMAL = '0'
THOST_FTDC_TSRC_QUERY = '1'
THOST_FTDC_FSM_Product = '1'
THOST_FTDC_FSM_Exchange = '2'
THOST_FTDC_FSM_All = '3'
THOST_FTDC_BIR_Property = '1'
THOST_FTDC_BIR_All = '2'
THOST_FTDC_PIR_All = '1'
THOST_FTDC_PIR_Property = '2'
THOST_FTDC_PIR_Single = '3'
THOST_FTDC_FIS_NoCreate = '0'
THOST_FTDC_FIS_Created = '1'
THOST_FTDC_FIS_Failed = '2'
THOST_FTDC_FGS_FileTransmit = '0'
THOST_FTDC_FGS_FileGen = '1'
THOST_FTDC_SoM_Add = '1'
THOST_FTDC_SoM_Update = '2'
THOST_FTDC_SoM_Delete = '3'
THOST_FTDC_SoM_Copy = '4'
THOST_FTDC_SoM_AcTive = '5'
THOST_FTDC_SoM_CanCel = '6'
THOST_FTDC_SoM_ReSet = '7'
THOST_FTDC_SoT_UpdatePassword = '0'
THOST_FTDC_SoT_UserDepartment = '1'
THOST_FTDC_SoT_RoleManager = '2'
THOST_FTDC_SoT_RoleFunction = '3'
THOST_FTDC_SoT_BaseParam = '4'
THOST_FTDC_SoT_SetUserID = '5'
THOST_FTDC_SoT_SetUserRole = '6'
THOST_FTDC_SoT_UserIpRestriction = '7'
THOST_FTDC_SoT_DepartmentManager = '8'
THOST_FTDC_SoT_DepartmentCopy = '9'
THOST_FTDC_SoT_Tradingcode = 'A'
THOST_FTDC_SoT_InvestorStatus = 'B'
THOST_FTDC_SoT_InvestorAuthority = 'C'
THOST_FTDC_SoT_PropertySet = 'D'
THOST_FTDC_SoT_ReSetInvestorPasswd = 'E'
THOST_FTDC_SoT_InvestorPersonalityInfo = 'F'
THOST_FTDC_CSRCQ_Current = '0'
THOST_FTDC_CSRCQ_History = '1'
THOST_FTDC_FRS_Normal = '1'
THOST_FTDC_FRS_Freeze = '0'
THOST_FTDC_STST_Standard = '0'
THOST_FTDC_STST_NonStandard = '1'
THOST_FTDC_RPT_Freeze = '1'
THOST_FTDC_RPT_FreezeActive = '2'
THOST_FTDC_RPT_OpenLimit = '3'
THOST_FTDC_RPT_RelieveOpenLimit = '4'
THOST_FTDC_AMLDS_Normal = '0'
THOST_FTDC_AMLDS_Deleted = '1'
THOST_FTDC_AMLCHS_Init = '0'
THOST_FTDC_AMLCHS_Checking = '1'
THOST_FTDC_AMLCHS_Checked = '2'
THOST_FTDC_AMLCHS_RefuseReport = '3'
THOST_FTDC_AMLDT_DrawDay = '0'
THOST_FTDC_AMLDT_TouchDay = '1'
THOST_FTDC_AMLCL_CheckLevel0 = '0'
THOST_FTDC_AMLCL_CheckLevel1 = '1'
THOST_FTDC_AMLCL_CheckLevel2 = '2'
THOST_FTDC_AMLCL_CheckLevel3 = '3'
THOST_FTDC_EFT_CSV = '0'
THOST_FTDC_EFT_EXCEL = '1'
THOST_FTDC_EFT_DBF = '2'
THOST_FTDC_SMT_Before = '1'
THOST_FTDC_SMT_Settlement = '2'
THOST_FTDC_SMT_After = '3'
THOST_FTDC_SMT_Settlemented = '4'
THOST_FTDC_SML_Must = '1'
THOST_FTDC_SML_Alarm = '2'
THOST_FTDC_SML_Prompt = '3'
THOST_FTDC_SML_Ignore = '4'
THOST_FTDC_SMG_Exhcange = '1'
THOST_FTDC_SMG_ASP = '2'
THOST_FTDC_SMG_CSRC = '3'
THOST_FTDC_LUT_Repeatable = '1'
THOST_FTDC_LUT_Unrepeatable = '2'
THOST_FTDC_DAR_Settle = '1'
THOST_FTDC_DAR_Exchange = '2'
THOST_FTDC_DAR_CSRC = '3'
THOST_FTDC_MGT_ExchMarginRate = '0'
THOST_FTDC_MGT_InstrMarginRate = '1'
THOST_FTDC_MGT_InstrMarginRateTrade = '2'
THOST_FTDC_ACT_Intraday = '1'
THOST_FTDC_ACT_Long = '2'
THOST_FTDC_MRT_Exchange = '1'
THOST_FTDC_MRT_Investor = '2'
THOST_FTDC_MRT_InvestorTrade = '3'
THOST_FTDC_BUS_UnBak = '0'
THOST_FTDC_BUS_BakUp = '1'
THOST_FTDC_BUS_BakUped = '2'
THOST_FTDC_BUS_BakFail = '3'
THOST_FTDC_SIS_UnInitialize = '0'
THOST_FTDC_SIS_Initialize = '1'
THOST_FTDC_SIS_Initialized = '2'
THOST_FTDC_SRS_NoCreate = '0'
THOST_FTDC_SRS_Create = '1'
THOST_FTDC_SRS_Created = '2'
THOST_FTDC_SRS_CreateFail = '3'
THOST_FTDC_SSS_UnSaveData = '0'
THOST_FTDC_SSS_SaveDatad = '1'
THOST_FTDC_SAS_UnArchived = '0'
THOST_FTDC_SAS_Archiving = '1'
THOST_FTDC_SAS_Archived = '2'
THOST_FTDC_SAS_ArchiveFail = '3'
THOST_FTDC_CTPT_Unkown = '0'
THOST_FTDC_CTPT_MainCenter = '1'
THOST_FTDC_CTPT_BackUp = '2'
THOST_FTDC_CDT_Normal = '0'
THOST_FTDC_CDT_SpecFirst = '1'
THOST_FTDC_MFUR_None = '0'
THOST_FTDC_MFUR_Margin = '1'
THOST_FTDC_MFUR_All = '2'
THOST_FTDC_MFUR_CNY3 = '3'
THOST_FTDC_SPT_CzceHedge = '1'
THOST_FTDC_SPT_IneForeignCurrency = '2'
THOST_FTDC_SPT_DceOpenClose = '3'
THOST_FTDC_FMT_Mortgage = '1'
THOST_FTDC_FMT_Redemption = '2'
THOST_FTDC_ASPI_BaseMargin = '1'
THOST_FTDC_ASPI_LowestInterest = '2'
THOST_FTDC_FMD_In = '1'
THOST_FTDC_FMD_Out = '2'
THOST_FTDC_BT_Profit = '0'
THOST_FTDC_BT_Loss = '1'
THOST_FTDC_BT_Other = 'Z'
THOST_FTDC_SST_Manual = '0'
THOST_FTDC_SST_Automatic = '1'
THOST_FTDC_CED_Settlement = '0'
THOST_FTDC_CED_Sale = '1'
THOST_FTDC_CSS_Entry = '1'
THOST_FTDC_CSS_Approve = '2'
THOST_FTDC_CSS_Refuse = '3'
THOST_FTDC_CSS_Revoke = '4'
THOST_FTDC_CSS_Send = '5'
THOST_FTDC_CSS_Success = '6'
THOST_FTDC_CSS_Failure = '7'
THOST_FTDC_REQF_NoSend = '0'
THOST_FTDC_REQF_SendSuccess = '1'
THOST_FTDC_REQF_SendFailed = '2'
THOST_FTDC_REQF_WaitReSend = '3'
THOST_FTDC_RESF_Success = '0'
THOST_FTDC_RESF_InsuffiCient = '1'
THOST_FTDC_RESF_UnKnown = '8'
THOST_FTDC_EXS_Before = '0'
THOST_FTDC_EXS_After = '1'
THOST_FTDC_CR_Domestic = '1'
THOST_FTDC_CR_GMT = '2'
THOST_FTDC_CR_Foreign = '3'
THOST_FTDC_HB_No = '0'
THOST_FTDC_HB_Yes = '1'
THOST_FTDC_SM_Normal = '1'
THOST_FTDC_SM_Emerge = '2'
THOST_FTDC_SM_Restore = '3'
THOST_FTDC_TPT_Full = '1'
THOST_FTDC_TPT_Increment = '2'
THOST_FTDC_TPT_BackUp = '3'
THOST_FTDC_LM_Trade = '0'
THOST_FTDC_LM_Transfer = '1'
THOST_FTDC_CPT_Instrument = '1'
THOST_FTDC_CPT_Margin = '2'
THOST_FTDC_HT_Yes = '1'
THOST_FTDC_HT_No = '0'
THOST_FTDC_AMT_Bank = '1'
THOST_FTDC_AMT_Securities = '2'
THOST_FTDC_AMT_Fund = '3'
THOST_FTDC_AMT_Insurance = '4'
THOST_FTDC_AMT_Trust = '5'
THOST_FTDC_AMT_Other = '9'
THOST_FTDC_CFIOT_FundIO = '0'
THOST_FTDC_CFIOT_SwapCurrency = '1'
THOST_FTDC_CAT_Futures = '1'
THOST_FTDC_CAT_AssetmgrFuture = '2'
THOST_FTDC_CAT_AssetmgrTrustee = '3'
THOST_FTDC_CAT_AssetmgrTransfer = '4'
THOST_FTDC_LT_Chinese = '1'
THOST_FTDC_LT_English = '2'
THOST_FTDC_AMCT_Person = '1'
THOST_FTDC_AMCT_Organ = '2'
THOST_FTDC_AMCT_SpecialOrgan = '4'
THOST_FTDC_ASST_Futures = '3'
THOST_FTDC_ASST_SpecialOrgan = '4'
THOST_FTDC_CIT_HasExch = '0'
THOST_FTDC_CIT_HasATP = '1'
THOST_FTDC_CIT_HasDiff = '2'
THOST_FTDC_DT_HandDeliv = '1'
THOST_FTDC_DT_PersonDeliv = '2'
THOST_FTDC_MMSA_NO = '0'
THOST_FTDC_MMSA_YES = '1'
THOST_FTDC_CACT_Person = '0'
THOST_FTDC_CACT_Company = '1'
THOST_FTDC_CACT_Other = '2'
THOST_FTDC_UOAAT_Futures = '1'
THOST_FTDC_UOAAT_SpecialOrgan = '2'
THOST_FTDC_DEN_Buy = '0'
THOST_FTDC_DEN_Sell = '1'
THOST_FTDC_OFEN_Open = '0'
THOST_FTDC_OFEN_Close = '1'
THOST_FTDC_OFEN_ForceClose = '2'
THOST_FTDC_OFEN_CloseToday = '3'
THOST_FTDC_OFEN_CloseYesterday = '4'
THOST_FTDC_OFEN_ForceOff = '5'
THOST_FTDC_OFEN_LocalForceClose = '6'
THOST_FTDC_HFEN_Speculation = '1'
THOST_FTDC_HFEN_Arbitrage = '2'
THOST_FTDC_HFEN_Hedge = '3'
THOST_FTDC_FIOTEN_FundIO = '1'
THOST_FTDC_FIOTEN_Transfer = '2'
THOST_FTDC_FIOTEN_SwapCurrency = '3'
THOST_FTDC_FTEN_Deposite = '1'
THOST_FTDC_FTEN_ItemFund = '2'
THOST_FTDC_FTEN_Company = '3'
THOST_FTDC_FTEN_InnerTransfer = '4'
THOST_FTDC_FDEN_In = '1'
THOST_FTDC_FDEN_Out = '2'
THOST_FTDC_FMDEN_In = '1'
THOST_FTDC_FMDEN_Out = '2'
THOST_FTDC_CP_CallOptions = '1'
THOST_FTDC_CP_PutOptions = '2'
THOST_FTDC_STM_Continental = '0'
THOST_FTDC_STM_American = '1'
THOST_FTDC_STM_Bermuda = '2'
THOST_FTDC_STT_Hedge = '0'
THOST_FTDC_STT_Match = '1'
THOST_FTDC_APPT_NotStrikeNum = '4'
THOST_FTDC_GUDS_Gen = '0'
THOST_FTDC_GUDS_Hand = '1'
THOST_FTDC_OER_NoExec = 'n'
THOST_FTDC_OER_Canceled = 'c'
THOST_FTDC_OER_OK = '0'
THOST_FTDC_OER_NoPosition = '1'
THOST_FTDC_OER_NoDeposit = '2'
THOST_FTDC_OER_NoParticipant = '3'
THOST_FTDC_OER_NoClient = '4'
THOST_FTDC_OER_NoInstrument = '6'
THOST_FTDC_OER_NoRight = '7'
THOST_FTDC_OER_InvalidVolume = '8'
THOST_FTDC_OER_NoEnoughHistoryTrade = '9'
THOST_FTDC_OER_Unknown = 'a'
THOST_FTDC_COMBT_Future = '0'
THOST_FTDC_COMBT_BUL = '1'
THOST_FTDC_COMBT_BER = '2'
THOST_FTDC_COMBT_STD = '3'
THOST_FTDC_COMBT_STG = '4'
THOST_FTDC_COMBT_PRT = '5'
THOST_FTDC_COMBT_CLD = '6'
THOST_FTDC_COMBT_OPL = '7'
THOST_FTDC_COMBT_BFO = '8'
THOST_FTDC_DCECOMBT_SPL = '0'
THOST_FTDC_DCECOMBT_OPL = '1'
THOST_FTDC_DCECOMBT_SP = '2'
THOST_FTDC_DCECOMBT_SPC = '3'
THOST_FTDC_DCECOMBT_BLS = '4'
THOST_FTDC_DCECOMBT_BES = '5'
THOST_FTDC_DCECOMBT_CAS = '6'
THOST_FTDC_DCECOMBT_STD = '7'
THOST_FTDC_DCECOMBT_STG = '8'
THOST_FTDC_DCECOMBT_BFO = '9'
THOST_FTDC_DCECOMBT_SFO = 'a'
THOST_FTDC_ORPT_PreSettlementPrice = '1'
THOST_FTDC_ORPT_OpenPrice = '4'
THOST_FTDC_ORPT_MaxPreSettlementPrice = '5'
THOST_FTDC_BLAG_Default = '1'
THOST_FTDC_BLAG_IncludeOptValLost = '2'
THOST_FTDC_ACTP_Exec = '1'
THOST_FTDC_ACTP_Abandon = '2'
THOST_FTDC_FQST_Submitted = 'a'
THOST_FTDC_FQST_Accepted = 'b'
THOST_FTDC_FQST_Rejected = 'c'
THOST_FTDC_VM_Absolute = '0'
THOST_FTDC_VM_Ratio = '1'
THOST_FTDC_EOPF_Reserve = '0'
THOST_FTDC_EOPF_UnReserve = '1'
THOST_FTDC_EOCF_AutoClose = '0'
THOST_FTDC_EOCF_NotToClose = '1'
THOST_FTDC_PTE_Futures = '1'
THOST_FTDC_PTE_Options = '2'
THOST_FTDC_CUFN_CUFN_O = 'O'
THOST_FTDC_CUFN_CUFN_T = 'T'
THOST_FTDC_CUFN_CUFN_P = 'P'
THOST_FTDC_CUFN_CUFN_N = 'N'
THOST_FTDC_CUFN_CUFN_L = 'L'
THOST_FTDC_CUFN_CUFN_F = 'F'
THOST_FTDC_CUFN_CUFN_C = 'C'
THOST_FTDC_CUFN_CUFN_M = 'M'
THOST_FTDC_DUFN_DUFN_O = 'O'
THOST_FTDC_DUFN_DUFN_T = 'T'
THOST_FTDC_DUFN_DUFN_P = 'P'
THOST_FTDC_DUFN_DUFN_F = 'F'
THOST_FTDC_DUFN_DUFN_C = 'C'
THOST_FTDC_DUFN_DUFN_D = 'D'
THOST_FTDC_DUFN_DUFN_M = 'M'
THOST_FTDC_DUFN_DUFN_S = 'S'
THOST_FTDC_SUFN_SUFN_O = 'O'
THOST_FTDC_SUFN_SUFN_T = 'T'
THOST_FTDC_SUFN_SUFN_P = 'P'
THOST_FTDC_SUFN_SUFN_F = 'F'
THOST_FTDC_CFUFN_SUFN_T = 'T'
THOST_FTDC_CFUFN_SUFN_P = 'P'
THOST_FTDC_CFUFN_SUFN_F = 'F'
THOST_FTDC_CFUFN_SUFN_S = 'S'
THOST_FTDC_CMDR_Comb = '0'
THOST_FTDC_CMDR_UnComb = '1'
THOST_FTDC_CMDR_DelComb = '2'
THOST_FTDC_STOV_RealValue = '1'
THOST_FTDC_STOV_ProfitValue = '2'
THOST_FTDC_STOV_RealRatio = '3'
THOST_FTDC_STOV_ProfitRatio = '4'
THOST_FTDC_ROAST_Processing = '0'
THOST_FTDC_ROAST_Cancelled = '1'
THOST_FTDC_ROAST_Opened = '2'
THOST_FTDC_ROAST_Invalid = '3'
THOST_FTDC_WPSR_Lib = '1'
THOST_FTDC_WPSR_Manual = '2'
THOST_FTDC_OSCF_CloseSelfOptionPosition = '1'
THOST_FTDC_OSCF_ReserveOptionPosition = '2'
THOST_FTDC_OSCF_SellCloseSelfFuturePosition = '3'
THOST_FTDC_OSCF_ReserveFuturePosition = '4'
THOST_FTDC_BZTP_Future = '1'
THOST_FTDC_BZTP_Stock = '2'
THOST_FTDC_APP_TYPE_Investor = '1'
THOST_FTDC_APP_TYPE_InvestorRelay = '2'
THOST_FTDC_APP_TYPE_OperatorRelay = '3'
THOST_FTDC_APP_TYPE_UnKnown = '4'
THOST_FTDC_RV_Right = '0'
THOST_FTDC_RV_Refuse = '1'
THOST_FTDC_OTC_TRDT_Block = '0'
THOST_FTDC_OTC_TRDT_EFP = '1'
THOST_FTDC_OTC_MT_DV01 = '1'
THOST_FTDC_OTC_MT_ParValue = '2'
|
mit
|
flaviobarros/spyre
|
examples/stocks_example.py
|
3
|
2387
|
# tested with python2.7 and 3.4
from spyre import server
import pandas as pd
import json
try:
import urllib2
except ImportError:
import urllib.request as urllib2
class StockExample(server.App):
    """Spyre web app that fetches ~3 months of daily prices for a chosen
    ticker from the (now long-deprecated) Yahoo Finance chart API and shows
    them as a plot and a table.

    A naive instance-level cache avoids re-hitting the API when the same
    params are requested again (e.g. once for the plot tab, once for the
    table tab).
    """
    def __init__(self):
        # implements a simple caching mechanism to avoid multiple calls to the yahoo finance api
        self.data_cache = None      # last DataFrame fetched
        self.params_cache = None    # params dict the cache was built for
    # Spyre declarative UI config (class attributes read by server.App).
    title = "Historical Stock Prices"
    inputs = [{ "type":'dropdown',
                "label": 'Company',
                "options" : [ {"label": "Google", "value":"GOOG"},
                              {"label": "Yahoo", "value":"YHOO"},
                              {"label": "Apple", "value":"AAPL"}],
                "value":'GOOG',
                "key": 'ticker',
                "action_id": "update_data"}]
    controls = [{ "type" : "hidden",
                  "id" : "update_data"}]
    tabs = ["Plot", "Table"]
    outputs = [{ "type" : "plot",
                 "id" : "plot",
                 "control_id" : "update_data",
                 "tab" : "Plot"},
               { "type" : "table",
                 "id" : "table_id",
                 "control_id" : "update_data",
                 "tab" : "Table",
                 "on_page_load" : True }]
    def getData(self, params):
        """Return a DataFrame of daily quotes for params['ticker'].

        Mutates *params* (drops the framework-injected "output_id" key so
        that cache comparison works) and caches the result on the instance.
        """
        params.pop("output_id",None) # caching layer
        if self.params_cache!=params: # caching layer
            ticker = params['ticker']
            # make call to yahoo finance api to get historical stock data
            api_url = 'https://chartapi.finance.yahoo.com/instrument/1.0/{}/chartdata;type=quote;range=3m/json'.format(ticker)
            result = urllib2.urlopen(api_url).read()
            # Response is JSONP: strip the "finance_charts_json_callback( ... )"
            # wrapper before parsing.
            data = json.loads(result.decode('utf-8').replace('finance_charts_json_callback( ','')[:-1]) # strip away the javascript and load json
            self.company_name = data['meta']['Company-Name']
            df = pd.DataFrame.from_records(data['series'])
            df['Date'] = pd.to_datetime(df['Date'],format='%Y%m%d')
            self.data_cache = df # caching layer
            self.params_cache = params # caching layer
        return self.data_cache
    def getPlot(self, params):
        """Return a matplotlib Figure of all price columns (volume dropped)."""
        ### implements a simple caching mechanism to avoid multiple calls to the yahoo finance api ###
        params.pop("output_id",None)
        # NOTE(review): busy-wait spin until some other request's getData()
        # updates params_cache; if no such request is in flight this loops
        # forever and burns CPU -- confirm this is the intended behaviour.
        while self.params_cache!=params:
            pass
        ###############################################################################################
        df = self.getData(params)
        plt_obj = df.set_index('Date').drop(['volume'],axis=1).plot()
        plt_obj.set_ylabel("Price")
        plt_obj.set_title(self.company_name)
        fig = plt_obj.get_figure()
        return fig
if __name__ == '__main__':
    app = StockExample()
    # Serve the app on localhost:9093; blocks until interrupted.
    app.launch(port=9093)
|
mit
|
chongtianfeiyu/kbengine
|
kbe/res/scripts/common/Lib/pty.py
|
120
|
4763
|
"""Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import tty
# Public API; master_open()/slave_open() are kept only for backward
# compatibility and are documented as deprecated.
__all__ = ["openpty","fork","spawn"]
# Well-known POSIX file descriptor numbers.
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
# Value os.fork()/os.forkpty() return in the child process.
CHILD = 0
def openpty():
    """openpty() -> (master_fd, slave_fd)
    Open a pty master/slave pair, using os.openpty() if possible."""
    # Prefer the platform's native implementation when available.
    try:
        pair = os.openpty()
    except (AttributeError, OSError):
        # Not available (or failed): fall back to the manual BSD-style
        # /dev/ptyXY search below.
        pass
    else:
        return pair
    master_fd, slave_name = _open_terminal()
    return master_fd, slave_open(slave_name)
def master_open():
    """master_open() -> (master_fd, slave_name)
    Open a pty master and return the fd, and the filename of the slave end.
    Deprecated, use openpty() instead."""
    # Try the native pair-opener first; we only need the slave long enough
    # to learn its device name.
    try:
        master_fd, slave_fd = os.openpty()
    except (AttributeError, OSError):
        pass
    else:
        slave_name = os.ttyname(slave_fd)
        os.close(slave_fd)
        return master_fd, slave_name
    # Fall back to the legacy BSD /dev/ptyXY scan.
    return _open_terminal()
def _open_terminal():
    """Open pty master and return (master_fd, tty_name)."""
    # Legacy BSD naming scheme: masters are /dev/pty[p-zP-T][0-9a-f] and
    # the matching slave is /dev/tty with the same two-character suffix.
    for group in 'pqrstuvwxyzPQRST':
        for index in '0123456789abcdef':
            master_name = '/dev/pty%s%s' % (group, index)
            try:
                fd = os.open(master_name, os.O_RDWR)
            except OSError:
                # Busy or nonexistent: try the next candidate.
                continue
            return (fd, '/dev/tty%s%s' % (group, index))
    raise OSError('out of pty devices')
def slave_open(tty_name):
    """slave_open(tty_name) -> slave_fd
    Open the pty slave and acquire the controlling terminal, returning
    opened filedescriptor.
    Deprecated, use openpty() instead."""
    fd = os.open(tty_name, os.O_RDWR)
    # On SysV-derived systems the terminal line discipline is provided by
    # STREAMS modules that must be pushed explicitly; elsewhere fcntl has
    # no I_PUSH and we are done.
    try:
        from fcntl import ioctl, I_PUSH
    except ImportError:
        return fd
    try:
        ioctl(fd, I_PUSH, "ptem")
        ioctl(fd, I_PUSH, "ldterm")
    except OSError:
        # Best effort: the fd is still usable without the modules.
        pass
    return fd
def fork():
    """fork() -> (pid, master_fd)
    Fork and make the child a session leader with a controlling terminal."""
    # Fast path: let the OS do fork + pty setup atomically.
    try:
        pid, fd = os.forkpty()
    except (AttributeError, OSError):
        # os.forkpty() missing or failed -- emulate it manually below.
        pass
    else:
        if pid == CHILD:
            try:
                os.setsid()
            except OSError:
                # os.forkpty() already set us session leader
                pass
        return pid, fd
    master_fd, slave_fd = openpty()
    pid = os.fork()
    if pid == CHILD:
        # Establish a new session.
        os.setsid()
        os.close(master_fd)
        # Slave becomes stdin/stdout/stderr of child.
        os.dup2(slave_fd, STDIN_FILENO)
        os.dup2(slave_fd, STDOUT_FILENO)
        os.dup2(slave_fd, STDERR_FILENO)
        # The original slave fd is no longer needed once duplicated, unless
        # it already *was* one of the standard fds.
        if (slave_fd > STDERR_FILENO):
            os.close (slave_fd)
        # Explicitly open the tty to make it become a controlling tty.
        tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
        os.close(tmp_fd)
    else:
        os.close(slave_fd)
    # Parent and child process.
    return pid, master_fd
def _writen(fd, data):
    """Write all the data to a descriptor."""
    # os.write may accept fewer bytes than offered; keep retrying with the
    # unwritten tail until nothing remains.
    remaining = data
    while remaining:
        written = os.write(fd, remaining)
        remaining = remaining[written:]
def _read(fd):
    """Default read function."""
    # One bounded chunk per call; callers loop as needed.
    chunk_size = 1024
    return os.read(fd, chunk_size)
def _copy(master_fd, master_read=_read, stdin_read=_read):
    """Parent copy loop.
    Copies
            pty master -> standard output   (master_read)
            standard input -> pty master    (stdin_read)"""
    # Descriptors still being watched; an fd is dropped once it hits EOF.
    fds = [master_fd, STDIN_FILENO]
    # NOTE(review): no explicit exit -- in practice spawn() terminates this
    # loop when reading the dead master raises OSError; confirm for other
    # callers.
    while True:
        rfds, wfds, xfds = select(fds, [], [])
        if master_fd in rfds:
            data = master_read(master_fd)
            if not data:  # Reached EOF.
                fds.remove(master_fd)
            else:
                os.write(STDOUT_FILENO, data)
        if STDIN_FILENO in rfds:
            data = stdin_read(STDIN_FILENO)
            if not data:
                # Our stdin closed: stop forwarding keystrokes to the child.
                fds.remove(STDIN_FILENO)
            else:
                _writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
    """Create a spawned process.

    Runs *argv* (a command string or an argv sequence) on a new pty,
    relaying I/O between it and our own terminal until it exits, and
    returns the exit status from os.waitpid().
    """
    # Allow a bare command string as shorthand for a 1-element argv.
    if type(argv) == type(''):
        argv = (argv,)
    pid, master_fd = fork()
    if pid == CHILD:
        os.execlp(argv[0], *argv)
    # Put our own terminal into raw mode so keystrokes pass through
    # unmodified; remember whether we changed anything so we can restore.
    try:
        mode = tty.tcgetattr(STDIN_FILENO)
        tty.setraw(STDIN_FILENO)
        restore = 1
    except tty.error:    # This is the same as termios.error
        # stdin is not a tty (e.g. redirected) -- nothing to restore.
        restore = 0
    try:
        _copy(master_fd, master_read, stdin_read)
    except OSError:
        # Reading the master after the child dies raises OSError; this is
        # the normal way the copy loop ends.
        if restore:
            tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
    os.close(master_fd)
    return os.waitpid(pid, 0)[1]
|
lgpl-3.0
|
shravya-ks/ECN-ns3
|
src/bridge/bindings/modulegen__gcc_LP64.py
|
2
|
194047
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    # Permissive pybindgen error handler: a failure to wrap one API becomes
    # a warning instead of aborting generation of the whole module.
    def handle_error(self, wrapper, exception, traceback_):
        """Report the failing wrapper as a warning and tell pybindgen to continue."""
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
# Install the permissive handler globally for this generation run.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for the ns3.bridge bindings."""
    return Module('ns.bridge', cpp_namespace='::ns3')
def register_types(module):
    """Register every wrapped C++ type for the ns3.bridge Python module.

    Auto-generated by pybindgen (ns-3 bindings scanner); do not edit by
    hand.  Registration order is load-bearing: a class must be added
    before any later add_class() names it as ``parent`` or ``outer_class``.
    """
    root_module = module.get_root()
    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## bridge-helper.h (module 'bridge'): ns3::BridgeHelper [class]
    module.add_class('BridgeHelper')
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    module.add_class('Ipv4Address', import_from_module='ns.network')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    module.add_class('Ipv6Address', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    module.add_class('Mac48Address', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
    module.add_class('NetDeviceContainer', import_from_module='ns.network')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## nstime.h (module 'core'): ns3::TimeWithUnit [class]
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NetDeviceQueue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NetDeviceQueue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## channel.h (module 'network'): ns3::Channel [class]
    module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## net-device.h (module 'network'): ns3::NetDeviceQueue [class]
    module.add_class('NetDeviceQueue', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >'])
    ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface [class]
    module.add_class('NetDeviceQueueInterface', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::QueueItem [class]
    module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
    ## net-device.h (module 'network'): ns3::QueueItem::Uint8Values [enumeration]
    module.add_enum('Uint8Values', ['IP_DSFIELD'], outer_class=root_module['ns3::QueueItem'], import_from_module='ns.network')
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## bridge-channel.h (module 'bridge'): ns3::BridgeChannel [class]
    module.add_class('BridgeChannel', parent=root_module['ns3::Channel'])
    ## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice [class]
    module.add_class('BridgeNetDevice', parent=root_module['ns3::NetDevice'])
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    ## Register a nested module for the namespace TracedValueCallback
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (none exposed; auto-generated)."""
    root_module = module.get_root()
def register_types_ns3_Hash(module):
    """Register types and function-pointer aliases for the ns3::Hash namespace.

    Auto-generated by pybindgen; do not edit by hand.
    """
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations under ns3::Hash::Function (auto-generated)."""
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
    """Register callback-signature aliases for ns3::TracedValueCallback (auto-generated)."""
    root_module = module.get_root()
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')
def register_methods(root_module):
    """Attach method wrappers to every class registered by register_types().

    Auto-generated by pybindgen; do not edit by hand.  Must run after
    register_types(), since each helper looks its class up in root_module.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3BridgeHelper_methods(root_module, root_module['ns3::BridgeHelper'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >'])
    register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NetDeviceQueue_methods(root_module, root_module['ns3::NetDeviceQueue'])
    register_Ns3NetDeviceQueueInterface_methods(root_module, root_module['ns3::NetDeviceQueueInterface'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3BridgeChannel_methods(root_module, root_module['ns3::BridgeChannel'])
    register_Ns3BridgeNetDevice_methods(root_module, root_module['ns3::BridgeNetDevice'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Expose ns3::Address (address.h, module 'network') to pybindgen:
    comparison/stream operators, constructors, and its member functions."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, raw (type, buffer, len), and copy.
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_constructor([param('ns3::Address const &', 'address')])
    # (name, return type, parameters, extra keyword flags) — declaration order
    # matches the generated bindings so registration order is unchanged.
    member_functions = [
        ('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], {'is_const': True}),
        ('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')], {}),
        ('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], {'is_const': True}),
        ('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')], {}),
        ('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], {'is_const': True}),
        ('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')], {}),
        ('GetLength', 'uint8_t', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True}),
        ('IsInvalid', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool', [param('uint8_t', 'type')], {'is_const': True}),
        ('Register', 'uint8_t', [], {'is_static': True}),
        ('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], {'is_const': True}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Expose ns3::AttributeConstructionList (attribute-construction-list.h,
    module 'core'): copy/default constructors plus Add/Begin/End/Find."""
    # Constructors: copy first (matching the generated declaration order), then default.
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    cls.add_constructor([])
    # (name, return type, parameters, extra keyword flags)
    member_functions = [
        ('Add', 'void',
         [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')],
         {}),
        ('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], {'is_const': True}),
        ('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], {'is_const': True}),
        ('Find', 'ns3::Ptr< ns3::AttributeValue >',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         {'is_const': True}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Expose ns3::AttributeConstructionList::Item: default/copy constructors
    and its three public data members (checker, name, value)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    # Public, mutable data members surfaced as instance attributes.
    for attr_name, attr_type in (
        ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
        ('name', 'std::string'),
        ('value', 'ns3::Ptr< ns3::AttributeValue >'),
    ):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def register_Ns3BridgeHelper_methods(root_module, cls):
    """Expose ns3::BridgeHelper (bridge-helper.h, module 'bridge'):
    constructors, both Install overloads, and SetDeviceAttribute."""
    cls.add_constructor([param('ns3::BridgeHelper const &', 'arg0')])
    cls.add_constructor([])
    # (name, return type, parameters, extra keyword flags)
    member_functions = [
        ('Install', 'ns3::NetDeviceContainer',
         [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::NetDeviceContainer', 'c')], {}),
        ('Install', 'ns3::NetDeviceContainer',
         [param('std::string', 'nodeName'), param('ns3::NetDeviceContainer', 'c')], {}),
        ('SetDeviceAttribute', 'void',
         [param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')], {}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Expose ns3::CallbackBase (callback.h, module 'core'): public copy and
    default constructors, GetImpl, and the protected impl-taking constructor."""
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # The Ptr<CallbackImplBase> constructor is protected in C++; mirror that here.
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Expose ns3::Hasher (hash.h, module 'core'): constructors, the 32/64-bit
    GetHash overloads for raw buffers and strings, and clear()."""
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    # (name, return type, parameters, extra keyword flags)
    member_functions = [
        ('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], {}),
        ('GetHash32', 'uint32_t', [param('std::string const', 's')], {}),
        ('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], {}),
        ('GetHash64', 'uint64_t', [param('std::string const', 's')], {}),
        ('clear', 'ns3::Hasher &', [], {}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Expose ns3::Ipv4Address (ipv4-address.h, module 'network'): comparison
    and stream operators, constructors, and all member/static functions."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from a uint32_t, from a dotted-quad string.
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'address')])
    cls.add_constructor([param('char const *', 'address')])
    # (name, return type, parameters, extra keyword flags) — declaration order
    # matches the generated bindings so registration order is unchanged.
    member_functions = [
        ('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], {'is_static': True}),
        ('Get', 'uint32_t', [], {'is_const': True}),
        ('GetAny', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetBroadcast', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetLoopback', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('GetZero', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('IsAny', 'bool', [], {'is_const': True}),
        ('IsBroadcast', 'bool', [], {'is_const': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], {'is_const': True}),
        ('IsLocalMulticast', 'bool', [], {'is_const': True}),
        ('IsLocalhost', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('IsMulticast', 'bool', [], {'is_const': True}),
        ('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('Set', 'void', [param('uint32_t', 'address')], {}),
        ('Set', 'void', [param('char const *', 'address')], {}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Expose ns3::Ipv4Mask (ipv4-address.h, module 'network'): operators,
    constructors, and its member/static functions."""
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from a uint32_t, from a dotted-quad string.
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'mask')])
    cls.add_constructor([param('char const *', 'mask')])
    # (name, return type, parameters, extra keyword flags)
    member_functions = [
        ('Get', 'uint32_t', [], {'is_const': True}),
        ('GetInverse', 'uint32_t', [], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('GetPrefixLength', 'uint16_t', [], {'is_const': True}),
        ('GetZero', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], {'is_const': True}),
        ('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Set', 'void', [param('uint32_t', 'mask')], {}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Expose ns3::Ipv6Address (ipv6-address.h, module 'network'): operators,
    constructors, and all member/static functions, including the
    MakeAutoconfigured* overloads for Mac16/Mac48/Mac64 addresses."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, from string, from raw bytes, copy, from pointer.
    cls.add_constructor([])
    cls.add_constructor([param('char const *', 'address')])
    cls.add_constructor([param('uint8_t *', 'address')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    # (name, return type, parameters, extra keyword flags) — declaration order
    # matches the generated bindings so registration order is unchanged.
    # NOTE: CombinePrefix is intentionally non-const, matching the C++ header.
    member_functions = [
        ('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')], {}),
        ('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], {'is_static': True}),
        ('GetAllHostsMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAllNodesMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAny', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetZero', 'ns3::Ipv6Address', [], {'is_static': True}),
        # Deprecated in ns-3 but still registered for backward compatibility.
        ('IsAllHostsMulticast', 'bool', [], {'deprecated': True, 'is_const': True}),
        ('IsAllNodesMulticast', 'bool', [], {'is_const': True}),
        ('IsAllRoutersMulticast', 'bool', [], {'is_const': True}),
        ('IsAny', 'bool', [], {'is_const': True}),
        ('IsDocumentation', 'bool', [], {'is_const': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], {'is_const': True}),
        ('IsIpv4MappedAddress', 'bool', [], {'is_const': True}),
        ('IsLinkLocal', 'bool', [], {'is_const': True}),
        ('IsLinkLocalMulticast', 'bool', [], {'is_const': True}),
        ('IsLocalhost', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('IsMulticast', 'bool', [], {'is_const': True}),
        ('IsSolicitedMulticast', 'bool', [], {'is_const': True}),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], {'is_static': True}),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], {'is_static': True}),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], {'is_static': True}),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac16Address', 'mac')], {'is_static': True}),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac48Address', 'mac')], {'is_static': True}),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac64Address', 'mac')], {'is_static': True}),
        ('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], {'is_static': True}),
        ('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], {'is_static': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('Set', 'void', [param('char const *', 'address')], {}),
        ('Set', 'void', [param('uint8_t *', 'address')], {}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Expose ns3::Ipv6Prefix (ipv6-address.h, module 'network'): operators,
    constructors, and its member/static functions."""
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, raw bytes, string, prefix length, copy, pointer.
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t *', 'prefix')])
    cls.add_constructor([param('char const *', 'prefix')])
    cls.add_constructor([param('uint8_t', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    # (name, return type, parameters, extra keyword flags)
    member_functions = [
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('GetPrefixLength', 'uint8_t', [], {'is_const': True}),
        ('GetZero', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], {'is_const': True}),
        ('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3Mac48Address_methods(root_module, cls):
    """Expose ns3::Mac48Address (mac48-address.h, module 'network'): operators,
    constructors, and its member/static functions."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from a "xx:xx:xx:xx:xx:xx" string.
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('char const *', 'str')])
    # (name, return type, parameters, extra keyword flags)
    member_functions = [
        ('Allocate', 'ns3::Mac48Address', [], {'is_static': True}),
        ('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('CopyFrom', 'void', [param('uint8_t const *', 'buffer')], {}),
        ('CopyTo', 'void', [param('uint8_t *', 'buffer')], {'is_const': True}),
        ('GetBroadcast', 'ns3::Mac48Address', [], {'is_static': True}),
        ('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], {'is_static': True}),
        ('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], {'is_static': True}),
        ('GetMulticast6Prefix', 'ns3::Mac48Address', [], {'is_static': True}),
        ('GetMulticastPrefix', 'ns3::Mac48Address', [], {'is_static': True}),
        ('IsBroadcast', 'bool', [], {'is_const': True}),
        ('IsGroup', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], {'is_static': True}),
    ]
    for fn_name, fn_ret, fn_params, fn_flags in member_functions:
        cls.add_method(fn_name, fn_ret, fn_params, **fn_flags)
    return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NetDeviceContainer
    (net-device-container.h, module 'network')."""
    # Const-iterator type returned by Begin()/End().
    iterator_type = '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >'
    # Constructors: copy, default, from a single device, from a device
    # name, and from the concatenation of two containers.
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    cls.add_constructor([param('std::string', 'devName')])
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    # Add() overloads: another container, a device pointer, a device name.
    cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    cls.add_method('Add', 'void', [param('std::string', 'deviceName')])
    # Const iteration and random access.
    cls.add_method('Begin', iterator_type, [], is_const=True)
    cls.add_method('End', iterator_type, [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase (object-base.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # Attribute read access; the FailSafe variant reports success via bool.
    cls.add_method('GetAttribute', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    # Run-time type information.
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Attribute write access; same fail-safe convention as the getters.
    cls.add_method('SetAttribute', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # Trace-source (dis)connection, with and without a context string.
    cls.add_method('TraceConnect', 'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceConnectWithoutContext', 'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnect', 'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnectWithoutContext', 'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    # Protected construction hooks.
    cls.add_method('ConstructSelf', 'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectDeleter (object.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # static void Delete(ns3::Object * object)
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactory (object-factory.h, module 'core')."""
    cls.add_output_stream_operator()
    # Constructors: copy, default, and from a TypeId name string.
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    # Instantiate the configured type and query it.
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    # Pre-set an attribute on objects this factory will create.
    cls.add_method('Set', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId overloads: TypeId object, C string, std::string.
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register bindings for the SimpleRefCount<Object, ObjectBase, ObjectDeleter>
    template instantiation (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register Python bindings for ns3::TagBuffer (tag-buffer.h, module 'network')."""
    # Constructors: copy, and from a [start, end) byte range.
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    # All remaining members are plain methods; register them from a
    # (name, return type, parameters) table in the original order.
    for method_name, return_type, parameters in (
        ('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]),
        ('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]),
        ('ReadDouble', 'double', []),
        ('ReadU16', 'uint16_t', []),
        ('ReadU32', 'uint32_t', []),
        ('ReadU64', 'uint64_t', []),
        ('ReadU8', 'uint8_t', []),
        ('TrimAtEnd', 'void', [param('uint32_t', 'trim')]),
        ('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]),
        ('WriteDouble', 'void', [param('double', 'v')]),
        ('WriteU16', 'void', [param('uint16_t', 'data')]),
        ('WriteU32', 'void', [param('uint32_t', 'data')]),
        ('WriteU64', 'void', [param('uint64_t', 'v')]),
        ('WriteU8', 'void', [param('uint8_t', 'v')]),
    ):
        cls.add_method(method_name, return_type, parameters)
    return
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register bindings for ns3::TimeWithUnit (nstime.h, module 'core')."""
    cls.add_output_stream_operator()
    # Copy constructor.
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    # TimeWithUnit(Time const time, Time::Unit const unit)
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId (type-id.h, module 'core')."""
    # Comparison and output operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: from a type name, default, and copy.
    cls.add_constructor([param('char const *', 'name')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    # Attribute registration, with and without an explicit flags word.
    cls.add_method('AddAttribute', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    cls.add_method('AddAttribute', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    # Trace-source registration; the overload without a callback type name
    # is marked deprecated upstream.
    cls.add_method('AddTraceSource', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
                   deprecated=True)
    cls.add_method('AddTraceSource', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
    # Attribute introspection.
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [], is_const=True)
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    cls.add_method('GetHash', 'uint32_t', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    # Registry-wide queries.
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    cls.add_method('GetSize', 'std::size_t', [], is_const=True)
    # Trace-source introspection.
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    cls.add_method('HasParent', 'bool', [], is_const=True)
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    # Lookups by name or hash; `info` is an out-parameter the wrapper
    # does not take ownership of.
    cls.add_method('LookupAttributeByName', 'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
    cls.add_method('LookupByHashFailSafe', 'bool',
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
                   is_static=True)
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')], is_const=True)
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    # Mutators.
    cls.add_method('SetAttributeInitialValue', 'bool',
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
    cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register bindings for ns3::TypeId::AttributeInformation (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Public data members of the struct, all writable from Python.
    for attribute_name, cpp_type in (
        ('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >'),
        ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
        ('flags', 'uint32_t'),
        ('help', 'std::string'),
        ('initialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
        ('name', 'std::string'),
        ('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
    ):
        cls.add_instance_attribute(attribute_name, cpp_type, is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register bindings for ns3::TypeId::TraceSourceInformation (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Public data members of the struct, all writable from Python.
    for attribute_name, cpp_type in (
        ('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >'),
        ('callback', 'std::string'),
        ('help', 'std::string'),
        ('name', 'std::string'),
    ):
        cls.add_instance_attribute(attribute_name, cpp_type, is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register bindings for the placeholder type ns3::empty (empty.h, module 'core')."""
    # Only default and copy constructors exist on this type.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register bindings for the 64.64 fixed-point type ns3::int64x64_t
    (int64x64-double.h, module 'core')."""
    t = root_module['ns3::int64x64_t']
    # Binary arithmetic operators (each returns a new int64x64_t) and unary minus.
    cls.add_binary_numeric_operator('*', t, t, param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', t, t, param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', t, t, param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', t, t, param('ns3::int64x64_t const &', u'right'))
    # Comparison operators.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    # In-place arithmetic operators.
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    # Constructors: default, one per native numeric type, from a
    # (high, low) pair of 64-bit words, and copy.
    cls.add_constructor([])
    for numeric_type in ('double', 'long double', 'int', 'long int',
                         'long long int', 'unsigned int', 'long unsigned int',
                         'long long unsigned int'):
        cls.add_constructor([param(numeric_type, 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Accessors and arithmetic helpers.
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    # Static data member identifying the active implementation backend.
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return
def register_Ns3Object_methods(root_module, cls):
    """Register Python bindings for ns3::Object (object.h, module 'core')."""
    # Public default constructor.
    cls.add_constructor([])
    # Aggregation, lifecycle, and type-information methods.
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Initialize', 'void', [])
    cls.add_method('IsInitialized', 'bool', [], is_const=True)
    # Protected copy constructor and virtual lifecycle hooks for subclasses.
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register bindings for ns3::Object::AggregateIterator (object.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    cls.add_constructor([])
    # Java-style iteration: HasNext() then Next().
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for SimpleRefCount<AttributeAccessor, empty,
    DefaultDeleter<AttributeAccessor>> (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::AttributeChecker, ...> template
    instantiation (constructors and static Cleanup) on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::AttributeValue, ...> template
    instantiation (constructors and static Cleanup) on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::CallbackImplBase, ...> template
    instantiation (constructors and static Cleanup) on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::Hash::Implementation, ...> template
    instantiation (constructors and static Cleanup) on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::NetDeviceQueue, ...> template
    instantiation (constructors and static Cleanup) on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter< ns3::NetDeviceQueue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::QueueItem, ...> template
    instantiation (constructors and static Cleanup) on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount(ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::TraceSourceAccessor, ...> template
    instantiation (constructors and static Cleanup) on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register the full ns3::Time API on *cls*: arithmetic/comparison
    operators, all numeric and string constructors, unit conversions
    (GetSeconds, GetNanoSeconds, ...), sign predicates and the static
    resolution helpers.

    Auto-generated pybindgen registrations; the call order mirrors the
    scanned nstime.h header and should not be reordered by hand.
    """
    # Binary arithmetic, comparison, in-place and stream operators.
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
    cls.add_method('As',
                   'ns3::TimeWithUnit',
                   [param('ns3::Time::Unit const', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::Time const &', 'o')],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromDouble',
                   'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromInteger',
                   'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
    cls.add_method('GetDays',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
    cls.add_method('GetHours',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
    cls.add_method('GetMinutes',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution',
                   'ns3::Time::Unit',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
    cls.add_method('GetYears',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
    cls.add_method('Max',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
    cls.add_method('Min',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution',
                   'void',
                   [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
    cls.add_method('StaticInit',
                   'bool',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
    cls.add_method('To',
                   'ns3::int64x64_t',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToDouble',
                   'double',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToInteger',
                   'int64_t',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register ns3::TraceSourceAccessor constructors and its pure-virtual
    Connect/Disconnect interface on *cls*.

    Auto-generated pybindgen registrations; the call order mirrors the
    scanned ns-3 headers.
    """
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register ns3::AttributeAccessor constructors and its pure-virtual
    Get/Set/HasGetter/HasSetter interface on *cls*.

    Auto-generated pybindgen registrations; the call order mirrors the
    scanned ns-3 headers.
    """
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register ns3::AttributeChecker constructors and its (mostly
    pure-virtual) Check/Copy/Create/type-information interface on *cls*.

    Auto-generated pybindgen registrations; the call order mirrors the
    scanned ns-3 headers.
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register ns3::AttributeValue constructors and its pure-virtual
    Copy/serialization interface on *cls*.

    Auto-generated pybindgen registrations; the call order mirrors the
    scanned ns-3 headers.
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register ns3::CallbackChecker constructors on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase constructors, the pure-virtual
    GetTypeid/IsEqual interface, and the protected static Demangle helper
    on *cls*.  Auto-generated pybindgen registrations.
    """
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register ns3::CallbackValue constructors and its virtual
    Copy/serialization overrides plus Set on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3Channel_methods(root_module, cls):
    """Register ns3::Channel constructors and its GetDevice/GetId/
    GetNDevices/GetTypeId interface on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Channel const &', 'arg0')])
    ## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
    cls.add_constructor([])
    ## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue constructors and its private
    virtual Copy/serialization overrides on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv4AddressChecker constructors on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register ns3::Ipv4AddressValue constructors and its Copy/
    serialization overrides plus Get/Set accessors on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register ns3::Ipv4MaskChecker constructors on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register ns3::Ipv4MaskValue constructors and its Copy/
    serialization overrides plus Get/Set accessors on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv4Mask',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv6AddressChecker constructors on *cls*.
    Auto-generated pybindgen registrations.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register ns3::Ipv6AddressValue (ipv6-address.h, module 'network'), the AttributeValue wrapper around ns3::Ipv6Address."""
    # Constructors: default, copy, and from the wrapped Ipv6Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Typed accessors for the wrapped value.
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register ns3::Ipv6PrefixChecker (ipv6-address.h, module 'network'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register ns3::Ipv6PrefixValue (ipv6-address.h, module 'network'), the AttributeValue wrapper around ns3::Ipv6Prefix."""
    # Constructors: default, copy, and from the wrapped Ipv6Prefix.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Typed accessors for the wrapped value.
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register ns3::Mac48AddressChecker (mac48-address.h, module 'network'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register ns3::Mac48AddressValue (mac48-address.h, module 'network'), the AttributeValue wrapper around ns3::Mac48Address."""
    # Constructors: default, copy, and from the wrapped Mac48Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Typed accessors for the wrapped value.
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Register the abstract ns3::NetDevice base class (net-device.h, module 'network').

    Nearly every member is pure virtual; concrete devices override them.
    """
    # Constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    # Link-state change notification hook.
    cls.add_method('AddLinkChangeCallback', 'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_pure_virtual=True, is_virtual=True)
    # Address / topology accessors.
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # GetMulticast is overloaded for IPv4 and IPv6 group addresses.
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Capability predicates.
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Transmission entry points.
    cls.add_method('Send', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendFrom', 'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    # Mutators.
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    # Receive-callback registration (promiscuous and normal variants).
    cls.add_method('SetPromiscReceiveCallback', 'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3NetDeviceQueue_methods(root_module, cls):
    """Register ns3::NetDeviceQueue (net-device.h, module 'network')."""
    # Constructors (copy constructor first, matching the scanned declaration order).
    cls.add_constructor([param('ns3::NetDeviceQueue const &', 'arg0')])
    cls.add_constructor([])
    # Flow-control state query.
    cls.add_method('IsStopped', 'bool', [], is_const=True)
    # Wake-callback registration plus the start/stop/wake transitions.
    cls.add_method('SetWakeCallback', 'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    cls.add_method('Start', 'void', [], is_virtual=True)
    cls.add_method('Stop', 'void', [], is_virtual=True)
    cls.add_method('Wake', 'void', [], is_virtual=True)
    return
def register_Ns3NetDeviceQueueInterface_methods(root_module, cls):
    """Register ns3::NetDeviceQueueInterface (net-device.h, module 'network')."""
    # Constructors (copy constructor first, matching the scanned declaration order).
    cls.add_constructor([param('ns3::NetDeviceQueueInterface const &', 'arg0')])
    cls.add_constructor([])
    # Transmit-queue management.
    cls.add_method('CreateTxQueues', 'void', [])
    cls.add_method('GetNTxQueues', 'uint8_t', [], is_const=True)
    cls.add_method('GetSelectQueueCallback',
                   'ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    cls.add_method('GetTxQueue', 'ns3::Ptr< ns3::NetDeviceQueue >', [param('uint8_t', 'i')], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetSelectQueueCallback', 'void',
                   [param('ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    cls.add_method('SetTxQueuesN', 'void', [param('uint8_t', 'numTxQueues')])
    # Protected override of ns3::Object::DoDispose.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker (object-factory.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue (object-factory.h, module 'core'), the AttributeValue wrapper around ns3::ObjectFactory."""
    # Constructors: default, copy, and from the wrapped ObjectFactory.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Typed accessors for the wrapped value.
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3QueueItem_methods(root_module, cls):
    """Register ns3::QueueItem (net-device.h, module 'network')."""
    # Expose the C++ operator<< so wrapped items are printable.
    cls.add_output_stream_operator()
    # Constructed from the packet it carries.
    cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p')])
    cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    cls.add_method('GetPacketSize', 'uint32_t', [], is_const=True, is_virtual=True)
    # Returns False when the requested field is unavailable; value is an out-parameter.
    cls.add_method('GetUint8Value', 'bool',
                   [param('ns3::QueueItem::Uint8Values', 'field'), param('uint8_t &', 'value')],
                   is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue (nstime.h, module 'core'), the AttributeValue wrapper around ns3::Time."""
    # Constructors: default, copy, and from the wrapped Time.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Typed accessors for the wrapped value.
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker (type-id.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue (type-id.h, module 'core'), the AttributeValue wrapper around ns3::TypeId."""
    # Constructors: default, copy, and from the wrapped TypeId.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Typed accessors for the wrapped value.
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register ns3::AddressChecker (address.h, module 'network'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register ns3::AddressValue (address.h, module 'network'), the AttributeValue wrapper around ns3::Address."""
    # Constructors: default, copy, and from the wrapped Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Address const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Typed accessors for the wrapped value.
    cls.add_method('Get', 'ns3::Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')])
    return
def register_Ns3BridgeChannel_methods(root_module, cls):
    """Register ns3::BridgeChannel (bridge-channel.h, module 'bridge')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Attach an underlying channel to the aggregate bridge channel.
    cls.add_method('AddChannel', 'void', [param('ns3::Ptr< ns3::Channel >', 'bridgedChannel')])
    # ns3::Channel virtual interface.
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
    return
def register_Ns3BridgeNetDevice_methods(root_module, cls):
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice::BridgeNetDevice() [constructor]
cls.add_constructor([])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddBridgePort(ns3::Ptr<ns3::NetDevice> bridgePort) [member function]
cls.add_method('AddBridgePort',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'bridgePort')])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetBridgePort(uint32_t n) const [member function]
cls.add_method('GetBridgePort',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'n')],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Channel> ns3::BridgeNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint16_t ns3::BridgeNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetNBridgePorts() const [member function]
cls.add_method('GetNBridgePorts',
'uint32_t',
[],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Node> ns3::BridgeNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): static ns3::TypeId ns3::BridgeNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardBroadcast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardBroadcast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardUnicast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardUnicast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetLearnedState(ns3::Mac48Address source) [member function]
cls.add_method('GetLearnedState',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Mac48Address', 'source')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::Learn(ns3::Mac48Address source, ns3::Ptr<ns3::NetDevice> port) [member function]
cls.add_method('Learn',
'void',
[param('ns3::Mac48Address', 'source'), param('ns3::Ptr< ns3::NetDevice >', 'port')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ReceiveFromDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('ReceiveFromDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')],
visibility='protected')
return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Bind ns3::Hash::Implementation (declared in hash-function.h, module 'core')."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    cls.add_constructor([])
    # uint32_t GetHash32(char const * buffer, size_t const size) -- pure virtual.
    cls.add_method(
        'GetHash32', 'uint32_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_pure_virtual=True, is_virtual=True)
    # uint64_t GetHash64(char const * buffer, size_t const size) -- virtual.
    cls.add_method(
        'GetHash64', 'uint64_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_virtual=True)
    # void clear() -- pure virtual.
    cls.add_method(
        'clear', 'void', [],
        is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Bind ns3::Hash::Function::Fnv1a (declared in hash-fnv.h, module 'core')."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    cls.add_constructor([])
    # uint32_t GetHash32(char const * buffer, size_t const size) -- virtual.
    cls.add_method(
        'GetHash32', 'uint32_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_virtual=True)
    # uint64_t GetHash64(char const * buffer, size_t const size) -- virtual.
    cls.add_method(
        'GetHash64', 'uint64_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_virtual=True)
    # void clear() -- virtual.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Bind ns3::Hash::Function::Hash32 (declared in hash-function.h, module 'core')."""
    # Copy constructor, plus a constructor taking a 32-bit hash-function pointer.
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    # uint32_t GetHash32(char const * buffer, size_t const size) -- virtual.
    cls.add_method(
        'GetHash32', 'uint32_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_virtual=True)
    # void clear() -- virtual.  (No GetHash64 override for the 32-bit wrapper.)
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Bind ns3::Hash::Function::Hash64 (declared in hash-function.h, module 'core')."""
    # Copy constructor, plus a constructor taking a 64-bit hash-function pointer.
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    # uint32_t GetHash32(char const * buffer, size_t const size) -- virtual.
    cls.add_method(
        'GetHash32', 'uint32_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_virtual=True)
    # uint64_t GetHash64(char const * buffer, size_t const size) -- virtual.
    cls.add_method(
        'GetHash64', 'uint64_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_virtual=True)
    # void clear() -- virtual.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Bind ns3::Hash::Function::Murmur3 (declared in hash-murmur3.h, module 'core')."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    cls.add_constructor([])
    # uint32_t GetHash32(char const * buffer, size_t const size) -- virtual.
    cls.add_method(
        'GetHash32', 'uint32_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_virtual=True)
    # uint64_t GetHash64(char const * buffer, size_t const size) -- virtual.
    cls.add_method(
        'GetHash64', 'uint64_t',
        [param('char const *', 'buffer'), param('size_t const', 'size')],
        is_virtual=True)
    # void clear() -- virtual.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_functions(root_module):
    """Register free functions for each ns3 submodule of `root_module`."""
    # Dispatch table keeps the submodule name next to its registrar;
    # iteration order matches the original explicit call sequence.
    for submodule_name, registrar in (
            ('FatalImpl', register_functions_ns3_FatalImpl),
            ('Hash', register_functions_ns3_Hash),
            ('TracedValueCallback', register_functions_ns3_TracedValueCallback),
            ):
        registrar(root_module.get_submodule(submodule_name), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    # No free functions are registered for the ns3::FatalImpl namespace.
    return
def register_functions_ns3_Hash(module, root_module):
    # Recurse into the nested ns3::Hash::Function namespace.
    register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    # No free functions are registered for the ns3::Hash::Function namespace.
    return
def register_functions_ns3_TracedValueCallback(module, root_module):
    # No free functions are registered for the ns3::TracedValueCallback namespace.
    return
def main():
    """Generate the pybindgen C++ binding source for this module on stdout."""
    sink = FileCodeSink(sys.stdout)
    root = module_init()
    # Registration must run in this order: types first, then methods, then
    # free functions, before code generation.
    for register in (register_types, register_methods, register_functions):
        register(root)
    root.generate(sink)
# Script entry point: emit the generated bindings when run directly.
if __name__ == '__main__':
    main()
|
gpl-2.0
|
ilc/imgserv
|
paste/util/doctest24.py
|
26
|
99418
|
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
# Docstring markup declaration for documentation tools.
__docformat__ = 'reStructuredText en'

# Public API of this module; the numbered groups mirror the table of
# contents further down in the file.
__all__ = [
    # 0. Option Flags
    'register_optionflag',
    'DONT_ACCEPT_TRUE_FOR_1',
    'DONT_ACCEPT_BLANKLINE',
    'NORMALIZE_WHITESPACE',
    'ELLIPSIS',
    'IGNORE_EXCEPTION_DETAIL',
    'COMPARISON_FLAGS',
    'REPORT_UDIFF',
    'REPORT_CDIFF',
    'REPORT_NDIFF',
    'REPORT_ONLY_FIRST_FAILURE',
    'REPORTING_FLAGS',
    # 1. Utility Functions
    'is_private',
    # 2. Example & DocTest
    'Example',
    'DocTest',
    # 3. Doctest Parser
    'DocTestParser',
    # 4. Doctest Finder
    'DocTestFinder',
    # 5. Doctest Runner
    'DocTestRunner',
    'OutputChecker',
    'DocTestFailure',
    'UnexpectedException',
    'DebugRunner',
    # 6. Test Functions
    'testmod',
    'testfile',
    'run_docstring_examples',
    # 7. Tester
    'Tester',
    # 8. Unittest Support
    'DocTestSuite',
    'DocFileSuite',
    'set_unittest_reportflags',
    # 9. Debugging Support
    'script_from_examples',
    'testsource',
    'debug_src',
    'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
# Suppress the DeprecationWarning that the deprecated is_private function
# emits when it is exercised by this module's own doctests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
                        __name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.

OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Create and return a new doctest option flag.

    Each flag is the next unused bit (so flags OR together cleanly), and
    is recorded in OPTIONFLAGS_BY_NAME under `name`.
    """
    OPTIONFLAGS_BY_NAME[name] = 1 << len(OPTIONFLAGS_BY_NAME)
    return OPTIONFLAGS_BY_NAME[name]

# Comparison flags: control how actual output is matched against `want`.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    IGNORE_EXCEPTION_DETAIL)

# Reporting flags: control how failures are displayed.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')

REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
    """prefix, base -> true iff name prefix + "." + base is "private".

    Prefix may be an empty string, and base does not contain a period.
    Prefix is ignored (although functions you write conforming to this
    protocol may make use of it).

    Return true iff base begins with an (at least one) underscore, but
    does not both begin and end with (at least) two underscores.

    >>> is_private("a.b", "my_func")
    False
    >>> is_private("____", "_my_func")
    True
    >>> is_private("someclass", "__init__")
    False
    >>> is_private("sometypo", "__init_")
    True
    >>> is_private("x.y.z", "_")
    True
    >>> is_private("_x.y.z", "__")
    False
    >>> is_private("", "")  # senseless but consistent
    False
    """
    warnings.warn("is_private is deprecated; it wasn't useful; "
                  "examine DocTestFinder.find() lists instead",
                  DeprecationWarning, stacklevel=2)
    if not base.startswith("_"):
        return False
    # Dunder names (leading *and* trailing double underscore) are public.
    return not (base.startswith("__") and base.endswith("__"))
def _extract_future_flags(globs):
    """Return the compiler flags for the __future__ features that have been
    imported into the namespace `globs` (suitable for passing to compile())."""
    flags = 0
    for feature_name in __future__.all_feature_names:
        feature = getattr(__future__, feature_name)
        # Identity check: the name must be bound to the real feature object,
        # not merely a same-named variable.
        if globs.get(feature_name) is feature:
            flags |= feature.compiler_flag
    return flags
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`.  In particular:
      - If `module` is a module, then return module.
      - If `module` is a string, then import and return the
        module with that name.
      - If `module` is None, then return the calling module.
        The calling module is assumed to be the module of
        the stack frame at the given depth in the call stack.

    Raises TypeError for any other argument type.
    """
    if inspect.ismodule(module):
        return module
    elif isinstance(module, (str, unicode)):
        # NOTE(review): `unicode` exists only on Python 2; this module
        # targets Python 2 throughout.
        return __import__(module, globals(), locals(), ["*"])
    elif module is None:
        # Walk `depth` frames up the call stack and resolve that frame's
        # module via sys.modules.
        return sys.modules[sys._getframe(depth).f_globals['__name__']]
    else:
        raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
    """Prepend `indent` space characters to the beginning of every
    non-empty line in `s`, and return the result.  Empty lines are
    left untouched."""
    prefix = ' ' * indent
    out_lines = []
    for line in s.split('\n'):
        if line:
            out_lines.append(prefix + line)
        else:
            out_lines.append(line)
    return '\n'.join(out_lines)
def _exception_traceback(exc_info):
    """Format the `(exc_type, exc_value, traceback)` triple `exc_info`
    (as returned by sys.exc_info()) into a traceback string."""
    buf = StringIO()
    traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                              file=buf)
    return buf.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """Capture buffer substituted for sys.stdout while examples execute."""

    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline.  There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case they used print with a trailing comma in an example.
        # (softspace is a Python 2 file-object attribute.)
        if hasattr(self, "softspace"):
            del self.softspace
        return result

    def truncate(self, size=None):
        # Delegate to StringIO, then drop any lingering softspace state
        # (same rationale as in getvalue above).
        StringIO.truncate(self, size)
        if hasattr(self, "softspace"):
            del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Return True iff `got` matches `want`, where occurrences of
    ELLIPSIS_MARKER ('...') in `want` match any (possibly empty)
    substring of `got`.

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2

    # Deal with exact matches possibly needed at one or both ends.
    # startpos/endpos delimit the portion of `got` still to be matched.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False

    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False

    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece.  If there's no overall match that way alone,
    # there's no overall match period.
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`.  That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)

    return True
def _comment_line(line):
    """Return `line` rewritten as a Python comment (trailing whitespace
    stripped; an empty line becomes a bare '#')."""
    stripped = line.rstrip()
    if stripped:
        return '# ' + stripped
    return '#'
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` receives the debugger's interactive output.  The double
        # underscore name-mangles the attribute so it cannot collide with
        # pdb.Pdb's own attributes.
        self.__out = out
        pdb.Pdb.__init__(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            # Always restore stdout, even if dispatch raises.
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example, consisting of source code and expected
    output.  `Example` defines the following attributes:

      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline if needed.

      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline if needed.

      - exc_msg: The exception message generated by the example, if
        the example is expected to generate an exception; or `None` if
        it is not expected to generate an exception.  This exception
        message is compared against the return value of
        `traceback.format_exception_only()`.  `exc_msg` ends with a
        newline unless it's `None`.  The constructor adds a newline
        if needed.

      - lineno: The line number within the DocTest string containing
        this Example where the Example begins.  This line number is
        zero-based, with respect to the beginning of the DocTest.

      - indent: The example's indentation in the DocTest string.
        I.e., the number of space characters that precede the
        example's first prompt.

      - options: A dictionary mapping from option flags to True or
        False, which is used to override default options for this
        example.  Any option flags not contained in this dictionary
        are left at their default value (as specified by the
        DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize inputs: guarantee source/want/exc_msg are
        # newline-terminated (when non-empty / not None) so later
        # comparisons are uniform.
        if not source.endswith('\n'):
            source += '\n'
        if want and not want.endswith('\n'):
            want += '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg += '\n'
        # Store properties.
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        # Fresh dict per instance (a shared default dict would be a bug).
        if options is None: options = {}
        self.options = options
        self.exc_msg = exc_msg
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of examples.

      - globs: The namespace (aka globals) that the examples should
        be run in.

      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).

      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.

      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.

      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Copy so the examples can mutate their namespace without
        # affecting the caller's dict.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    # This lets us sort tests by name.  (Python 2 comparison protocol;
    # `__cmp__` and the `cmp` builtin do not exist on Python 3.)
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])

        output = []
        # charno tracks our character position in `string`; lineno the
        # 0-based line number of the current position.
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.  Prompt lines
            # holding only a blank/comment produce no Example.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                       lineno=lineno,
                                       indent=min_indent+len(m.group('indent')),
                                       options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`, where
        `source` is the matched example's source code (with prompts and
        indentation stripped); `options` is the option-flag overrides
        parsed from directive comments in the source; `want` is the
        example's expected output (with indentation stripped); and
        `exc_msg` is the expected exception message, or None if no
        traceback is expected.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # Each line drops indent + 4 chars: the indentation plus ">>> "/"... ".
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.  The keys are
        option flag values (ints) and the values are booleans: True
        for a '+' directive, False for a '-' directive.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        Raises ValueError for an unknown/malformed directive, or for a
        directive that appears on a line with no actual example code.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                # Each token must be '+' or '-' followed by a registered
                # flag name (see OPTIONFLAGS_BY_NAME), e.g. '+ELLIPSIS'.
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        # A directive is only meaningful when attached to real example
        # code; reject directives on blank/comment-only lines.
        # NOTE(review): this message reports `lineno` while the one above
        # reports `lineno+1` -- looks inconsistent; confirm intent before
        # changing, since it matches the upstream doctest module.
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects. Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, _namefilter=None, exclude_empty=True):
        """
        Create a new doctest finder.
        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest). The
        signature for this factory function should match the signature
        of the DocTest constructor.
        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.
        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        # NOTE(review): the DocTestParser() default is evaluated once at
        # class-definition time and shared by all finders -- presumably
        # intentional (the parser keeps no per-call state); confirm
        # before swapping in a stateful parser.
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
        # _namefilter is undocumented, and exists only for temporary backward-
        # compatibility support of testmod's deprecated isprivate mess.
        self._namefilter = _namefilter
    def find(self, obj, name=None, module=None, globs=None,
             extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.
        The optional parameter `module` is the module that contains
        the given object. If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module. The object's module is used:
            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.
        Contained objects whose module does not match `module` are ignored.
        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests: if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.
        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`). A new copy of the globals dictionary is created
        for each DocTest. If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise. If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))
        # Find the module that contains the given object (if obj is
        # a module, then module=obj.). Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)
        # Read the module's source code. This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            # inspect.getfile raises TypeError for built-in objects,
            # which is why TypeError is swallowed below.
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            source_lines = None
        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.  The final {}
        # is the `seen` id()-keyed set guarding against cycles.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        return tests
    def _filter(self, obj, prefix, base):
        """
        Return true if the given object should not be examined.
        (Backward-compatibility hook; active only when the deprecated
        _namefilter was supplied to the constructor.)
        """
        return (self._namefilter is not None and
                self._namefilter(prefix, base))
    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.  With module=None, everything is considered local
        (see `find` docstring).
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            # Compare the function's globals dict by identity with the
            # module namespace (Python 2: func_globals).
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.  `seen` maps id(obj) -> 1 to avoid
        re-processing (and infinite recursion on) shared objects.
        """
        if self._verbose:
            print 'Finding tests in %s' % name
        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                # Special handling for staticmethod/classmethod:
                # unwrap to the underlying function so the isfunction
                # checks below see it (Python 2: im_func).
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).im_func
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.  Bare strings (e.g. __test__ values)
        are treated as the docstring itself.
        """
        # Extract the object's docstring. If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Point at the .py source, not the compiled file.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring. Note:
        this method assumes that the object has a docstring.
        Returns None when the line number cannot be determined.
        """
        lineno = None
        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0
        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
        # Find the line number for functions & methods: unwrap down to
        # the code object, whose co_firstlineno is authoritative.
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts. Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno
        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case. It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.
    >>> tests = DocTestFinder().find(_TestClass)
    >>> runner = DocTestRunner(verbose=False)
    >>> for test in tests:
    ...     print runner.run(test)
    (0, 2)
    (0, 1)
    (0, 2)
    (0, 2)
    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:
    >>> runner.summarize(verbose=1)
    4 items passed all tests:
       2 tests in _TestClass
       2 tests in _TestClass.__init__
       2 tests in _TestClass.get
       1 tests in _TestClass.square
    7 tests in 4 items.
    7 passed and 0 failed.
    Test passed.
    (0, 7)
    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:
    >>> runner.tries
    7
    >>> runner.failures
    0
    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`. This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information. If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.
    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed. It defaults to `sys.stdout.write`. If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70
    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.
        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.
        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures. See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries); aggregated by summarize().
        self._name2ft = {}
        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()
    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////
    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example. (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')
    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully. (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")
    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))
    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
    def _failure_header(self, test, example):
        # Build the standard "File ..., line ..., in ..." header that
        # precedes every failure report.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)
    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////
    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`. Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`. `compileflags` is the set of compiler
        flags that should be used to execute examples. Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed. The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options (reset to the originals
            # first, so directives apply only to their own example).
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised. (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink! This is where the user's code gets run.
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            got = self._fakeout.getvalue() # the actual output
            # NOTE(review): relies on _SpoofOut.truncate(0) fully
            # resetting the capture buffer between examples -- confirm
            # against the _SpoofOut definition earlier in this file.
            self._fakeout.truncate(0)
            outcome = FAILURE # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception: check if it was expected.
            else:
                exc_info = sys.exc_info()
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    # Compare only the "ExceptionClass:" prefix of the
                    # expected and actual messages.
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries
    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t
    # Matches the synthetic filenames produced in __run, e.g.
    # '<doctest some.test.name[3]>'.
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve the current example's source for our synthetic
        # filenames; delegate everything else to the real getlines.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename)#?, module_globals)
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`. If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection. If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples. If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Always undo the global monkey-patches, even on error.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is. If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print " ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        # Failures are always printed, regardless of verbosity.
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return totalf, totalt
    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """Fold another runner's per-test (failures, tries) tallies
        into this one, summing entries for duplicated test names."""
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check whether the actual output from a doctest
    example matches the expected output. `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`). These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible. See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings. Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not. In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`). `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True) # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """A DocTest example has failed in debugging mode.
    The exception instance has variables:
    - test: the DocTest object being run
    - example: the Example object that failed
    - got: the actual output
    """
    def __init__(self, test, example, got):
        self.test = test
        self.example = example
        self.got = got
    def __str__(self):
        return str(self.test)
class UnexpectedException(Exception):
    """A DocTest example has encountered an unexpected exception.
    The exception instance has variables:
    - test: the DocTest object being run
    - example: the Example object that failed
    - exc_info: the exception info (as returned by sys.exc_info())
    """
    def __init__(self, test, example, exc_info):
        self.test = test
        self.example = example
        self.exc_info = exc_info
    def __str__(self):
        return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
(0, 1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
    # Abort on the first output mismatch rather than continuing the run.
    raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.

# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.  Repeated testmod()/testfile() calls merge
# their results into this runner so statistics accumulate.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, isprivate=None,
    report=True, optionflags=0, extraglobs=None, raise_on_error=False,
    exclude_empty=False

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__. Unless isprivate is specified, private names
    are not skipped.
    Also test examples reachable from dict m.__test__ if it exists and is
    not None. m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.
    Return (#failures, #tests).
    See doctest.__doc__ for an overview.
    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__. A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used. This is new in 2.4.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. This is new in 2.3. Possible values (see the
    docs for details):
        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Deprecated in Python 2.4:
    Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private. The default function is
    treat all functions as public. Optionally, "isprivate" can be
    set to doctest.is_private to skip over functions marked as private
    using the underscore naming convention; see its docs for details.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if isprivate is not None:
        warnings.warn("the isprivate argument is deprecated; "
                      "examine DocTestFinder.find() lists instead",
                      DeprecationWarning)

    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')

    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__

    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)

    # A DebugRunner stops at the first problem; a DocTestRunner records
    # everything and reports at the end.
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Fold this run's statistics into the module-global `master` runner
    # so successive testmod() calls accumulate.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:
      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path.  By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package.  To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").
      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).
    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.
    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):
        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.
    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path
    if module_relative:
        package = _normalize_module(package)
        filename = _module_relative_path(package, filename)

    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    # Read the file, convert it to a test, and run it.
    # FIX: close the file handle explicitly instead of leaking it until
    # garbage collection (the original used `open(filename).read()`).
    f = open(filename)
    try:
        s = f.read()
    finally:
        f.close()
    test = parser.get_doctest(s, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Merge into the global master runner (see doctest.master).
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Run the doctest examples in `f`'s docstring, using `globs` as globals.

    `name` is used in failure messages.  If `verbose` is true, output is
    produced even when every example passes.  `compileflags` gives the
    flags for the Python compiler (defaulting to the future-import flags
    that apply to `globs`), and `optionflags` selects doctest options as
    documented for `testmod`.
    """
    # recurse=False: only f's own docstring is examined, not contained
    # objects.
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    finder = DocTestFinder(verbose=verbose, recurse=False)
    for found_test in finder.find(f, name, globs=globs):
        runner.run(found_test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    """Deprecated pre-2.4 interface kept for backward compatibility.

    Use DocTestFinder/DocTestRunner directly instead; every method here
    is a thin wrapper over those classes.
    """

    def __init__(self, mod=None, globs=None, verbose=None,
                 isprivate=None, optionflags=0):

        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs

        self.verbose = verbose
        self.isprivate = isprivate
        self.optionflags = optionflags
        self.testfinder = DocTestFinder(_namefilter=isprivate)
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    def runstring(self, s, name):
        # Parse `s` as doctest source and run it against self.globs.
        # Returns (#failures, #tries).
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return (f,t)

    def rundoc(self, object, name=None, module=None):
        # Run every doctest found in `object`, summing the per-test
        # (#failures, #tries) counts.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)

    def rundict(self, d, name, module=None):
        # Wrap dict `d` in a synthetic module so rundoc can search it.
        # (`new.module` is the Python 2 way to create a bare module.)
        import new
        m = new.module(name)
        m.__dict__.update(d)
        # module=False tells the finder not to filter by defining module.
        if module is None:
            module = False
        return self.rundoc(m, name, module)

    def run__test__(self, d, name):
        # Run the entries of a __test__-style mapping.
        import new
        m = new.module(name)
        m.__test__ = d
        return self.rundoc(m, name)

    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)

    def merge(self, other):
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################

# Default doctest reporting flags applied by DocTestCase.runTest when a
# case was constructed without any reporting flags of its own; changed
# via set_unittest_reportflags() below.
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> old = _unittest_reportflags
    >>> set_unittest_reportflags(REPORT_NDIFF |
    ...                          REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> import doctest
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags

    # Any bit outside REPORTING_FLAGS is rejected.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)

    previous, _unittest_reportflags = _unittest_reportflags, flags
    return previous
class DocTestCase(unittest.TestCase):
    """unittest.TestCase wrapper around a single DocTest.

    Running the case runs every example in the wrapped DocTest; a
    doctest failure surfaces as a unittest failure with the captured
    doctest report as its message.
    """

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):
        # test: the DocTest to run.  setUp/tearDown, if given, are
        # callables invoked with the DocTest object around each run.
        # checker: optional OutputChecker override.
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown

    def setUp(self):
        test = self._dt_test

        if self._dt_setUp is not None:
            self._dt_setUp(test)

    def tearDown(self):
        test = self._dt_test

        if self._dt_tearDown is not None:
            self._dt_tearDown(test)

        # Drop the test's globals so state cannot leak between runs.
        test.globs.clear()

    def runTest(self):
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags

        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags

        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)

        try:
            runner.DIVIDER = "-"*70
            # clear_globs=False: tearDown handles clearing the globals.
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old

        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))

    def format_failure(self, err):
        # Build the unittest failure message around doctest output `err`.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging. The test code
        is run in such a way that errors are not caught. This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
        exception:
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ...                {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except UnexpectedException, failure:
        ...     pass
        The UnexpectedException contains the test, the example, and
        the original exception:
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[0], exc_info[1], exc_info[2]
        Traceback (most recent call last):
        ...
        KeyError
        If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ...      >>> x = 1
        ...      >>> x
        ...      2
        ...      ''', {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...    case.debug()
        ... except DocTestFailure, failure:
        ...    pass
        DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
        As well as to the example:
        >>> failure.example.want
        '2\n'
        and the actual output:
        >>> failure.got
        '1\n'
        """
        self.setUp()
        # DebugRunner raises instead of recording, so errors propagate.
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()

    def id(self):
        return self._dt_test.name

    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))

    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.

    Each documentation string in `module` that contains doctest examples
    becomes one unittest test case; if any example in it fails, that case
    fails with a message naming the file and (approximate) line number.

    `module` may be a module object or a dotted module name; if omitted,
    the calling module is used.

    Keyword options:
      setUp / tearDown -- callables invoked with the DocTest object
        before/after running each file's tests (globals are available as
        the test's `globs` attribute).
      globs -- a dictionary of initial global variables for the tests.
      optionflags -- doctest option flags, or'd together as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Why do we want to do this? Because it reveals a bug that might
        # otherwise be hidden.
        raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()
    for dt in tests:
        if not dt.examples:
            continue
        if not dt.filename:
            # Point failures at the module's source, not its .pyc/.pyo.
            fname = module.__file__
            if fname[-4:] in (".pyc", ".pyo"):
                fname = fname[:-1]
            dt.filename = fname
        suite.addTest(DocTestCase(dt, **options))
    return suite
class DocFileCase(DocTestCase):
    """A DocTestCase for doctests loaded from a standalone text file."""

    def id(self):
        # Flatten dotted names so the id does not read as module nesting.
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename

    __str__ = __repr__

    def format_failure(self, err):
        template = 'Failed doctest test for %s\n File "%s", line 0\n\n%s'
        return template % (self._dt_test.name, self._dt_test.filename, err)
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(), **options):
    """Create a DocFileCase for the doctests in the file at `path`.

    `module_relative`/`package` control path interpretation exactly as in
    `testfile`.  `globs` supplies initial globals (default {}), `parser`
    extracts the tests, and remaining `options` are forwarded to
    DocFileCase.
    """
    if globs is None:
        globs = {}

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path.
    if module_relative:
        package = _normalize_module(package)
        path = _module_relative_path(package, path)

    # Find the file and read it.
    name = os.path.basename(path)
    # FIX: close the file handle explicitly instead of leaking it until
    # garbage collection (the original used `open(path).read()`).
    f = open(path)
    try:
        doc = f.read()
    finally:
        f.close()

    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.

    Each path string is interpreted according to the keyword argument
    "module_relative":
      - True (default): os-independent, module-relative paths using "/"
        separators, resolved against the calling module's directory (or
        against "package" if that keyword is given); must not be
        absolute.
      - False: os-specific paths, absolute or relative to the current
        working directory ("package" may then not be specified).

    Other keyword options:
      package -- a Python package (or package name) used as the base
        directory for module-relative paths.
      setUp / tearDown -- callables invoked with the DocTest object
        before/after running each file's tests (globals are available as
        the test's `globs` attribute).
      globs -- a dictionary of initial global variables for the tests.
      optionflags -- doctest option flags, or'd together as an integer.
      parser -- a DocTestParser (or subclass) used to extract the tests.
    """
    result = unittest.TestSuite()

    # We do this here so that _normalize_module is called at the right
    # level.  If it were called in DocFileTest, then this function
    # would be the caller and we might guess the package incorrectly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for file_path in paths:
        result.addTest(DocFileTest(file_path, **kw))

    return result
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script.  Example input is
    converted to regular code.  Example output and all other words
    are converted to comments:
    >>> text = '''
    ...       Here are examples of simple math.
    ...
    ...           Python has super accurate integer addition
    ...
    ...           >>> 2 + 2
    ...           5
    ...
    ...           And very friendly error messages:
    ...
    ...           >>> 1/0
    ...           To Infinity
    ...           And
    ...           Beyond
    ...
    ...           You can use logic if you want:
    ...
    ...           >>> if 0:
    ...           ...    blah
    ...           ...    blah
    ...           ...
    ...
    ...           Ho hum
    ...           '''
    >>> print script_from_examples(text)
    # Here are examples of simple math.
    #
    #     Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    #     And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    #     You can use logic if you want:
    #
    if 0:
       blah
       blah
    #
    #     Ho hum
    """
    script_lines = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Example source becomes real code (drop the trailing NL).
            script_lines.append(piece.source[:-1])
            expected = piece.want
            if expected:
                # Expected output becomes '##'-prefixed comments.
                script_lines.append('# Expected:')
                script_lines.extend('## ' + w
                                    for w in expected.split('\n')[:-1])
        else:
            # Narrative text between examples becomes ordinary comments.
            script_lines.extend(_comment_line(t)
                                for t in piece.split('\n')[:-1])

    # Trim bare '#' markers from both ends of the script.
    while script_lines and script_lines[-1] == '#':
        script_lines.pop()
    while script_lines and script_lines[0] == '#':
        script_lines.pop(0)

    return '\n'.join(script_lines)
def testsource(module, name):
    """Extract the doctests for `name` in `module` as a runnable script.

    `module` is a module object or dotted module name; `name` is the
    dotted name (within the module) of the object whose docstring holds
    the tests.  Raises ValueError when no matching test is found.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug the doctest docstring given as the string `src`."""
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
    "Debug a test script.  `src` is the script, as a string."
    import pdb

    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()

    try:
        # Copy the caller's globals so the debugged script can't mutate them.
        if globs:
            globs = globs.copy()
        else:
            globs = {}

        if pm:
            # Post-mortem mode: run to completion, then drop into pdb at
            # the point of the first uncaught exception.
            try:
                execfile(srcfilename, globs, globs)
            except:
                print sys.exc_info()[1]
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here.  '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)
    finally:
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug the doctests of object `name` within `module`.

    `module` is a module object or dotted module name; `name` is the
    dotted name (within the module) of the object whose docstring tests
    are debugged.  With pm=True, drop into pdb post-mortem on error.
    """
    module = _normalize_module(module)
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    """Run this module's own doctest suite through unittest."""
    unittest.TextTestRunner().run(DocTestSuite())
if __name__ == "__main__":
    # Self-test: run this module's own doctests when executed directly.
    _test()
|
mit
|
pearlcoin-project/pearlcoin
|
qa/rpc-tests/keypool.py
|
86
|
3165
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the wallet keypool, and interaction with wallet encryption/locking
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class KeyPoolTest(BitcoinTestFramework):
    """Exercise the wallet keypool and its interaction with wallet
    encryption/locking."""

    def _assert_keypool_exhausted(self, action, msg):
        """Call `action` and require it to fail with RPC error -12
        (keypool ran out); raise AssertionError(msg) if it succeeds."""
        try:
            action()
            raise AssertionError(msg)
        except JSONRPCException as e:
            assert(e.error['code'] == -12)

    def run_test(self):
        nodes = self.nodes
        addr_before_encrypting = nodes[0].getnewaddress()
        addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
        wallet_info_old = nodes[0].getwalletinfo()
        assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])

        # Encrypt wallet: the node shuts itself down, so wait for the
        # process to terminate and restart node 0.
        nodes[0].encryptwallet('test')
        bitcoind_processes[0].wait()
        nodes[0] = start_node(0, self.options.tmpdir)

        # Encryption rotates the HD master key; new addresses must derive
        # from the new master key.
        addr = nodes[0].getnewaddress()
        addr_data = nodes[0].validateaddress(addr)
        wallet_info = nodes[0].getwalletinfo()
        assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
        assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])

        self._assert_keypool_exhausted(
            nodes[0].getnewaddress,
            'Keypool should be exhausted after one address')

        # put three new keys in the keypool
        nodes[0].walletpassphrase('test', 12000)
        nodes[0].keypoolrefill(3)
        nodes[0].walletlock()

        # drain the keys and assert that four unique addresses came back
        addrs = set()
        for _ in range(4):
            addrs.add(nodes[0].getrawchangeaddress())
        assert(len(addrs) == 4)

        # the next one should fail
        self._assert_keypool_exhausted(
            nodes[0].getrawchangeaddress,
            'Keypool should be exhausted after three addresses')

        # refill keypool with three new addresses
        nodes[0].walletpassphrase('test', 1)
        nodes[0].keypoolrefill(3)

        # test walletpassphrase timeout
        time.sleep(1.1)
        assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)

        # drain them by mining; the fifth block has no key left to mine to
        for _ in range(4):
            nodes[0].generate(1)
        self._assert_keypool_exhausted(
            lambda: nodes[0].generate(1),
            'Keypool should be exhausted after three addresses')

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = False
        self.num_nodes = 1

    def setup_network(self):
        self.nodes = self.setup_nodes()
if __name__ == '__main__':
    # Standard entry point for Bitcoin Core functional tests.
    KeyPoolTest().main()
|
mit
|
petemounce/ansible
|
lib/ansible/module_utils/rax.py
|
27
|
12110
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import re
from uuid import UUID
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils.six import text_type, binary_type
# Server build states that will not change further without intervention.
FINAL_STATUSES = ('ACTIVE', 'ERROR')
# Possible states of a Cloud Block Storage volume.
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
                 'error', 'error_deleting')

# Load-balancing algorithms accepted by Cloud Load Balancers.
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
                  'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
# Protocols accepted by Cloud Load Balancers.
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
                 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
                 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']

# Types treated as plain data (not methods) when flattening pyrax objects.
NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
# Well-known network UUIDs for Rackspace PublicNet and ServiceNet.
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
    """Prepend a key with rax_ and normalize the key name.

    Every character that is not alphanumeric, an underscore, or a hyphen
    is replaced with an underscore; the result is lowercased and leading
    underscores are stripped before the ``rax_`` prefix is added.
    """
    # FIX: raw string for the pattern — '\w' in a plain string literal is
    # an invalid escape (DeprecationWarning on Python 3.6+, and a
    # SyntaxError in future versions).
    return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def rax_clb_node_to_dict(obj):
    """Return a dict representation of a CLB Node object ({} if falsy)."""
    if not obj:
        return {}
    # to_dict() omits these two attributes, so merge them in explicitly.
    result = obj.to_dict()
    result.update(id=obj.id, weight=obj.weight)
    return result
def rax_to_dict(obj, obj_type='standard'):
    """Generic function to convert a pyrax object to a dict.

    obj_type selects extra handling and must be one of:
        standard
        clb
        server
    """
    instance = {}
    for attribute in dir(obj):
        value = getattr(obj, attribute)
        if obj_type == 'clb' and attribute == 'nodes':
            # Load balancer nodes need their own flattening helper.
            instance[attribute] = [rax_clb_node_to_dict(n) for n in value]
        elif (isinstance(value, list) and len(value) > 0 and
                not isinstance(value[0], NON_CALLABLES)):
            # A list of nested pyrax objects: convert each recursively.
            instance[attribute] = [rax_to_dict(element) for element in value]
        elif isinstance(value, NON_CALLABLES) and not attribute.startswith('_'):
            if obj_type == 'server':
                if attribute == 'image':
                    # An empty image means the server boots from a volume.
                    if not value:
                        instance['rax_boot_source'] = 'volume'
                    else:
                        instance['rax_boot_source'] = 'local'
                attribute = rax_slugify(attribute)
            instance[attribute] = value

    if obj_type == 'server':
        # Mirror a few slugified keys back under their original names.
        for attr in ['id', 'accessIPv4', 'name', 'status']:
            instance[attr] = instance.get(rax_slugify(attr))

    return instance
def rax_find_bootable_volume(module, rax_module, server, exit=True):
    """Find a servers bootable volume.

    Returns the single bootable volume attached to ``server``.  When the
    count is not exactly one, the module is failed (exit=True) or False
    is returned (exit=False).
    """
    cs = rax_module.cloudservers
    cbs = rax_module.cloud_blockstorage
    server_id = rax_module.utils.get_id(server)
    # Resolve every attached volume and keep only the bootable ones.
    bootable = [vol for vol in
                (cbs.get(v) for v in cs.volumes.get_server_volumes(server_id))
                if module.boolean(vol.bootable)]
    if not bootable:
        if exit:
            module.fail_json(msg='No bootable volumes could be found for '
                                 'server %s' % server_id)
        return False
    if len(bootable) > 1:
        if exit:
            module.fail_json(msg='Multiple bootable volumes found for server '
                                 '%s' % server_id)
        return False
    return bootable[0]
def rax_find_image(module, rax_module, image, exit=True):
    """Find a server image by ID or Name.

    ``image`` may be an image UUID, a human_id, or an image name.
    Returns the image ID on success; otherwise either fails the module
    (exit=True) or returns False.
    """
    cs = rax_module.cloudservers
    try:
        # A parseable UUID means the caller already supplied an image ID.
        UUID(image)
    except ValueError:
        # Not an ID: try a human_id lookup first, then fall back to name.
        try:
            image = cs.images.find(human_id=image)
        except(cs.exceptions.NotFound,
               cs.exceptions.NoUniqueMatch):
            try:
                image = cs.images.find(name=image)
            except (cs.exceptions.NotFound,
                    cs.exceptions.NoUniqueMatch):
                if exit:
                    module.fail_json(msg='No matching image found (%s)' %
                                         image)
                else:
                    return False
    return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
    """Find a Block storage volume by ID or name.

    Returns the volume, or None when a by-name lookup finds nothing.
    Other failures while fetching by ID fail the module.
    """
    cbs = rax_module.cloud_blockstorage
    try:
        # A parseable UUID means we were handed a volume ID.
        UUID(name)
        volume = cbs.get(name)
    except ValueError:
        # Not an ID: fall back to finding the volume by its name.
        # NOTE(review): errors from cbs.find() other than NotFound are not
        # caught here (the Exception handler below belongs to the outer
        # try) and will propagate -- confirm this is intended.
        try:
            volume = cbs.find(name=name)
        except rax_module.exc.NotFound:
            volume = None
    except Exception as e:
        module.fail_json(msg='%s' % e)
    return volume
def rax_find_network(module, rax_module, network):
    """Find a cloud network by ID or name.

    The special names 'public' and 'private' resolve to the well-known
    PublicNet/ServiceNet UUIDs.  Returns get_server_networks() for the
    matched network; fails the module when nothing matches.
    """
    cnw = rax_module.cloud_networks
    try:
        # A parseable UUID means the caller supplied a network ID.
        UUID(network)
    except ValueError:
        if network.lower() == 'public':
            return cnw.get_server_networks(PUBLIC_NET_ID)
        elif network.lower() == 'private':
            return cnw.get_server_networks(SERVICE_NET_ID)
        else:
            # Arbitrary name: look the network up by its label.
            try:
                network_obj = cnw.find_network_by_label(network)
            except (rax_module.exceptions.NetworkNotFound,
                    rax_module.exceptions.NetworkLabelNotUnique):
                module.fail_json(msg='No matching network found (%s)' %
                                     network)
            else:
                return cnw.get_server_networks(network_obj)
    else:
        return cnw.get_server_networks(network)
def rax_find_server(module, rax_module, server):
    """Find a Cloud Server by ID or name.

    Fails the module when a name matches zero or multiple servers.
    """
    cs = rax_module.cloudservers
    try:
        # A valid UUID means we can fetch the server directly by ID.
        UUID(server)
        return cs.servers.get(server)
    except ValueError:
        # Not an ID: search by exact name.
        matches = cs.servers.list(search_opts=dict(name='^%s$' % server))
        if not matches:
            module.fail_json(msg='No Server was matched by name, '
                                 'try using the Server ID instead')
        if len(matches) > 1:
            module.fail_json(msg='Multiple servers matched by name, '
                                 'try using the Server ID instead')
        # Exactly one match by name.
        return matches[0]
def rax_find_loadbalancer(module, rax_module, loadbalancer):
    """Find a Cloud Load Balancer by ID or name.

    Tries a direct lookup by ID first; on failure falls back to matching
    the name against every load balancer.  Fails the module when a name
    matches zero or multiple load balancers.
    """
    clb = rax_module.cloud_loadbalancers
    try:
        found = clb.get(loadbalancer)
    except Exception:
        # Not a valid ID (or the lookup failed): match by name instead.
        # The zero/multiple-match checks only apply to this by-name list;
        # a successful get() already returned a single object.
        matches = [lb for lb in clb.list() if lb.name == loadbalancer]
        if not matches:
            module.fail_json(msg='No loadbalancer was matched')
        if len(matches) > 1:
            module.fail_json(msg='Multiple loadbalancers matched')
        # We made it this far, grab the first and hopefully only item
        # in the list
        found = matches[0]
    return found
def rax_argument_spec():
    """Return standard base dictionary used for the argument_spec
    argument in AnsibleModule
    """
    # Every rax_* module shares these authentication/region options.
    spec = {
        'api_key': {'type': 'str', 'aliases': ['password'], 'no_log': True},
        'auth_endpoint': {'type': 'str'},
        'credentials': {'type': 'path', 'aliases': ['creds_file']},
        'env': {'type': 'str'},
        'identity_type': {'type': 'str', 'default': 'rackspace'},
        'region': {'type': 'str'},
        'tenant_id': {'type': 'str'},
        'tenant_name': {'type': 'str'},
        'username': {'type': 'str'},
        'verify_ssl': {'type': 'bool'},
    }
    return spec
def rax_required_together():
    """Return the default list used for the required_together argument to
    AnsibleModule"""
    # Rackspace authentication needs both of these supplied as a pair.
    required_pair = ['api_key', 'username']
    return [required_pair]
def setup_rax_module(module, rax_module, region_required=True):
    """Set up pyrax in a standard way for all modules.

    Credentials are resolved in order of preference: explicit module
    parameters, RAX_* environment variables, then a pyrax credentials
    file / keyring.  Fails the module when no usable credentials are
    found or (optionally) when the region is invalid.  Returns the
    configured ``rax_module``.
    """
    rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
                                               rax_module.USER_AGENT)

    api_key = module.params.get('api_key')
    auth_endpoint = module.params.get('auth_endpoint')
    credentials = module.params.get('credentials')
    env = module.params.get('env')
    identity_type = module.params.get('identity_type')
    region = module.params.get('region')
    tenant_id = module.params.get('tenant_id')
    tenant_name = module.params.get('tenant_name')
    username = module.params.get('username')
    verify_ssl = module.params.get('verify_ssl')

    if env is not None:
        rax_module.set_environment(env)

    rax_module.set_setting('identity_type', identity_type)

    if verify_ssl is not None:
        rax_module.set_setting('verify_ssl', verify_ssl)
    if auth_endpoint is not None:
        rax_module.set_setting('auth_endpoint', auth_endpoint)
    if tenant_id is not None:
        rax_module.set_setting('tenant_id', tenant_id)
    if tenant_name is not None:
        rax_module.set_setting('tenant_name', tenant_name)

    try:
        username = username or os.environ.get('RAX_USERNAME')
        if not username:
            # Fall back to a username stored in the system keyring.
            username = rax_module.get_setting('keyring_username')
            if username:
                api_key = 'USE_KEYRING'
        if not api_key:
            api_key = os.environ.get('RAX_API_KEY')
        credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
                       os.environ.get('RAX_CREDS_FILE'))
        region = (region or os.environ.get('RAX_REGION') or
                  rax_module.get_setting('region'))
    except KeyError as e:
        # BaseException.message does not exist on Python 3; fall back to
        # the exception itself so the handler cannot raise AttributeError.
        module.fail_json(msg='Unable to load %s' % getattr(e, 'message', e))

    try:
        if api_key and username:
            if api_key == 'USE_KEYRING':
                rax_module.keyring_auth(username, region=region)
            else:
                rax_module.set_credentials(username, api_key=api_key,
                                           region=region)
        elif credentials:
            credentials = os.path.expanduser(credentials)
            rax_module.set_credential_file(credentials, region=region)
        else:
            raise Exception('No credentials supplied!')
    except Exception as e:
        # Prefer the legacy .message attribute when present (Python 2);
        # otherwise use repr() so empty messages remain visible.
        message = getattr(e, 'message', None)
        if message:
            msg = str(message)
        else:
            msg = repr(e)
        module.fail_json(msg=msg)

    if region_required and region not in rax_module.regions:
        module.fail_json(msg='%s is not a valid region, must be one of: %s' %
                         (region, ','.join(rax_module.regions)))

    return rax_module
|
gpl-3.0
|
SciTools/cube_browser
|
lib/cube_browser/explorer.py
|
1
|
15222
|
from collections import OrderedDict
import glob
import os
try:
# Python 3
from urllib.parse import urlparse, parse_qs
except ImportError:
# Python 2
from urlparse import urlparse, parse_qs
import IPython.display
import cartopy.crs as ccrs
import ipywidgets
import iris
import iris.plot as iplt
import matplotlib.pyplot as plt
import traitlets
import cube_browser
# Clear output, such as autosave disable notification.
IPython.display.clear_output()
class FilePicker(object):
    """
    File picker widgets.

    Presents a text box for a directory path plus a multi-select list of
    the files found there, and a delete button the owning UI can use to
    remove this picker's tab.
    """
    def __init__(self, initial_value='', default=''):
        # initial_value: starting directory; defaults to the Iris sample
        # data directory when one is configured.
        # default: comma-separated file paths to pre-select.
        if initial_value == '':
            try:
                initial_value = iris.sample_data_path('')
            except ValueError:
                # No sample data available; start with an empty path.
                initial_value = ''
        # Define the file system path for input files.
        self._path = ipywidgets.Text(
            description='Path:',
            value=initial_value,
            width="100%")
        # Observe the path.
        self._path.observe(self._handle_path, names='value')
        # Use default path value to initialise file options.
        options = []
        if os.path.exists(self._path.value):
            options = glob.glob('{}/*'.format(self._path.value))
            options.sort()
        # Keep only requested defaults that actually exist in the listing.
        default_list = []
        for default_value in default.split(','):
            if default_value in options:
                default_list.append(default_value)
        default_tuple = tuple(default_list)
        # Defines the files selected to be loaded.
        self._files = ipywidgets.SelectMultiple(
            description='Files:',
            options=OrderedDict([(os.path.basename(f), f)
                                 for f in options]),
            value=default_tuple,
            width="100%"
        )
        self.deleter = ipywidgets.Button(description='delete tab',
                                         height='32px', width='75px')
        hbox = ipywidgets.HBox(children=[self._files, self.deleter])
        self._box = ipywidgets.Box(children=[self._path, hbox], width="100%")

    @property
    def files(self):
        """The files from the FilePicker."""
        return self._files.value

    def _handle_path(self, sender):
        """Path box action: refresh the file list for the new directory."""
        if os.path.exists(self._path.value):
            options = glob.glob('{}/*'.format(self._path.value))
            options.sort()
            # Clear the selection before swapping in the new options.
            self._files.value = ()
            self._files.options = OrderedDict([(os.path.basename(f), f)
                                               for f in options])
        else:
            # The path does not exist: empty the file list.
            self._files.options = OrderedDict()
        self._files.width = "100%"

    @property
    def box(self):
        """The IPywidgets box to display."""
        return self._box
class PlotControl(object):
    """Control widgets for a plot.

    Bundles the widgets configuring a single cube_browser plot: the cube
    to draw, the plot type, the X/Y coordinates and the colour map.
    """
    def __init__(self):
        # Extra keyword arguments passed through to matplotlib.
        self.mpl_kwargs = {}
        # Defines the cube which is to be plotted.
        self.cube_picker = ipywidgets.Dropdown(description='Cubes:',
                                               options=('None', None),
                                               value=None,
                                               width='50%')
        # Define the type of cube browser plot required
        self.plot_type = ipywidgets.Dropdown(
            description='Plot type:',
            options={'pcolormesh': cube_browser.Pcolormesh,
                     'contour': cube_browser.Contour,
                     'contourf': cube_browser.Contourf},
            value=cube_browser.Pcolormesh)
        self.x_coord = ipywidgets.Dropdown(
            description='X Coord',
            options=('None', None))
        self.y_coord = ipywidgets.Dropdown(
            description='Y Coord',
            options=('None', None))
        self.cmap = ipywidgets.Text(
            description='colour map')
        # Handle events:
        self.cube_picker.observe(self._handle_cube_selection,
                                 names='value')
        self.cmap.observe(self._handle_cmap, names='value')
        self.plot_type.observe(self._handle_plot_type, names='value')
        self._box = ipywidgets.Box(children=[self.cube_picker,
                                             self.plot_type,
                                             self.x_coord,
                                             self.y_coord,
                                             self.cmap])

    def _handle_cube_selection(self, sender):
        """Cube selector action: repopulate the X/Y coordinate options."""
        if self.cube_picker.value is not None:
            # NOTE(review): cube_picker.cubes is assigned externally (by
            # Explorer.update_cubes_list) before a selection can be made.
            cube = self.cube_picker.cubes[self.cube_picker.value]
            # Offer the cube's dimension coordinates plus raw dimension
            # indices as X/Y choices.
            options = [('None', None)]
            options += [(coord.name(), coord.name()) for coord in
                        cube.coords(dim_coords=True)]
            ndims = cube.ndim
            for i in range(ndims):
                options.append(('dim{}'.format(i), i))
            self.x_coord.options = options
            # Default the X selection to the cube's X coordinate when it
            # is available in the option list.
            if (cube.coords(axis='X', dim_coords=True) and
                    cube.coord(axis='X', dim_coords=True).name() in
                    [o[1] for o in self.x_coord.options]):
                default = cube.coord(axis='X', dim_coords=True).name()
                self.x_coord.value = default
            self.y_coord.options = options
            # Likewise for the Y coordinate.
            if (cube.coords(axis='Y', dim_coords=True) and
                    cube.coord(axis='Y', dim_coords=True).name() in
                    [o[1] for o in self.y_coord.options]):
                default = cube.coord(axis='Y', dim_coords=True).name()
                self.y_coord.value = default

    def _handle_cmap(self, sender):
        # This tests that the colour map string is valid: else warns.
        from matplotlib.cm import cmap_d
        cmap_string = self.cmap.value
        if cmap_string and cmap_string in cmap_d.keys():
            self.mpl_kwargs['cmap'] = cmap_string
            self.cmap.description = 'colour map'
        else:
            # Flag the bad name in the widget label rather than raising.
            self.cmap.description = 'not a cmap'

    def _handle_plot_type(self, sender):
        # Changing plot type resets the kwargs, preserving only the cmap.
        cmap = self.cmap.value
        self.mpl_kwargs = {}
        if cmap:
            self.mpl_kwargs['cmap'] = cmap

    @property
    def box(self):
        """The IPywidgets box to display."""
        return self._box
class Explorer(traitlets.HasTraits):
    """
    IPyWidgets and workflow for exploring collections of cubes.

    Builds the file pickers, plot controls and action buttons, displays
    them, and wires widget events through to cube loading and plotting.
    """
    _cubes = traitlets.List()

    def __init__(self, url=''):
        # Optionally seed the file pickers from a query string of the
        # form ?pwd=...&files=...&folders=...
        self.file_pickers = []
        if url:
            o = urlparse(url)
            query = parse_qs(o.query)
            pwd, = query.get('pwd', [''])
            for fname in query.get('files', []):
                self.file_pickers.append(FilePicker(pwd, os.path.join(pwd, fname)))
            for fpath in query.get('folders', []):
                self.file_pickers.append(FilePicker(fpath))
        if not self.file_pickers:
            self.file_pickers.append(FilePicker())
        # Define load action.
        self._load_button = ipywidgets.Button(description="load these files")
        self._load_button.on_click(self._handle_load)
        self._file_tab_button = ipywidgets.Button(description="add tab")
        self._file_tab_button.on_click(self._handle_new_tab)
        self._subplots = ipywidgets.RadioButtons(description='subplots',
                                                 options=[1, 2])
        self._subplots.observe(self._handle_nplots, names='value')
        # Plot action button.
        self._plot_button = ipywidgets.Button(description="Plot my cube")
        self._plot_button.on_click(self._goplot)
        # Configure layout of the Explorer.
        self._plot_container = ipywidgets.Box()
        # Define a Tab container for the main controls in the browse interface.
        children = [fp.box for fp in self.file_pickers]
        self.ftabs = ipywidgets.Tab(children=children)
        children = [self._load_button, self._file_tab_button]
        self.bbox = ipywidgets.HBox(children=children)
        children = [self.ftabs, self.bbox]
        self._file_picker_tabs = ipywidgets.Box(children=children)
        # Define the plot controls, start with 1 (self._subplots default)
        self.plot_controls = [PlotControl()]
        pcc_children = [pc.box for pc in self.plot_controls]
        self._plot_control_container = ipywidgets.Tab(children=pcc_children)
        self._plot_control_container.set_title(0, 'Plot Axes 0')
        # Define an Accordian for files, subplots and plots
        acc_children = [self._file_picker_tabs, self._subplots,
                        self._plot_control_container]
        self._accord = ipywidgets.Accordion(children=acc_children)
        self._accord.set_title(0, 'Files')
        self._accord.set_title(1, 'SubPlots')
        self._accord.set_title(2, 'Plots')
        # Initialise cubes container
        self._cubes = []
        # Display the browse interface.
        IPython.display.display(self._accord)
        IPython.display.display(self._plot_button)
        IPython.display.display(self._plot_container)

    @property
    def mpl_kwargs(self):
        """
        The list of dictionaries of matplotlib keyword arguments in use
        in the PlotControls.
        """
        return [pc.mpl_kwargs for pc in self.plot_controls]

    @property
    def cubes(self):
        """The list of cubes the explorer is currently working with."""
        return self._cubes

    @cubes.setter
    def cubes(self, new_cubes):
        """To update the list of cubes the explorer is working with."""
        self._cubes = new_cubes

    @traitlets.observe('_cubes')
    def update_cubes_list(self, change=None):
        """
        Update the list of cubes available in the Explorer.
        Assigning an updated list into `cubes` automatically runs this.
        """
        # Build options list, using index values into the cube list.
        # This avoids the loading of cube's data payload when the
        # widget tests equality on selection.
        options = [('{}: {}'.format(i, cube.summary(shorten=True)), i)
                   for i, cube in enumerate(self._cubes)]
        for pc in self.plot_controls:
            # Provide the cubes list to the cube_picker, to index into.
            pc.cube_picker.cubes = self._cubes
            # Reset the value via a 'None' option before swapping in the
            # new options, so the widget never holds an invalid value.
            pc.cube_picker.options = [('None', None)] + pc.cube_picker.options
            pc.cube_picker.value = None
            pc.cube_picker.options = [('None', None)] + options
            if options:
                pc.cube_picker.value = options[0][1]
                pc.cube_picker.options = options

    def _handle_load(self, sender):
        """Load button action: load and concatenate the selected files."""
        IPython.display.clear_output()
        sender.description = 'loading......'
        # Flatten the files selected in every picker into a single list.
        # (An explicit loop: the builtin reduce previously used here does
        # not exist on Python 3.)
        selected_files = []
        for fp in self.file_pickers:
            selected_files.extend(list(fp.files))
        # Reassigning into self._cubes updates the cube_pickers.
        self._cubes = iris.load(selected_files)
        self._cubes = self._cubes.concatenate()
        sender.description = 'files loaded, reload'
        IPython.display.clear_output()

    def _handle_new_tab(self, sender):
        """Add new file tab."""
        self.file_pickers.append(FilePicker())
        self._update_filepickers()

    def _update_filepickers(self):
        """Rebuild the file-picker tab widget after an add or delete."""
        # Re-index every picker's delete button so its click handler can
        # identify which tab to remove.  (This previously iterated the
        # widget boxes while mutating a stale 'fp' name, so only the last
        # picker was ever re-wired.)
        for i, fp in enumerate(self.file_pickers):
            fp.deleter.index = i
            fp.deleter.on_click(self._handle_delete_tab)
        children = [fp.box for fp in self.file_pickers]
        self.ftabs = ipywidgets.Tab(children=children)
        self._file_picker_tabs.children = [self.ftabs, self.bbox]

    def _handle_delete_tab(self, sender):
        """remove a file tab"""
        self.file_pickers.pop(sender.index)
        self._update_filepickers()

    def _handle_nplots(self, sender):
        """Resize the plot-control list to match the chosen subplot count."""
        if self._subplots.value == 1:
            self.plot_controls = [self.plot_controls[0]]
        elif self._subplots.value == 2:
            self.plot_controls = [self.plot_controls[0], PlotControl()]
        pcc_children = [pc.box for pc in self.plot_controls]
        self._plot_control_container.children = pcc_children
        for i in range(self._subplots.value):
            label = 'Plot Axes {}'.format(i)
            self._plot_control_container.set_title(i, label)
        self.update_cubes_list()

    def _goplot(self, sender):
        """Create the cube_browser.Plot2D and cube_browser.Browser"""
        IPython.display.clear_output()
        fig = plt.figure(figsize=(16, 7))
        sub_plots = 110
        if self._subplots.value == 2:
            sub_plots = 120
        confs = []
        for spl, pc in enumerate(self.plot_controls):
            spl += 1
            cube = None
            if pc.cube_picker.value is not None:
                cube = self.cubes[pc.cube_picker.value]
            if cube and spl <= self._subplots.value:
                pc_x_name = pc.x_coord.value
                pc_y_name = pc.y_coord.value
                x_coords = cube.coords(axis='X', dim_coords=True)
                if len(x_coords) == 1:
                    x_name = x_coords[0].name()
                else:
                    x_name = None
                y_coords = cube.coords(axis='Y', dim_coords=True)
                if len(y_coords) == 1:
                    y_name = y_coords[0].name()
                else:
                    y_name = None
                if x_name == pc_x_name and y_name == pc_y_name:
                    # Plotting against the cube's horizontal coordinates:
                    # use a geo-referenced axes with coastlines.
                    proj = iplt.default_projection(cube) or ccrs.PlateCarree()
                    ax = fig.add_subplot(sub_plots + spl, projection=proj)
                    # If the spatial extent is small, use high-res coastlines
                    extent = iplt.default_projection_extent(cube)
                    x0, y0 = ccrs.PlateCarree().transform_point(extent[0],
                                                                extent[2],
                                                                proj)
                    x1, y1 = ccrs.PlateCarree().transform_point(extent[1],
                                                                extent[3],
                                                                proj)
                    if x1-x0 < 20 and y1-y0 < 20:
                        ax.coastlines(resolution='10m')
                    elif x1-x0 < 180 and y1-y0 < 90:
                        ax.coastlines(resolution='50m')
                    else:
                        ax.coastlines()
                else:
                    # Non-horizontal axes: a plain matplotlib subplot.
                    # (A redundant plt.gca() call, immediately overwritten,
                    # has been removed here.)
                    ax = fig.add_subplot(sub_plots+spl)
                plot_type = pc.plot_type
                coords = [pc_x_name, pc_y_name]
                confs.append(plot_type.value(cube, ax, coords=coords,
                                             **pc.mpl_kwargs))
                title = cube.name().replace('_', ' ').capitalize()
                ax.set_title(title)
        self.browser = cube_browser.Browser(confs)
        self.browser.on_change(None)
        # For each PlotControl, assign the plot's mpl_kwargs back to
        # that PlotControl.
        for pc, plot in zip(self.plot_controls, confs):
            pc.mpl_kwargs = plot.kwargs
        self._plot_container.children = [self.browser.form]
|
bsd-3-clause
|
meduz/scikit-learn
|
benchmarks/bench_plot_lasso_path.py
|
84
|
4005
|
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
    """Time lars_path and lasso_path (with/without Gram) over a size grid.

    Parameters
    ----------
    samples_range, features_range : sequences of int
        Dataset sizes to benchmark; every (n_samples, n_features)
        combination is timed.

    Returns
    -------
    defaultdict mapping a method label to the list of elapsed seconds,
    in the order the size combinations are visited.
    """
    it = 0

    results = defaultdict(lambda: [])

    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            dataset_kwargs = {
                'n_samples': n_samples,
                'n_features': n_features,
                # Floor division: these parameters must be integers
                # (plain '/' yields floats on Python 3).
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)

            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (without Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (without Gram)'].append(delta)

    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # Use the builtin int: the np.int alias was removed in NumPy 1.24.
    samples_range = np.linspace(10, 2000, 5).astype(int)
    features_range = np.linspace(10, 2000, 5).astype(int)
    results = compute_bench(samples_range, features_range)

    # Common z-axis limit so the four surfaces are visually comparable.
    max_time = max(max(t) for t in results.values())

    fig = plt.figure('scikit-learn Lasso path benchmark results')
    i = 1
    for c, (label, timings) in zip('bcry', sorted(results.items())):
        ax = fig.add_subplot(2, 2, i, projection='3d')
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        # ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        ax.set_zlabel('Time (s)')
        ax.set_zlim3d(0.0, max_time * 1.1)
        ax.set_title(label)
        # ax.legend()
        i += 1
    plt.show()
|
bsd-3-clause
|
naturali/tensorflow
|
tensorflow/contrib/learn/python/learn/__init__.py
|
4
|
2319
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning with TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import estimators
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import learn_io as io
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.contrib.learn.python.learn import preprocessing
from tensorflow.contrib.learn.python.learn import utils
from tensorflow.contrib.learn.python.learn.dataframe import *
from tensorflow.contrib.learn.python.learn.estimators import *
from tensorflow.contrib.learn.python.learn.evaluable import Evaluable
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
from tensorflow.contrib.learn.python.learn.graph_actions import run_feeds
from tensorflow.contrib.learn.python.learn.graph_actions import run_n
from tensorflow.contrib.learn.python.learn.graph_actions import train
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.contrib.learn.python.learn.monitors import NanLossDuringTrainingError
from tensorflow.contrib.learn.python.learn.trainable import Trainable
# pylint: enable=wildcard-import
|
apache-2.0
|
aaronsw/watchdog
|
vendor/pyExcelerator-0.6.3a/build/lib/pyExcelerator/Worksheet.py
|
3
|
43893
|
#!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Roman V. Kiseliov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <[email protected]>."
#
# 4. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <[email protected]>."
#
# THIS SOFTWARE IS PROVIDED BY Roman V. Kiseliov ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Roman V. Kiseliov OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
'''
BOF
UNCALCED
INDEX
Calculation Settings Block
PRINTHEADERS
PRINTGRIDLINES
GRIDSET
GUTS
DEFAULTROWHEIGHT
WSBOOL
Page Settings Block
Worksheet Protection Block
DEFCOLWIDTH
COLINFO
SORT
DIMENSIONS
Row Blocks
WINDOW2
SCL
PANE
SELECTION
STANDARDWIDTH
MERGEDCELLS
LABELRANGES
PHONETIC
Conditional Formatting Table
Hyperlink Table
Data Validity Table
SHEETLAYOUT (BIFF8X only)
SHEETPROTECTION (BIFF8X only)
RANGEPROTECTION (BIFF8X only)
EOF
'''
__rev_id__ = """$Id: Worksheet.py,v 1.7 2005/08/11 08:53:48 rvk Exp $"""
import BIFFRecords
import Bitmap
import Formatting
import Style
from Deco import *
class Worksheet(object):
from Workbook import Workbook
#################################################################
## Constructor
#################################################################
@accepts(object, (str, unicode), Workbook)
def __init__(self, sheetname, parent_book):
import Row
self.Row = Row.Row
import Column
self.Column = Column.Column
self.__name = sheetname
self.__parent = parent_book
self.__rows = {}
self.__cols = {}
self.__merged_ranges = []
self.__bmp_rec = ''
self.__show_formulas = 0
self.__show_grid = 1
self.__show_headers = 1
self.__panes_frozen = 0
self.__show_empty_as_zero = 1
self.__auto_colour_grid = 1
self.__cols_right_to_left = 0
self.__show_outline = 1
self.__remove_splits = 0
self.__selected = 0
self.__hidden = 0
self.__page_preview = 0
self.__first_visible_row = 0
self.__first_visible_col = 0
self.__grid_colour = 0x40
self.__preview_magn = 0
self.__normal_magn = 0
self.__vert_split_pos = None
self.__horz_split_pos = None
self.__vert_split_first_visible = None
self.__horz_split_first_visible = None
self.__split_active_pane = None
self.__row_gut_width = 0
self.__col_gut_height = 0
self.__show_auto_page_breaks = 1
self.__dialogue_sheet = 0
self.__auto_style_outline = 0
self.__outline_below = 0
self.__outline_right = 0
self.__fit_num_pages = 0
self.__show_row_outline = 1
self.__show_col_outline = 1
self.__alt_expr_eval = 0
self.__alt_formula_entries = 0
self.__row_default_height = 0x00FF
self.__col_default_width = 0x0008
self.__calc_mode = 1
self.__calc_count = 0x0064
self.__RC_ref_mode = 1
self.__iterations_on = 0
self.__delta = 0.001
self.__save_recalc = 0
self.__print_headers = 0
self.__print_grid = 0
self.__grid_set = 1
self.__vert_page_breaks = []
self.__horz_page_breaks = []
self.__header_str = '&P'
self.__footer_str = '&F'
self.__print_centered_vert = 0
self.__print_centered_horz = 1
self.__left_margin = 0.3 #0.5
self.__right_margin = 0.3 #0.5
self.__top_margin = 0.61 #1.0
self.__bottom_margin = 0.37 #1.0
self.__paper_size_code = 9 # A4
self.__print_scaling = 100
self.__start_page_number = 1
self.__fit_width_to_pages = 1
self.__fit_height_to_pages = 1
self.__print_in_rows = 1
self.__portrait = 1
self.__print_not_colour = 0
self.__print_draft = 0
self.__print_notes = 0
self.__print_notes_at_end = 0
self.__print_omit_errors = 0
self.__print_hres = 0x012C # 300 dpi
self.__print_vres = 0x012C # 300 dpi
self.__header_margin = 0.1
self.__footer_margin = 0.1
self.__copies_num = 1
self.__wnd_protect = 0
self.__obj_protect = 0
self.__protect = 0
self.__scen_protect = 0
self.__password = ''
#################################################################
## Properties, "getters", "setters"
#################################################################
@accepts(object, (str, unicode))
def set_name(self, value):
self.__name = value
def get_name(self):
return self.__name
name = property(get_name, set_name)
#################################################################
def get_parent(self):
return self.__parent
parent = property(get_parent)
#################################################################
def get_rows(self):
return self.__rows
rows = property(get_rows)
#################################################################
def get_cols(self):
return self.__cols
cols = property(get_cols)
#################################################################
def get_merged_ranges(self):
return self.__merged_ranges
merged_ranges = property(get_merged_ranges)
#################################################################
def get_bmp_rec(self):
return self.__bmp_rec
bmp_rec = property(get_bmp_rec)
#################################################################
@accepts(object, bool)
def set_show_formulas(self, value):
self.__show_formulas = int(value)
def get_show_formulas(self):
return bool(self.__show_formulas)
show_formulas = property(get_show_formulas, set_show_formulas)
#################################################################
@accepts(object, bool)
def set_show_grid(self, value):
self.__show_grid = int(value)
def get_show_grid(self):
return bool(self.__show_grid)
show_grid = property(get_show_grid, set_show_grid)
#################################################################
@accepts(object, bool)
def set_show_headers(self, value):
self.__show_headers = int(value)
def get_show_headers(self):
return bool(self.__show_headers)
show_headers = property(get_show_headers, set_show_headers)
    # ------------------------------------------------------------------
    # View / pane-split options, serialised by __window2_rec() and
    # __panes_rec() below.  Boolean options are stored internally as
    # ints (0/1) so they can be OR-ed straight into BIFF bit masks; the
    # getters convert back to bool.  @accepts is a project decorator
    # that validates argument types at call time.
    # ------------------------------------------------------------------
    #################################################################
    @accepts(object, bool)
    def set_panes_frozen(self, value):
        self.__panes_frozen = int(value)
    def get_panes_frozen(self):
        return bool(self.__panes_frozen)
    panes_frozen = property(get_panes_frozen, set_panes_frozen)
    #################################################################
    @accepts(object, bool)
    def set_show_empty_as_zero(self, value):
        self.__show_empty_as_zero = int(value)
    def get_show_empty_as_zero(self):
        return bool(self.__show_empty_as_zero)
    show_empty_as_zero = property(get_show_empty_as_zero, set_show_empty_as_zero)
    #################################################################
    @accepts(object, bool)
    def set_auto_colour_grid(self, value):
        self.__auto_colour_grid = int(value)
    def get_auto_colour_grid(self):
        return bool(self.__auto_colour_grid)
    auto_colour_grid = property(get_auto_colour_grid, set_auto_colour_grid)
    #################################################################
    @accepts(object, bool)
    def set_cols_right_to_left(self, value):
        self.__cols_right_to_left = int(value)
    def get_cols_right_to_left(self):
        return bool(self.__cols_right_to_left)
    cols_right_to_left = property(get_cols_right_to_left, set_cols_right_to_left)
    #################################################################
    @accepts(object, bool)
    def set_show_outline(self, value):
        self.__show_outline = int(value)
    def get_show_outline(self):
        return bool(self.__show_outline)
    show_outline = property(get_show_outline, set_show_outline)
    #################################################################
    @accepts(object, bool)
    def set_remove_splits(self, value):
        self.__remove_splits = int(value)
    def get_remove_splits(self):
        return bool(self.__remove_splits)
    remove_splits = property(get_remove_splits, set_remove_splits)
    #################################################################
    @accepts(object, bool)
    def set_selected(self, value):
        self.__selected = int(value)
    def get_selected(self):
        return bool(self.__selected)
    selected = property(get_selected, set_selected)
    #################################################################
    @accepts(object, bool)
    def set_hidden(self, value):
        self.__hidden = int(value)
    def get_hidden(self):
        return bool(self.__hidden)
    hidden = property(get_hidden, set_hidden)
    #################################################################
    @accepts(object, bool)
    def set_page_preview(self, value):
        self.__page_preview = int(value)
    def get_page_preview(self):
        return bool(self.__page_preview)
    page_preview = property(get_page_preview, set_page_preview)
    #################################################################
    # Scroll position and display parameters passed verbatim to the
    # WINDOW2 record (see __window2_rec).
    @accepts(object, int)
    def set_first_visible_row(self, value):
        self.__first_visible_row = value
    def get_first_visible_row(self):
        return self.__first_visible_row
    first_visible_row = property(get_first_visible_row, set_first_visible_row)
    #################################################################
    @accepts(object, int)
    def set_first_visible_col(self, value):
        self.__first_visible_col = value
    def get_first_visible_col(self):
        return self.__first_visible_col
    first_visible_col = property(get_first_visible_col, set_first_visible_col)
    #################################################################
    @accepts(object, int)
    def set_grid_colour(self, value):
        self.__grid_colour = value
    def get_grid_colour(self):
        return self.__grid_colour
    grid_colour = property(get_grid_colour, set_grid_colour)
    #################################################################
    @accepts(object, int)
    def set_preview_magn(self, value):
        self.__preview_magn = value
    def get_preview_magn(self):
        return self.__preview_magn
    preview_magn = property(get_preview_magn, set_preview_magn)
    #################################################################
    @accepts(object, int)
    def set_normal_magn(self, value):
        self.__normal_magn = value
    def get_normal_magn(self):
        return self.__normal_magn
    normal_magn = property(get_normal_magn, set_normal_magn)
    #################################################################
    # Pane-split positions.  Negative input is silently folded positive
    # via abs().  NOTE(review): __panes_rec() rescales these in place
    # for non-frozen splits, so the stored value may change after the
    # sheet is serialised once.
    @accepts(object, int)
    def set_vert_split_pos(self, value):
        self.__vert_split_pos = abs(value)
    def get_vert_split_pos(self):
        return self.__vert_split_pos
    vert_split_pos = property(get_vert_split_pos, set_vert_split_pos)
    #################################################################
    @accepts(object, int)
    def set_horz_split_pos(self, value):
        self.__horz_split_pos = abs(value)
    def get_horz_split_pos(self):
        return self.__horz_split_pos
    horz_split_pos = property(get_horz_split_pos, set_horz_split_pos)
    #################################################################
    @accepts(object, int)
    def set_vert_split_first_visible(self, value):
        self.__vert_split_first_visible = abs(value)
    def get_vert_split_first_visible(self):
        return self.__vert_split_first_visible
    vert_split_first_visible = property(get_vert_split_first_visible, set_vert_split_first_visible)
    #################################################################
    @accepts(object, int)
    def set_horz_split_first_visible(self, value):
        self.__horz_split_first_visible = abs(value)
    def get_horz_split_first_visible(self):
        return self.__horz_split_first_visible
    horz_split_first_visible = property(get_horz_split_first_visible, set_horz_split_first_visible)
#################################################################
#@accepts(object, int)
#def set_split_active_pane(self, value):
# self.__split_active_pane = abs(value) & 0x03
#
#def get_split_active_pane(self):
# return self.__split_active_pane
#
#split_active_pane = property(get_split_active_pane, set_split_active_pane)
#################################################################
#@accepts(object, int)
#def set_row_gut_width(self, value):
# self.__row_gut_width = value
#
#def get_row_gut_width(self):
# return self.__row_gut_width
#
#row_gut_width = property(get_row_gut_width, set_row_gut_width)
#
#################################################################
#
#@accepts(object, int)
#def set_col_gut_height(self, value):
# self.__col_gut_height = value
#
#def get_col_gut_height(self):
# return self.__col_gut_height
#
#col_gut_height = property(get_col_gut_height, set_col_gut_height)
#
#################################################################
    # ------------------------------------------------------------------
    # Sheet-level boolean options (serialised into the WSBOOL bit mask
    # by __wsbool_rec), default row/column sizes, and the calculation
    # settings written by __calc_settings_rec.  Same int/bool storage
    # convention as the view options above.
    # ------------------------------------------------------------------
    @accepts(object, bool)
    def set_show_auto_page_breaks(self, value):
        self.__show_auto_page_breaks = int(value)
    def get_show_auto_page_breaks(self):
        return bool(self.__show_auto_page_breaks)
    show_auto_page_breaks = property(get_show_auto_page_breaks, set_show_auto_page_breaks)
    #################################################################
    @accepts(object, bool)
    def set_dialogue_sheet(self, value):
        self.__dialogue_sheet = int(value)
    def get_dialogue_sheet(self):
        return bool(self.__dialogue_sheet)
    dialogue_sheet = property(get_dialogue_sheet, set_dialogue_sheet)
    #################################################################
    @accepts(object, bool)
    def set_auto_style_outline(self, value):
        self.__auto_style_outline = int(value)
    def get_auto_style_outline(self):
        return bool(self.__auto_style_outline)
    auto_style_outline = property(get_auto_style_outline, set_auto_style_outline)
    #################################################################
    @accepts(object, bool)
    def set_outline_below(self, value):
        self.__outline_below = int(value)
    def get_outline_below(self):
        return bool(self.__outline_below)
    outline_below = property(get_outline_below, set_outline_below)
    #################################################################
    @accepts(object, bool)
    def set_outline_right(self, value):
        self.__outline_right = int(value)
    def get_outline_right(self):
        return bool(self.__outline_right)
    outline_right = property(get_outline_right, set_outline_right)
    #################################################################
    # NOTE(review): stored as an int but consumed as a single bit in
    # __wsbool_rec (`& 0x01`), i.e. it effectively acts as an on/off
    # fit-to-pages flag there.
    @accepts(object, int)
    def set_fit_num_pages(self, value):
        self.__fit_num_pages = value
    def get_fit_num_pages(self):
        return self.__fit_num_pages
    fit_num_pages = property(get_fit_num_pages, set_fit_num_pages)
    #################################################################
    @accepts(object, bool)
    def set_show_row_outline(self, value):
        self.__show_row_outline = int(value)
    def get_show_row_outline(self):
        return bool(self.__show_row_outline)
    show_row_outline = property(get_show_row_outline, set_show_row_outline)
    #################################################################
    @accepts(object, bool)
    def set_show_col_outline(self, value):
        self.__show_col_outline = int(value)
    def get_show_col_outline(self):
        return bool(self.__show_col_outline)
    show_col_outline = property(get_show_col_outline, set_show_col_outline)
    #################################################################
    @accepts(object, bool)
    def set_alt_expr_eval(self, value):
        self.__alt_expr_eval = int(value)
    def get_alt_expr_eval(self):
        return bool(self.__alt_expr_eval)
    alt_expr_eval = property(get_alt_expr_eval, set_alt_expr_eval)
    #################################################################
    @accepts(object, bool)
    def set_alt_formula_entries(self, value):
        self.__alt_formula_entries = int(value)
    def get_alt_formula_entries(self):
        return bool(self.__alt_formula_entries)
    alt_formula_entries = property(get_alt_formula_entries, set_alt_formula_entries)
    #################################################################
    @accepts(object, int)
    def set_row_default_height(self, value):
        self.__row_default_height = value
    def get_row_default_height(self):
        return self.__row_default_height
    row_default_height = property(get_row_default_height, set_row_default_height)
    #################################################################
    @accepts(object, int)
    def set_col_default_width(self, value):
        self.__col_default_width = value
    def get_col_default_width(self):
        return self.__col_default_width
    col_default_width = property(get_col_default_width, set_col_default_width)
    #################################################################
    # Calculation mode is masked to the low two bits on assignment.
    @accepts(object, int)
    def set_calc_mode(self, value):
        self.__calc_mode = value & 0x03
    def get_calc_mode(self):
        return self.__calc_mode
    calc_mode = property(get_calc_mode, set_calc_mode)
    #################################################################
    @accepts(object, int)
    def set_calc_count(self, value):
        self.__calc_count = value
    def get_calc_count(self):
        return self.__calc_count
    calc_count = property(get_calc_count, set_calc_count)
    #################################################################
    @accepts(object, bool)
    def set_RC_ref_mode(self, value):
        self.__RC_ref_mode = int(value)
    def get_RC_ref_mode(self):
        return bool(self.__RC_ref_mode)
    RC_ref_mode = property(get_RC_ref_mode, set_RC_ref_mode)
    #################################################################
    @accepts(object, bool)
    def set_iterations_on(self, value):
        self.__iterations_on = int(value)
    def get_iterations_on(self):
        return bool(self.__iterations_on)
    iterations_on = property(get_iterations_on, set_iterations_on)
    #################################################################
    # Float value passed straight into the DELTA record (see
    # __calc_settings_rec).
    @accepts(object, float)
    def set_delta(self, value):
        self.__delta = value
    def get_delta(self):
        return self.__delta
    delta = property(get_delta, set_delta)
    #################################################################
    @accepts(object, bool)
    def set_save_recalc(self, value):
        self.__save_recalc = int(value)
    def get_save_recalc(self):
        return bool(self.__save_recalc)
    save_recalc = property(get_save_recalc, set_save_recalc)
    #################################################################
    @accepts(object, bool)
    def set_print_headers(self, value):
        self.__print_headers = int(value)
    def get_print_headers(self):
        return bool(self.__print_headers)
    print_headers = property(get_print_headers, set_print_headers)
    #################################################################
    @accepts(object, bool)
    def set_print_grid(self, value):
        self.__print_grid = int(value)
    def get_print_grid(self):
        return bool(self.__print_grid)
    print_grid = property(get_print_grid, set_print_grid)
#################################################################
#
#@accepts(object, bool)
#def set_grid_set(self, value):
# self.__grid_set = int(value)
#
#def get_grid_set(self):
# return bool(self.__grid_set)
#
#grid_set = property(get_grid_set, set_grid_set)
#
#################################################################
    # ------------------------------------------------------------------
    # Page-break lists, header/footer strings, print layout (margins,
    # paper, scaling) and sheet-protection options.  All of these are
    # serialised by __print_settings_rec() / __protection_rec() below.
    # ------------------------------------------------------------------
    @accepts(object, list)
    def set_vert_page_breaks(self, value):
        self.__vert_page_breaks = value
    def get_vert_page_breaks(self):
        return self.__vert_page_breaks
    vert_page_breaks = property(get_vert_page_breaks, set_vert_page_breaks)
    #################################################################
    @accepts(object, list)
    def set_horz_page_breaks(self, value):
        self.__horz_page_breaks = value
    def get_horz_page_breaks(self):
        return self.__horz_page_breaks
    horz_page_breaks = property(get_horz_page_breaks, set_horz_page_breaks)
    #################################################################
    @accepts(object, (str, unicode))
    def set_header_str(self, value):
        self.__header_str = value
    def get_header_str(self):
        return self.__header_str
    header_str = property(get_header_str, set_header_str)
    #################################################################
    @accepts(object, (str, unicode))
    def set_footer_str(self, value):
        self.__footer_str = value
    def get_footer_str(self):
        return self.__footer_str
    footer_str = property(get_footer_str, set_footer_str)
    #################################################################
    @accepts(object, bool)
    def set_print_centered_vert(self, value):
        self.__print_centered_vert = int(value)
    def get_print_centered_vert(self):
        return bool(self.__print_centered_vert)
    print_centered_vert = property(get_print_centered_vert, set_print_centered_vert)
    #################################################################
    @accepts(object, bool)
    def set_print_centered_horz(self, value):
        self.__print_centered_horz = int(value)
    def get_print_centered_horz(self):
        return bool(self.__print_centered_horz)
    print_centered_horz = property(get_print_centered_horz, set_print_centered_horz)
    #################################################################
    @accepts(object, float)
    def set_left_margin(self, value):
        self.__left_margin = value
    def get_left_margin(self):
        return self.__left_margin
    left_margin = property(get_left_margin, set_left_margin)
    #################################################################
    @accepts(object, float)
    def set_right_margin(self, value):
        self.__right_margin = value
    def get_right_margin(self):
        return self.__right_margin
    right_margin = property(get_right_margin, set_right_margin)
    #################################################################
    @accepts(object, float)
    def set_top_margin(self, value):
        self.__top_margin = value
    def get_top_margin(self):
        return self.__top_margin
    top_margin = property(get_top_margin, set_top_margin)
    #################################################################
    @accepts(object, float)
    def set_bottom_margin(self, value):
        self.__bottom_margin = value
    def get_bottom_margin(self):
        return self.__bottom_margin
    bottom_margin = property(get_bottom_margin, set_bottom_margin)
    #################################################################
    @accepts(object, int)
    def set_paper_size_code(self, value):
        self.__paper_size_code = value
    def get_paper_size_code(self):
        return self.__paper_size_code
    paper_size_code = property(get_paper_size_code, set_paper_size_code)
    #################################################################
    @accepts(object, int)
    def set_print_scaling(self, value):
        self.__print_scaling = value
    def get_print_scaling(self):
        return self.__print_scaling
    print_scaling = property(get_print_scaling, set_print_scaling)
    #################################################################
    @accepts(object, int)
    def set_start_page_number(self, value):
        self.__start_page_number = value
    def get_start_page_number(self):
        return self.__start_page_number
    start_page_number = property(get_start_page_number, set_start_page_number)
    #################################################################
    @accepts(object, int)
    def set_fit_width_to_pages(self, value):
        self.__fit_width_to_pages = value
    def get_fit_width_to_pages(self):
        return self.__fit_width_to_pages
    fit_width_to_pages = property(get_fit_width_to_pages, set_fit_width_to_pages)
    #################################################################
    @accepts(object, int)
    def set_fit_height_to_pages(self, value):
        self.__fit_height_to_pages = value
    def get_fit_height_to_pages(self):
        return self.__fit_height_to_pages
    fit_height_to_pages = property(get_fit_height_to_pages, set_fit_height_to_pages)
    #################################################################
    @accepts(object, bool)
    def set_print_in_rows(self, value):
        self.__print_in_rows = int(value)
    def get_print_in_rows(self):
        return bool(self.__print_in_rows)
    print_in_rows = property(get_print_in_rows, set_print_in_rows)
    #################################################################
    @accepts(object, bool)
    def set_portrait(self, value):
        self.__portrait = int(value)
    def get_portrait(self):
        return bool(self.__portrait)
    portrait = property(get_portrait, set_portrait)
    #################################################################
    # Stored inverted (as __print_not_colour): the setter and getter
    # both negate, so callers see a positive "print in colour" flag
    # while the serialised bit (see __print_settings_rec) means
    # "do NOT print in colour".
    @accepts(object, bool)
    def set_print_colour(self, value):
        self.__print_not_colour = int(not value)
    def get_print_colour(self):
        return not bool(self.__print_not_colour)
    print_colour = property(get_print_colour, set_print_colour)
    #################################################################
    @accepts(object, bool)
    def set_print_draft(self, value):
        self.__print_draft = int(value)
    def get_print_draft(self):
        return bool(self.__print_draft)
    print_draft = property(get_print_draft, set_print_draft)
    #################################################################
    @accepts(object, bool)
    def set_print_notes(self, value):
        self.__print_notes = int(value)
    def get_print_notes(self):
        return bool(self.__print_notes)
    print_notes = property(get_print_notes, set_print_notes)
    #################################################################
    @accepts(object, bool)
    def set_print_notes_at_end(self, value):
        self.__print_notes_at_end = int(value)
    def get_print_notes_at_end(self):
        return bool(self.__print_notes_at_end)
    print_notes_at_end = property(get_print_notes_at_end, set_print_notes_at_end)
    #################################################################
    @accepts(object, bool)
    def set_print_omit_errors(self, value):
        self.__print_omit_errors = int(value)
    def get_print_omit_errors(self):
        return bool(self.__print_omit_errors)
    print_omit_errors = property(get_print_omit_errors, set_print_omit_errors)
    #################################################################
    @accepts(object, int)
    def set_print_hres(self, value):
        self.__print_hres = value
    def get_print_hres(self):
        return self.__print_hres
    print_hres = property(get_print_hres, set_print_hres)
    #################################################################
    @accepts(object, int)
    def set_print_vres(self, value):
        self.__print_vres = value
    def get_print_vres(self):
        return self.__print_vres
    print_vres = property(get_print_vres, set_print_vres)
    #################################################################
    @accepts(object, float)
    def set_header_margin(self, value):
        self.__header_margin = value
    def get_header_margin(self):
        return self.__header_margin
    header_margin = property(get_header_margin, set_header_margin)
    #################################################################
    @accepts(object, float)
    def set_footer_margin(self, value):
        self.__footer_margin = value
    def get_footer_margin(self):
        return self.__footer_margin
    footer_margin = property(get_footer_margin, set_footer_margin)
    #################################################################
    @accepts(object, int)
    def set_copies_num(self, value):
        self.__copies_num = value
    def get_copies_num(self):
        return self.__copies_num
    copies_num = property(get_copies_num, set_copies_num)
    ##################################################################
    # Sheet-protection flags, serialised by __protection_rec().
    @accepts(object, bool)
    def set_wnd_protect(self, value):
        self.__wnd_protect = int(value)
    def get_wnd_protect(self):
        return bool(self.__wnd_protect)
    wnd_protect = property(get_wnd_protect, set_wnd_protect)
    #################################################################
    @accepts(object, bool)
    def set_obj_protect(self, value):
        self.__obj_protect = int(value)
    def get_obj_protect(self):
        return bool(self.__obj_protect)
    obj_protect = property(get_obj_protect, set_obj_protect)
    #################################################################
    @accepts(object, bool)
    def set_protect(self, value):
        self.__protect = int(value)
    def get_protect(self):
        return bool(self.__protect)
    protect = property(get_protect, set_protect)
    #################################################################
    @accepts(object, bool)
    def set_scen_protect(self, value):
        self.__scen_protect = int(value)
    def get_scen_protect(self):
        return bool(self.__scen_protect)
    scen_protect = property(get_scen_protect, set_scen_protect)
    #################################################################
    # NOTE(review): the password is stored as given; hashing/encoding,
    # if any, presumably happens in BIFFRecords.PasswordRecord -- confirm.
    @accepts(object, str)
    def set_password(self, value):
        self.__password = value
    def get_password(self):
        return self.__password
    password = property(get_password, set_password)
##################################################################
## Methods
##################################################################
    def get_parent(self):
        """Return the parent object this sheet was created with
        (the owning workbook; set in __init__, not visible here)."""
        return self.__parent
    def write(self, r, c, label="", style=Style.XFStyle()):
        """Write *label* into cell (row *r*, column *c*) using *style*.

        NOTE(review): the default style is a single XFStyle instance
        created once at class-definition time and shared by every call
        that omits *style*; mutating it would affect all such callers.
        """
        self.row(r).write(c, label, style)
def merge(self, r1, r2, c1, c2, style=Style.XFStyle()):
self.row(r1).write_blanks(c1, c2, style)
for r in range(r1+1, r2+1):
self.row(r).write_blanks(c1, c2, style)
self.__merged_ranges.append((r1, r2, c1, c2))
    def write_merge(self, r1, r2, c1, c2, label="", style=Style.XFStyle()):
        """Merge the range (r1..r2, c1..c2) and write *label* into its
        top-left cell (r1, c1).

        NOTE(review): shares the class-definition-time default style
        instance -- see write().
        """
        self.merge(r1, r2, c1, c2, style)
        self.write(r1, c1, label, style)
    def insert_bitmap(self, filename, row, col, x = 0, y = 0, scale_x = 1, scale_y = 1):
        """Insert the BMP image at *filename*, anchored at (row, col).

        *x*, *y* offset the image within the anchor cell; *scale_x* /
        *scale_y* scale it.  The resulting OBJ and IMDATA bytes are
        appended to the sheet's bitmap buffer, which __bitmaps_rec()
        later emits into the BIFF stream.
        """
        bmp = Bitmap.ImDataBmpRecord(filename)
        obj = Bitmap.ObjBmpRecord(row, col, self, bmp, x, y, scale_x, scale_y)
        self.__bmp_rec += obj.get() + bmp.get()
def col(self, indx):
if indx not in self.__cols:
self.__cols[indx] = self.Column(indx, self)
return self.__cols[indx]
def row(self, indx):
if indx not in self.__rows:
self.__rows[indx] = self.Row(indx, self)
return self.__rows[indx]
def row_height(self, row): # in pixels
if row in self.__rows:
return self.__rows[row].get_height_in_pixels()
else:
return 17
    def col_width(self, col): # in pixels
        """Display width of *col* in pixels.

        The per-column lookup is deliberately disabled (commented out
        below), so every column currently reports the default 64 px.
        """
        #if col in self.__cols:
        #    return self.__cols[col].width_in_pixels()
        #else:
        return 64
def get_labels_count(self):
result = 0
for r in self.__rows:
result += self.__rows[r].get_str_count()
return result
##################################################################
## BIFF records generation
##################################################################
    def __bof_rec(self):
        """BOF record marking the start of this worksheet's substream
        in the BIFF8 workbook stream."""
        return BIFFRecords.Biff8BOFRecord(BIFFRecords.Biff8BOFRecord.WORKSHEET).get()
def __guts_rec(self):
row_visible_levels = 0
if len(self.__rows) != 0:
row_visible_levels = max([self.__rows[r].level for r in self.__rows]) + 1
col_visible_levels = 0
if len(self.__cols) != 0:
col_visible_levels = max([self.__cols[c].level for c in self.__cols]) + 1
return BIFFRecords.GutsRecord(self.__row_gut_width, self.__col_gut_height, row_visible_levels, col_visible_levels).get()
    def __wsbool_rec(self):
        """WSBOOL record: sheet-level boolean options packed into a bit
        mask (shift amounts give the bit positions).

        Note that fit_num_pages, although stored as an int, is masked
        to a single bit here and so contributes only an on/off
        fit-to-pages flag.
        """
        options = 0x00
        options |= (self.__show_auto_page_breaks & 0x01) << 0
        options |= (self.__dialogue_sheet & 0x01) << 4
        options |= (self.__auto_style_outline & 0x01) << 5
        options |= (self.__outline_below & 0x01) << 6
        options |= (self.__outline_right & 0x01) << 7
        options |= (self.__fit_num_pages & 0x01) << 8
        options |= (self.__show_row_outline & 0x01) << 10
        options |= (self.__show_col_outline & 0x01) << 11
        options |= (self.__alt_expr_eval & 0x01) << 14
        options |= (self.__alt_formula_entries & 0x01) << 15
        return BIFFRecords.WSBoolRecord(options).get()
    def __eof_rec(self):
        """EOF record terminating this worksheet's substream."""
        return BIFFRecords.EOFRecord().get()
def __colinfo_rec(self):
result = ''
for col in self.__cols:
result += self.__cols[col].get_biff_record()
return result
def __dimensions_rec(self):
first_used_row = 0
last_used_row = 0
first_used_col = 0
last_used_col = 0
if len(self.__rows) > 0:
first_used_row = min(self.__rows)
last_used_row = max(self.__rows)
first_used_col = 0xFFFFFFFF
last_used_col = 0
for r in self.__rows:
_min = self.__rows[r].get_min_col()
_max = self.__rows[r].get_max_col()
if _min < first_used_col:
first_used_col = _min
if _max > last_used_col:
last_used_col = _max
return BIFFRecords.DimensionsRecord(first_used_row, last_used_row, first_used_col, last_used_col).get()
    def __window2_rec(self):
        """WINDOW2 record: per-sheet view options packed into a bit
        mask (one option per bit, shift amounts give the positions),
        followed by the scroll position, grid colour and the two zoom
        factors."""
        options = 0
        options |= (self.__show_formulas & 0x01) << 0
        options |= (self.__show_grid & 0x01) << 1
        options |= (self.__show_headers & 0x01) << 2
        options |= (self.__panes_frozen & 0x01) << 3
        options |= (self.__show_empty_as_zero & 0x01) << 4
        options |= (self.__auto_colour_grid & 0x01) << 5
        options |= (self.__cols_right_to_left & 0x01) << 6
        options |= (self.__show_outline & 0x01) << 7
        options |= (self.__remove_splits & 0x01) << 8
        options |= (self.__selected & 0x01) << 9
        options |= (self.__hidden & 0x01) << 10
        options |= (self.__page_preview & 0x01) << 11
        return BIFFRecords.Window2Record(options, self.__first_visible_row, self.__first_visible_col,
                self.__grid_colour,
                self.__preview_magn, self.__normal_magn).get()
    def __panes_rec(self):
        """PANE record for a split or frozen view; empty string when no
        split position is set.

        NOTE(review): this method mutates the split attributes in place
        (None defaults are filled in and, for non-frozen splits, the
        positions are rescaled to window coordinate units), so calling
        it a second time would rescale the already-rescaled values.
        """
        if self.__vert_split_pos is None and self.__horz_split_pos is None:
            return ""
        if self.__vert_split_pos is None:
            self.__vert_split_pos = 0
        if self.__horz_split_pos is None:
            self.__horz_split_pos = 0
        if self.__panes_frozen:
            # Frozen panes: the first visible row/col of the lower/right
            # panes defaults to the split position itself.
            if self.__vert_split_first_visible is None:
                self.__vert_split_first_visible = self.__vert_split_pos
            if self.__horz_split_first_visible is None:
                self.__horz_split_first_visible = self.__horz_split_pos
        else:
            if self.__vert_split_first_visible is None:
                self.__vert_split_first_visible = 0
            if self.__horz_split_first_visible is None:
                self.__horz_split_first_visible = 0
            # inspired by pyXLWriter
            # Convert row/col counts to window units (empirical factors).
            self.__horz_split_pos = 20*self.__horz_split_pos + 255
            self.__vert_split_pos = 113.879*self.__vert_split_pos + 390
        # Active-pane code derived from which splits are present.
        if self.__vert_split_pos > 0 and self.__horz_split_pos > 0:
            self.__split_active_pane = 0
        elif self.__vert_split_pos > 0 and self.__horz_split_pos == 0:
            self.__split_active_pane = 1
        elif self.__vert_split_pos == 0 and self.__horz_split_pos > 0:
            self.__split_active_pane = 2
        else:
            self.__split_active_pane = 3
        result = BIFFRecords.PanesRecord(self.__vert_split_pos,
                self.__horz_split_pos,
                self.__horz_split_first_visible,
                self.__vert_split_first_visible,
                self.__split_active_pane).get()
        return result
def __row_blocks_rec(self):
# this function takes almost 99% of overall execution time
# when file is saved
# return ''
result = []
i = 0
used_rows = self.__rows.keys()
while i < len(used_rows):
j = 0
while i < len(used_rows) and (j < 32):
result.append(self.__rows[used_rows[i]].get_row_biff_data())
result.append(self.__rows[used_rows[i]].get_cells_biff_data())
j += 1
i += 1
return ''.join(result)
    def __merged_rec(self):
        """MERGEDCELLS record covering every range recorded by merge()."""
        return BIFFRecords.MergedCellsRecord(self.__merged_ranges).get()
    def __bitmaps_rec(self):
        """Raw OBJ/IMDATA bytes accumulated by insert_bitmap()."""
        return self.__bmp_rec
def __calc_settings_rec(self):
result = ''
result += BIFFRecords.CalcModeRecord(self.__calc_mode & 0x01).get()
result += BIFFRecords.CalcCountRecord(self.__calc_count & 0xFFFF).get()
result += BIFFRecords.RefModeRecord(self.__RC_ref_mode & 0x01).get()
result += BIFFRecords.IterationRecord(self.__iterations_on & 0x01).get()
result += BIFFRecords.DeltaRecord(self.__delta).get()
result += BIFFRecords.SaveRecalcRecord(self.__save_recalc & 0x01).get()
return result
    def __print_settings_rec(self):
        """Page-setup block: print flags, page breaks, header/footer
        strings, centring, margins and the SETUP record with its packed
        option bits.

        NOTE(review): self.__grid_set is read here although its
        accessor pair is commented out above -- presumably initialised
        in __init__; confirm before relying on it.
        """
        result = ''
        result += BIFFRecords.PrintHeadersRecord(self.__print_headers).get()
        result += BIFFRecords.PrintGridLinesRecord(self.__print_grid).get()
        result += BIFFRecords.GridSetRecord(self.__grid_set).get()
        result += BIFFRecords.HorizontalPageBreaksRecord(self.__horz_page_breaks).get()
        result += BIFFRecords.VerticalPageBreaksRecord(self.__vert_page_breaks).get()
        result += BIFFRecords.HeaderRecord(self.__header_str).get()
        result += BIFFRecords.FooterRecord(self.__footer_str).get()
        result += BIFFRecords.HCenterRecord(self.__print_centered_horz).get()
        result += BIFFRecords.VCenterRecord(self.__print_centered_vert).get()
        result += BIFFRecords.LeftMarginRecord(self.__left_margin).get()
        result += BIFFRecords.RightMarginRecord(self.__right_margin).get()
        result += BIFFRecords.TopMarginRecord(self.__top_margin).get()
        result += BIFFRecords.BottomMarginRecord(self.__bottom_margin).get()
        # SETUP option bit mask; shift amounts give the bit positions.
        setup_page_options =  (self.__print_in_rows & 0x01) << 0
        setup_page_options |= (self.__portrait & 0x01) << 1
        setup_page_options |= (0x00 & 0x01) << 2                        # always written as 0
        setup_page_options |= (self.__print_not_colour & 0x01) << 3     # inverted colour flag
        setup_page_options |= (self.__print_draft & 0x01) << 4
        setup_page_options |= (self.__print_notes & 0x01) << 5
        setup_page_options |= (0x00 & 0x01) << 6                        # always written as 0
        setup_page_options |= (0x01 & 0x01) << 7                        # always written as 1
        setup_page_options |= (self.__print_notes_at_end & 0x01) << 9
        setup_page_options |= (self.__print_omit_errors & 0x03) << 10   # two-bit field
        result += BIFFRecords.SetupPageRecord(self.__paper_size_code,
                self.__print_scaling,
                self.__start_page_number,
                self.__fit_width_to_pages,
                self.__fit_height_to_pages,
                setup_page_options,
                self.__print_hres,
                self.__print_vres,
                self.__header_margin,
                self.__footer_margin,
                self.__copies_num).get()
        return result
def __protection_rec(self):
result = ''
result += BIFFRecords.ProtectRecord(self.__protect).get()
result += BIFFRecords.ScenProtectRecord(self.__scen_protect).get()
result += BIFFRecords.WindowProtectRecord(self.__wnd_protect).get()
result += BIFFRecords.ObjectProtectRecord(self.__obj_protect).get()
result += BIFFRecords.PasswordRecord(self.__password).get()
return result
def get_biff_data(self):
    """Assemble and return the complete BIFF record stream for this sheet.

    The sections are concatenated in the fixed order required by the BIFF
    format, from the BOF record through to the EOF record.
    """
    sections = [
        self.__bof_rec(),
        self.__calc_settings_rec(),
        self.__guts_rec(),
        self.__wsbool_rec(),
        self.__colinfo_rec(),
        self.__dimensions_rec(),
        self.__print_settings_rec(),
        self.__protection_rec(),
        self.__row_blocks_rec(),
        self.__merged_rec(),
        self.__bitmaps_rec(),
        self.__window2_rec(),
        self.__panes_rec(),
        self.__eof_rec(),
    ]
    return ''.join(sections)
|
agpl-3.0
|
renegelinas/mi-instrument
|
mi/instrument/ooici/mi/test_driver/driver.py
|
7
|
21975
|
# """
# @package mi.instrument.ooici.mi.test_driver.driver
# @file marine-integrations/mi/instrument/ooici/mi/test_driver/driver.py
# @author Bill French
# @brief Driver for the test_driver
# Release notes:
#
# This driver is used for coi testing
#
# """
#
# __author__ = 'Bill French'
# __license__ = 'Apache 2.0'
#
# import string
# import time
# import random
# import struct
# import base64
#
# from mi.core.log import get_logger ; log = get_logger()
#
# from threading import Thread
#
# from mi.core.time_tools import time_to_ntp_date_time
#
# from mi.core.common import BaseEnum
# from mi.core.instrument.data_particle import RawDataParticle
# from mi.core.instrument.data_particle import RawDataParticleKey
# from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
# from mi.core.instrument.instrument_fsm import InstrumentFSM
# from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
# from mi.core.instrument.instrument_driver import DriverEvent
# from mi.core.instrument.instrument_driver import DriverAsyncEvent
# from mi.core.instrument.instrument_driver import DriverProtocolState
# from mi.core.instrument.instrument_driver import DriverParameter
# from mi.core.instrument.instrument_driver import ResourceAgentState
# from mi.core.instrument.instrument_driver import DriverConnectionState
# from mi.core.instrument.data_particle import DataParticle
# from mi.core.instrument.data_particle import DataParticleKey
# from mi.core.instrument.data_particle import CommonDataParticleType
# from mi.core.instrument.chunker import StringChunker
# from mi.core.instrument.protocol_param_dict import ParameterDictType
# from mi.core.instrument.protocol_param_dict import Parameter
# from mi.core.exceptions import InstrumentParameterException
#
# # newline.
# NEWLINE = '\r\n'
#
# # default timeout.
# TIMEOUT = 10
#
# ###
# # Driver Constant Definitions
# ###
#
# class DataParticleType(BaseEnum):
# """
# Data particle types produced by this driver
# """
# RAW = CommonDataParticleType.RAW
#
# class ProtocolState(BaseEnum):
# """
# Instrument protocol states
# """
# UNKNOWN = DriverProtocolState.UNKNOWN
# COMMAND = DriverProtocolState.COMMAND
# AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
# DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
# TEST = DriverProtocolState.TEST
# CALIBRATE = DriverProtocolState.CALIBRATE
#
# class ProtocolEvent(BaseEnum):
# """
# Protocol events
# """
# ENTER = DriverEvent.ENTER
# EXIT = DriverEvent.EXIT
# GET = DriverEvent.GET
# SET = DriverEvent.SET
# DISCOVER = DriverEvent.DISCOVER
# START_DIRECT = DriverEvent.START_DIRECT
# STOP_DIRECT = DriverEvent.STOP_DIRECT
# ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
# START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
# STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
# EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
# INIT_PARAMS = DriverEvent.INIT_PARAMS
#
# class Capability(BaseEnum):
# """
# Protocol events that should be exposed to users (subset of above).
# """
# START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
# STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
#
# class ParameterName(DriverParameter):
# """
# Device specific parameters.
# """
# PAYLOAD_SIZE = 'PAYLOAD_SIZE'
# SAMPLE_INTERVAL = 'SAMPLE_INTERVAL'
#
# class Prompt(BaseEnum):
# """
# Device i/o prompts..
# """
#
# class InstrumentCommand(BaseEnum):
# """
# Instrument command strings
# """
#
#
# ###############################################################################
# # Data Particles
# ###############################################################################
# class TestDataParticle(RawDataParticle):
# def _build_parsed_values(self):
# """
# Build a particle out of a port agent packet.
# @returns A list that is ready to be added to the "values" tag before
# the structure is JSONified
# """
#
# payload = base64.b64encode(self.raw_data)
# length = len(self.raw_data)
# checksum = 1
# ptype = 1
#
# result = [{
# DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
# DataParticleKey.VALUE: payload,
# DataParticleKey.BINARY: True},
# {
# DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
# DataParticleKey.VALUE: length},
# {
# DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
# DataParticleKey.VALUE: ptype},
# {
# DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
# DataParticleKey.VALUE: checksum},
#
# ]
#
# return result
#
#
# ###############################################################################
# # Driver
# ###############################################################################
#
# class InstrumentDriver(SingleConnectionInstrumentDriver):
# """
# InstrumentDriver subclass
# Subclasses SingleConnectionInstrumentDriver with connection state
# machine.
# """
#
# ########################################################################
# # Protocol builder.
# ########################################################################
#
# def _build_protocol(self):
# """
# Construct the driver protocol state machine.
# """
# self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
#
# ########################################################################
# # Connected handlers.
# ########################################################################
#
# def _handler_connected_disconnect(self, *args, **kwargs):
# """
# Disconnect to the device via port agent / logger and destroy the
# protocol FSM.
# @retval (next_state, result) tuple, (DriverConnectionState.DISCONNECTED,
# None) if successful.
# """
# next_state = None
# result = None
#
# log.info("_handler_connected_disconnect: invoking stop_comms().")
# self._protocol = None
# next_state = DriverConnectionState.DISCONNECTED
#
# return (next_state, result)
#
# ########################################################################
# # Disconnected handlers.
# ########################################################################
#
# def _handler_disconnected_configure(self, *args, **kwargs):
# """
# Configure driver for device comms.
# @param args[0] Communications config dictionary.
# @retval (next_state, result) tuple, (None, None).
# @raises InstrumentParameterException if missing or invalid param dict.
# """
# next_state = None
# result = None
#
# return (next_state, result)
#
# def _handler_disconnected_connect(self, *args, **kwargs):
# """
# Establish communications with the device via port agent / logger and
# construct and initialize a protocol FSM for device interaction.
# @retval (next_state, result) tuple, (DriverConnectionState.CONNECTED,
# None) if successful.
# @raises InstrumentConnectionException if the attempt to connect failed.
# """
# log.debug("_handler_disconnected_connect. Mocked")
# next_state = DriverConnectionState.CONNECTED
# result = None
#
# self._build_protocol()
#
# return (next_state, result)
#
#
# ###########################################################################
# # Protocol
# ###########################################################################
#
# class Protocol(CommandResponseInstrumentProtocol):
# """
# Instrument protocol class
# Subclasses CommandResponseInstrumentProtocol
# """
# def __init__(self, prompts, newline, driver_event):
# """
# Protocol constructor.
# @param prompts A BaseEnum class containing instrument prompts.
# @param newline The newline.
# @param driver_event Driver process event callback.
# """
# # Construct protocol superclass.
# CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
#
# # Build protocol state machine.
# self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,
# ProtocolEvent.ENTER, ProtocolEvent.EXIT)
#
# # Add event handlers for protocol state machine.
# self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
# self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
# self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
# self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.START_DIRECT, self._handler_command_start_direct)
#
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_get)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_autosample_start)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.INIT_PARAMS, self._handler_command_init_params)
#
# self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
# self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
# self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop)
#
# self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER, self._handler_direct_access_enter)
# self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT, self._handler_direct_access_exit)
# self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct)
# self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct)
#
# # Construct the parameter dictionary containing device parameters,
# # current parameter values, and set formatting functions.
# self._build_param_dict()
#
# # Add build handlers for device commands.
#
# # Add response handlers for device commands.
#
# # Add sample handlers.
#
# # State state machine in UNKNOWN state.
# self._protocol_fsm.start(ProtocolState.UNKNOWN)
#
# # commands sent sent to device to be filtered in responses for telnet DA
# self._sent_cmds = []
#
# #
# self._chunker = StringChunker(Protocol.sieve_function)
#
# self._payload_cache = {}
#
#
# @staticmethod
# def sieve_function(raw_data):
# """
# The method that splits samples
# """
#
# return_list = []
#
# return return_list
#
# def _build_param_dict(self):
# """
# Populate the parameter dictionary with parameters.
# For each parameter key, add match string, match lambda function,
# and value formatting function for set commands.
# """
# # Add parameter handlers to parameter dict.
# self._param_dict.add_parameter(
# Parameter(ParameterName.PAYLOAD_SIZE,
# int,
# type=ParameterDictType.INT,
# display_name="Payload Size",
# startup_param = True,
# direct_access = True,
# default_value = 1024)
# )
# self._param_dict.add_parameter(
# Parameter(ParameterName.SAMPLE_INTERVAL,
# int,
# type=ParameterDictType.INT,
# display_name="Sample Interval (sec)",
# startup_param = True,
# direct_access = True,
# default_value = 1)
# )
#
# def _got_chunk(self, chunk):
# """
# The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
# with the appropriate particle objects and REGEXes.
# """
#
# def _filter_capabilities(self, events):
# """
# Return a list of currently available capabilities.
# """
# return [x for x in events if Capability.has(x)]
#
# ########################################################################
# # Unknown handlers.
# ########################################################################
#
# def _handler_unknown_enter(self, *args, **kwargs):
# """
# Enter unknown state.
# """
# # Tell driver superclass to send a state change event.
# # Superclass will query the state.
# self._driver_event(DriverAsyncEvent.STATE_CHANGE)
#
# def _handler_unknown_exit(self, *args, **kwargs):
# """
# Exit unknown state.
# """
# pass
#
# def _handler_unknown_discover(self, *args, **kwargs):
# """
# Discover current state
# @retval (next_state, result)
# """
# return (ProtocolState.COMMAND, ResourceAgentState.IDLE)
#
# ########################################################################
# # Command handlers.
# ########################################################################
#
# def _handler_command_enter(self, *args, **kwargs):
# """
# Enter command state.
# @throws InstrumentTimeoutException if the device cannot be woken.
# @throws InstrumentProtocolException if the update commands and not recognized.
# """
# self._protocol_fsm.on_event(DriverEvent.INIT_PARAMS)
#
# # Tell driver superclass to send a state change event.
# # Superclass will query the state.
# self._driver_event(DriverAsyncEvent.STATE_CHANGE)
#
# def _handler_command_set(self, *args, **kwargs):
# """
# Set parameter
# """
# next_state = None
# result = None
#
# self._set_params(*args, **kwargs)
#
# log.debug("_handler_command_set: result: %s", result)
#
# return (next_state, result)
#
# def _handler_command_exit(self, *args, **kwargs):
# """
# Exit command state.
# """
# pass
#
# def _handler_command_start_direct(self):
# """
# Start direct access
# """
# next_state = ProtocolState.DIRECT_ACCESS
# next_agent_state = ResourceAgentState.DIRECT_ACCESS
# result = None
# log.debug("_handler_command_start_direct: entering DA mode")
# return (next_state, (next_agent_state, result))
#
# def _handler_command_autosample_start(self, *args, **kwargs):
# next_state = ProtocolState.AUTOSAMPLE
# next_agent_state = ResourceAgentState.STREAMING
# result = None
# return (next_state, (next_agent_state, result))
#
# def _handler_command_init_params(self, *args, **kwargs):
# """
# initialize parameters
# """
# next_state = None
# result = None
#
# self._init_params()
# return (next_state, result)
#
# ########################################################################
# # Autosample handlers.
# ########################################################################
#
# def _handler_autosample_enter(self, *args, **kwargs):
# """
# Enter autosample state.
# """
# # Tell driver superclass to send a state change event.
# # Superclass will query the state.
# self._driver_event(DriverAsyncEvent.STATE_CHANGE)
#
# self._start_packet_generator()
#
# def _handler_autosample_exit(self, *args, **kwargs):
# """
# Exit autosample state.
# """
# self._stop_packet_generator()
#
# def _handler_autosample_stop(self, *args, **kwargs):
# """
# Stop autosample and switch back to command mode.
# @retval (next_state, result) tuple, (ProtocolState.COMMAND,
# (next_agent_state, None) if successful.
# @throws InstrumentTimeoutException if device cannot be woken for command.
# @throws InstrumentProtocolException if command misunderstood or
# incorrect prompt received.
# """
# next_state = None
# result = None
#
# next_state = ProtocolState.COMMAND
# next_agent_state = ResourceAgentState.COMMAND
#
# return (next_state, (next_agent_state, result))
#
# ########################################################################
# # Direct access handlers.
# ########################################################################
#
# def _handler_direct_access_enter(self, *args, **kwargs):
# """
# Enter direct access state.
# """
# # Tell driver superclass to send a state change event.
# # Superclass will query the state.
# self._driver_event(DriverAsyncEvent.STATE_CHANGE)
#
# self._sent_cmds = []
#
# def _handler_direct_access_exit(self, *args, **kwargs):
# """
# Exit direct access state.
# """
# pass
#
# def _handler_direct_access_execute_direct(self, data):
# """
# """
# next_state = None
# result = None
# next_agent_state = None
#
# self._do_cmd_direct(data)
#
# # add sent command to list for 'echo' filtering in callback
# self._sent_cmds.append(data)
#
# return (next_state, (next_agent_state, result))
#
# def _handler_direct_access_stop_direct(self):
# """
# @throw InstrumentProtocolException on invalid command
# """
# next_state = None
# result = None
#
# next_state = ProtocolState.COMMAND
# next_agent_state = ResourceAgentState.COMMAND
#
# return (next_state, (next_agent_state, result))
#
#
# ########################################################################
# # Helpers
# ########################################################################
# def _start_packet_generator(self):
# packet_size = self._param_dict.get(ParameterName.PAYLOAD_SIZE)
# sample_interval = self._param_dict.get(ParameterName.SAMPLE_INTERVAL)
#
# self._generate_payload_value(packet_size)
#
# self._stop_generator_thread = False
# self._generator_thread = Thread(
# target=self._generate_packets,
# args=(packet_size, sample_interval, self._publish_packet ))
# self._generator_thread.start()
#
# def _generate_packets(self, *args, **kwargs):
# packet_size = args[0]
# sample_interval = args[1]
# publish_callback = args[2]
#
# log.debug("_generate_packets, starting packet generator. packet_size: %s, sample_interval: %s", packet_size, sample_interval)
#
# while(self._stop_generator_thread != True):
# publish_callback(packet_size)
# time.sleep(sample_interval)
#
# log.debug("_generate_packets, stopping packet generator")
#
# def _publish_packet(self, packet_size):
# buf = self._get_payload_value(packet_size)
# particle = TestDataParticle(buf, port_timestamp=time_to_ntp_date_time())
#
# log.debug("_publish_packet, packet size: %d", len(buf))
# self._driver_event(DriverAsyncEvent.SAMPLE, particle.generate())
#
# def _get_payload_value(self, packet_size):
# if self._payload_cache.get(packet_size):
# return self._payload_cache[packet_size]
#
# return self._generate_payload_value(packet_size)
#
# def _generate_payload_value(self, packet_size):
# log.debug("generating new value, packet size: %s", packet_size)
# charlist = [random.choice(string.letters) for _ in range(packet_size)]
# buf = struct.pack('%sc' % len(charlist), *charlist)
# self._payload_cache[packet_size] = buf
# return buf
#
# def _stop_packet_generator(self):
# log.debug("_stop_packet_generator: Signal the packet generator to stop")
# self._stop_generator_thread = True
#
# self._generator_thread.join(60)
#
# def _set_params(self, *args, **kwargs):
# """
# Issue commands to the instrument to set various parameters
# """
# startup = False
# config_change = False
#
# result = {}
#
# try:
# params = args[0]
# except IndexError:
# raise InstrumentParameterException('Set command requires a parameter dict.')
#
# try:
# startup = args[1]
# except IndexError:
# pass
#
# for (key, val) in params.iteritems():
# log.debug("KEY = " + str(key) + " VALUE = " + str(val))
# if self._param_dict.get(key) != val:
# config_change = True
# self._param_dict.set_value(key, val)
# result[key] = val
#
# if config_change:
# self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
#
# return result
|
bsd-2-clause
|
sputnick-dev/weboob
|
modules/attilasub/test.py
|
7
|
1310
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
import urllib
from random import choice
class AttilasubTest(BackendTest):
    """Live test for the attilasub subtitle search backend."""

    MODULE = 'attilasub'

    def test_subtitle(self):
        """Search French subtitles and download one random result."""
        results = list(self.backend.iter_subtitles('fr', 'spiderman'))
        assert len(results) > 0
        for result in results:
            # The site serves every subtitle archive as a .rar file.
            path, query = urllib.splitquery(result.url)
            assert path.endswith('.rar')
        # Fetch the file of one randomly chosen subtitle.
        if len(results):
            picked = choice(results)
            self.backend.get_subtitle_file(picked.id)
|
agpl-3.0
|
chwiede/pyads
|
pyads/adsclient.py
|
1
|
6764
|
import time
import select
import socket
import struct
import threading
import errno
from .amspacket import AmsPacket
from .adsconnection import AdsConnection
from .adsexception import AdsException
from .commands import *
class InvalidPacket(AdsException):
    """Raised when data read from the socket is not a valid AMS/TCP packet."""
    pass
class AdsClient:
    """Synchronous TCP client for the Beckhoff ADS/AMS protocol.

    Sends ADS commands over an AMS/TCP connection and receives responses
    on a background reader thread, matching each response to the pending
    request via the AMS invoke id.  Usable as a context manager (closes
    the socket on exit).
    """

    def __init__(self, adsConnection = None, amsTarget = None, amsSource = None, targetIP = None):
        # Accept either a ready-made AdsConnection, or the AMS address
        # parts from which one is built -- but not both.
        if adsConnection != None and amsTarget == None and amsSource == None:
            self.AdsConnection = adsConnection
        elif amsTarget != None and adsConnection == None:
            self.AdsConnection = AdsConnection(amsTarget, amsSource, targetIP)
        else:
            raise Exception('You must specify either connection or adsTarget, not both.')
        # Left-over bytes beyond the last complete packet read from the socket.
        self.response = b''

    # How many times SendAndRecv retries after a failed socket send.
    MAX_RETRY_ON_FAIL = 3
    # When True, sent and received AMS packets are printed.
    Debug = False
    RetryOnFail = 0
    AdsConnection = None
    # Default AMS/TCP port of the ADS router (0xBF02 == 48898).
    AdsPortDefault = 0xBF02
    AdsIndexGroupIn = 0xF020
    AdsIndexGroupOut = 0xF030
    # Number of bytes requested per socket recv().
    AdsChunkSizeDefault = 1024
    Socket = None
    # Invoke ids cycle through 0x8000..0xFFFF; the reader thread keeps only
    # the response whose InvokeID matches this value.
    _CurrentInvokeID = 0x8000
    # Response packet for the current invoke id (set by the reader thread).
    _CurrentPacket = None
    # Exception raised in the reader thread, re-raised in AwaitCommandInvoke.
    _CurrentError = None

    @property
    def IsConnected(self):
        # fileno() is -1 once the socket has been closed.
        return self.Socket != None and self.Socket.fileno() >= 0

    def Close(self):
        """Close the socket; the reader thread then terminates by itself."""
        if (self.Socket != None):
            self.Socket.close()
            self.Socket = None

    def Connect(self):
        """Open a TCP connection to the target and start the reader thread.

        Raises AdsException(0x274c) on any socket-level connect failure.
        """
        self.Close()
        self.Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.Socket.settimeout(2)
        try:
            self.Socket.connect((self.AdsConnection.TargetIP, self.AdsPortDefault))
            self._BeginAsyncRead()
        except socket.error:
            raise AdsException(0x274c)

    def _BeginAsyncRead(self):
        # Start the background reader loop.
        self._AsyncReadThread = threading.Thread(target=self._AsyncRead)
        self._AsyncReadThread.start()

    def _AsyncRead(self):
        # Reader loop: poll the socket, parse complete AMS packets and hand
        # the one matching the current invoke id to the waiting caller.
        while self.IsConnected:
            try:
                ready = select.select([self.Socket], [], [], 0.1)
                if ready[0] and self.IsConnected:
                    newPacket = self.ReadAmsPacketFromSocket()
                    if newPacket.InvokeID == self._CurrentInvokeID:
                        self._CurrentPacket = newPacket
                    else:
                        # Response for an unexpected invoke id -- discard it.
                        print("Packet dropped:")
                        print(newPacket)
            except (socket.error, select.error, InvalidPacket) as e:
                # Record the failure so AwaitCommandInvoke can re-raise it,
                # then stop the reader.
                self.Close()
                self._CurrentError = e
                break

    def ReadAmsPacketFromSocket(self):
        """Read one complete AMS packet; buffer any trailing extra bytes."""
        # generate packet from cache, or read more data from recv buffer.
        if len(self.response) == 0:
            response = self.Socket.recv(self.AdsChunkSizeDefault)
        else:
            response = self.response
        # ensure correct beckhoff tcp header
        if(len(response) < 6):
            raise InvalidPacket('Invalid packet received')
        # first two bits must be 0
        if (response[0:2] != b'\x00\x00'):
            raise InvalidPacket('Invalid packet received')
        # read whole data length
        # NOTE(review): 'I' unpacks in native byte order; the AMS length
        # field is little-endian (cf. GetTcpHeader's '<HI'), so this is only
        # correct on little-endian hosts -- confirm.
        dataLen = struct.unpack('I', response[2:6])[0] + 6
        # read rest of data, if any
        while (len(response) < dataLen):
            nextReadLen = min(self.AdsChunkSizeDefault, dataLen - len(response))
            response += self.Socket.recv(nextReadLen)
        # cut off tcp-header and return response amspacket
        packet = AmsPacket.FromBinaryData(response[6:dataLen])
        self.response = response[dataLen:]
        return packet

    def GetTcpHeader(self, amsData):
        """Build the 6-byte AMS/TCP header for *amsData*."""
        # pack 2 bytes (reserved) and 4 bytes (length)
        # format _must_ be little endian!
        return struct.pack('<HI', 0, len(amsData))

    def SendAndRecv(self, amspacket):
        """Send *amspacket* and block until its response arrives."""
        if not self.IsConnected:
            self.Connect()
        # prepare packet with invoke id
        self.PrepareCommandInvoke(amspacket)
        # send tcp-header and ams-data
        try:
            self.Socket.send(self.GetTCPPacket(amspacket))
        except socket.error as e:
            # if i fail Socket.send i try again for 3 times
            # NOTE(review): RetryOnFail is only reset when the retry budget
            # is exhausted, not after a successful send -- confirm intended.
            if self.RetryOnFail < self.MAX_RETRY_ON_FAIL:
                self.RetryOnFail += 1
                # if i have a BROKEN PIPE error i reconnect
                # the socket before try again
                if e.errno == errno.EPIPE:
                    self.Connect()
                return self.SendAndRecv(amspacket)
            else:
                self.RetryOnFail = 0
                raise AdsException(0x274c)
        # here's your packet
        return self.AwaitCommandInvoke()

    def GetTCPPacket(self, amspacket):
        """Return the full wire representation: TCP header + AMS data."""
        # get ams-data and generate tcp-header
        amsData = amspacket.GetBinaryData()
        tcpHeader = self.GetTcpHeader(amsData)
        return tcpHeader + amsData

    def PrepareCommandInvoke(self, amspacket):
        """Assign the next invoke id and reset the per-request state."""
        if(self._CurrentInvokeID < 0xFFFF):
            self._CurrentInvokeID += 1
        else:
            self._CurrentInvokeID = 0x8000
        self._CurrentPacket = None
        self._CurrentError = None
        amspacket.InvokeID = self._CurrentInvokeID
        if self.Debug:
            print(">>> sending ams-packet:")
            print(amspacket)

    def AwaitCommandInvoke(self):
        """Poll until the reader thread delivers the matching response.

        Re-raises any error captured by the reader thread; raises
        AdsException(0x745) after roughly 10 seconds without a response.
        """
        # unfortunately threading.event is slower than this oldschool poll :-(
        timeout = 0
        while (self._CurrentPacket == None):
            if self._CurrentError:
                raise self._CurrentError
            timeout += 0.001
            time.sleep(0.001)
            if (timeout > 10):
                raise AdsException(0x745)
        if self.Debug:
            print("<<< received ams-packet:")
            print(self._CurrentPacket)
        return self._CurrentPacket

    def ReadDeviceInfo(self):
        """Execute an ADS 'device info' command."""
        return DeviceInfoCommand().Execute(self)

    def Read(self, indexGroup, indexOffset, length):
        """Execute an ADS read of *length* bytes at the given index."""
        return ReadCommand(indexGroup, indexOffset, length).Execute(self)

    def Write(self, indexGroup, indexOffset, data):
        """Execute an ADS write of *data* at the given index."""
        return WriteCommand(indexGroup, indexOffset, data).Execute(self)

    def ReadState(self):
        """Execute an ADS 'read state' command."""
        return ReadStateCommand().Execute(self)

    def WriteControl(self, adsState, deviceState, data = b''):
        """Execute an ADS 'write control' command."""
        return WriteControlCommand(adsState, deviceState, data).Execute(self)

    def AddDeviceNotification(self):
        # Notifications are not supported by this client.
        raise NotImplementedError()

    def DeleteDeviceNotification(self):
        # Notifications are not supported by this client.
        raise NotImplementedError()

    def ReadWrite(self, indexGroup, indexOffset, readLen, dataToWrite = b''):
        """Execute a combined ADS read/write command."""
        return ReadWriteCommand(indexGroup, indexOffset, readLen, dataToWrite).Execute(self)

    def __enter__(self):
        return self

    def __exit__(self, vtype, value, traceback):
        # Always release the socket when leaving the 'with' block.
        self.Close()
|
mit
|
SirPigles/rsf
|
forums/test/python/test.py
|
7
|
1204
|
import sys
import os
import re
def print_environ(environ=os.environ):
    """Dump the shell environment as HTML.

    Prints one table row per HTTP_* / REQUEST_* variable, in sorted key
    order, alternating between the "normal" and "alt" CSS row classes.
    (Python 2 code: list.sort() on keys(), print statements.)
    """
    keys = environ.keys()
    keys.sort()
    i = 0  # toggles the row style between "normal" and "alt"
    for key in keys:
        # Show only request-related variables.
        if not re.search("^HTTP_|^REQUEST_", key):
            continue
        if i == 0:
            print """<tr class="normal"><td>""", escape(key), "</td><td>", escape(environ[key]), "</td></tr>"
            i = 1
        else:
            print """<tr class="alt"><td>""", escape(key), "</td><td>", escape(environ[key]), "</td></tr>"
            i = 0
def escape(s, quote=None):
    """Replace special characters '&', '<' and '>' by SGML entities.

    If *quote* is true, double quotes are escaped as well.  The source had
    been corrupted by HTML-entity decoding (each replace mapped a character
    to itself and the quote branch was a syntax error); the standard entity
    replacements are restored here.
    """
    s = s.replace("&", "&amp;")  # Must be done first!
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    if quote:
        s = s.replace('"', "&quot;")
    return s
print """Content-type: text/html
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title></title>
<link rel="stylesheet" type="text/css" href="../../css/style.css">
</head>
<body>
<table cellspacing="0" cellpadding="0" border="0">
<tr class="subhead" align="Left"><th>Name</th><th>Value</th></tr>"""
print_environ()
print """</table>
</body>
</html>"""
|
apache-2.0
|
OMNILab/CCFBDC2014
|
data_process/feature_spatio-temporal/feature_spatio/feature_spatio.py
|
1
|
2551
|
# -*- coding: utf-8 -*-
import sys
import os
import math
# File read/write interface function.
def read_write_file(rfname, wfname, resource_files):
    """Run location extraction over every line of *rfname* and write the
    results to *wfname*, one result per line.

    *resource_files* is the tab-separated pair of gazetteer file names
    passed through to location_extr().
    """
    # 'with' guarantees both files are closed even if extraction raises,
    # unlike the original explicit open()/close() pair.
    with open(rfname, 'r') as readf, open(wfname, 'w') as writef:
        for line in readf:
            result_line = location_extr(line.strip(), resource_files)
            writef.write(result_line + '\n')
# Reverse lookup: given a value word, return its key, or 0 if not found.
def value_to_key(value, resourcef_name):
    """Scan the tab-separated map file *resourcef_name* (format:
    key<TAB>values) and return the key of the first line whose values
    field contains *value* as a substring.  Returns 0 on no match,
    preserving the original sentinel for callers that test ``!= 0``.
    """
    # 'with' closes the file on every path (the original left it open
    # implicitly until garbage collection); early return replaces the
    # result/break bookkeeping.
    with open(resourcef_name) as readf:
        for line in readf:
            line_split = line.split('\t')
            key = line_split[0]
            values = line_split[1]
            # Substring match, same semantics as the original find() test.
            if values.find(value) != -1:
                return key
    return 0
# Location extraction: look in the title first, then in the content; on a
# match return "area<TAB>province<TAB>city"-style info prefixed by the id.
def location_extr(record, resource_files):
    """Extract a location from one tab-separated record.

    *record* is: event_id<TAB>"title locations"<TAB>"content locations".
    The title locations are matched against the gazetteers first; the
    content locations are a fallback.  On success returns the event id,
    a tab, and the matched location string from content_loc_match();
    otherwise just the event id.  (The original also declared unused
    ``province``/``area`` locals and built the fallback as
    ``event_id + '' + '' + ''``, which equals ``event_id``.)
    """
    fields = record.split('\t')
    event_id = fields[0]
    title_loc = fields[1].strip('"')
    content_loc = fields[2].strip('"')
    prefix = event_id + '\t'
    # Prefer a location found in the title.
    matched = content_loc_match(title_loc, resource_files)
    if matched != 0:
        return prefix + matched
    # Fall back to the article content.
    matched = content_loc_match(content_loc, resource_files)
    if matched != 0:
        return prefix + matched
    # No location found anywhere: return the bare event id.
    return event_id
# Match content words against the location gazetteers: cities first, then
# provinces.  Returns "area\tprovince\tcity" or "area\tprovince", or 0.
def content_loc_match(content_loc,resource_files):
    # resource_files is "prov_city_map<TAB>area_prov_map": two file names.
    resource_files_split = resource_files.split('\t')
    prov_city_map = resource_files_split[0]
    area_prov_map = resource_files_split[1]
    if content_loc!='':
        # Each space-separated token looks like "name:count"; only the
        # name part before ':' is used for matching.
        content_loc_split = content_loc.split(' ')
        # map the cities
        for num in range(len(content_loc_split)):
            content_city_name = content_loc_split[num].split(':')[0]
            province = value_to_key(content_city_name,prov_city_map)
            if(province!=0):
                area = value_to_key(province,area_prov_map)
                # Append the "市" (city) suffix when it is missing.
                if content_city_name.find("市")==-1:
                    content_city_name = content_city_name + "市"
                # NOTE(review): if the province is absent from the area
                # map, area is the int 0 and this concatenation raises
                # TypeError -- confirm the gazetteers are complete.
                line_rslt = area + '\t' + province + '\t' + content_city_name
                return line_rslt
        # if there aren't cities, map provinces
        for num in range(len(content_loc_split)):
            content_province_name = content_loc_split[num].split(':')[0]
            area = value_to_key(content_province_name,area_prov_map)
            if(area!=0):
                line_rslt = area + '\t' + content_province_name
                return line_rslt
    return 0
|
mit
|
Axelio/pruebas_camelot
|
videostore/model.py
|
1
|
1389
|
from sqlalchemy import Unicode, Date, Integer
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.orm import relationship
import sqlalchemy.types
from camelot.admin.entity_admin import EntityAdmin
from camelot.core.orm import Entity
import camelot.types
class Movie( Entity ):
    """A movie in the catalogue, linked many-to-one to its Director."""

    __tablename__ = 'movie'

    title = Column( Unicode(60), nullable = False )
    short_description = Column( Unicode(512) )
    release_date = Column( Date() )
    genre = Column( Unicode(15) )
    # Many-to-one: each movie references one director; Director gains a
    # 'movies' collection via the backref.
    director_id = Column( Integer, ForeignKey('director.id') )
    director = relationship( 'Director',
                             backref = 'movies' )

    def __unicode__( self ):
        return self.title or 'Untitled movie'

    class Admin( EntityAdmin ):
        """Camelot admin: how Movie rows appear in the GUI."""
        verbose_name = 'Movie'
        # The original assigned list_display twice; the first (shorter)
        # assignment was dead code and has been removed.
        list_display = [ 'title',
                         'short_description',
                         'release_date',
                         'genre',
                         'director' ]
class Director( Entity ):
    """A movie director; related movies arrive via Movie's 'movies' backref."""

    __tablename__ = 'director'

    name = Column( Unicode( 60 ) )

    class Admin( EntityAdmin ):
        # Camelot admin view: lists show the name; the edit form also
        # shows the director's movies.
        verbose_name = 'Director'
        list_display = [ 'name' ]
        form_display = list_display + ['movies']

    def __unicode__(self):
        return self.name or 'unknown director'
|
gpl-3.0
|
h-mayorquin/competitive_and_selective_learning
|
play.py
|
1
|
1250
|
"""
This is the play
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from functions import selection_algorithm, scl
from csl import CSL
plot = True
verbose = False
tracking = True
selection = False
# Generate the data
n_samples = 1500
random_state = 20 # Does not converge
random_state = 41
random_state = 105 # Does not converge
random_state = 325325
random_state = 1111
n_features = 2
centers = 7
X, y = make_blobs(n_samples, n_features, centers, random_state=random_state)
# The algorithm
N = centers
s = 2 # Number of neurons to change per round
eta = 0.1
T = 100
csl = CSL(n_clusters=N, n_iter=T, tol=0.001, eta=eta, s0=s, random_state=np.random)
csl.fit(X)
neurons = csl.centers_
if False:
kmeans = KMeans(n_clusters=N)
kmeans.fit(X)
neurons = kmeans.cluster_centers_
if plot:
# Visualize X
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(X[:, 0], X[:, 1], 'x', markersize=6)
ax.hold(True)
if True:
for n in range(N):
ax.plot(neurons[n, 0], neurons[n, 1], 'o', markersize=12, label='neuron ' + str(n))
ax.legend()
# fig.show()
plt.show()
|
mit
|
repotvsupertuga/tvsupertuga.repository
|
plugin.video.TVsupertuga/resources/lib/plugins/unidecode/x1d6.py
|
248
|
3974
|
# Transliteration table for the Unicode block U+1D6xx (part of the
# Mathematical Alphanumeric Symbols): data[i] is the ASCII replacement
# for the code point whose low byte is i.  The tail of the sans-serif
# Latin alphabets is followed by the bold Greek alphabet.
data = (
's', # 0x00
't', # 0x01
'u', # 0x02
'v', # 0x03
'w', # 0x04
'x', # 0x05
'y', # 0x06
'z', # 0x07
'A', # 0x08
'B', # 0x09
'C', # 0x0a
'D', # 0x0b
'E', # 0x0c
'F', # 0x0d
'G', # 0x0e
'H', # 0x0f
'I', # 0x10
'J', # 0x11
'K', # 0x12
'L', # 0x13
'M', # 0x14
'N', # 0x15
'O', # 0x16
'P', # 0x17
'Q', # 0x18
'R', # 0x19
'S', # 0x1a
'T', # 0x1b
'U', # 0x1c
'V', # 0x1d
'W', # 0x1e
'X', # 0x1f
'Y', # 0x20
'Z', # 0x21
'a', # 0x22
'b', # 0x23
'c', # 0x24
'd', # 0x25
'e', # 0x26
'f', # 0x27
'g', # 0x28
'h', # 0x29
'i', # 0x2a
'j', # 0x2b
'k', # 0x2c
'l', # 0x2d
'm', # 0x2e
'n', # 0x2f
'o', # 0x30
'p', # 0x31
'q', # 0x32
'r', # 0x33
's', # 0x34
't', # 0x35
'u', # 0x36
'v', # 0x37
'w', # 0x38
'x', # 0x39
'y', # 0x3a
'z', # 0x3b
'A', # 0x3c
'B', # 0x3d
'C', # 0x3e
'D', # 0x3f
'E', # 0x40
'F', # 0x41
'G', # 0x42
'H', # 0x43
'I', # 0x44
'J', # 0x45
'K', # 0x46
'L', # 0x47
'M', # 0x48
'N', # 0x49
'O', # 0x4a
'P', # 0x4b
'Q', # 0x4c
'R', # 0x4d
'S', # 0x4e
'T', # 0x4f
'U', # 0x50
'V', # 0x51
'W', # 0x52
'X', # 0x53
'Y', # 0x54
'Z', # 0x55
'a', # 0x56
'b', # 0x57
'c', # 0x58
'd', # 0x59
'e', # 0x5a
'f', # 0x5b
'g', # 0x5c
'h', # 0x5d
'i', # 0x5e
'j', # 0x5f
'k', # 0x60
'l', # 0x61
'm', # 0x62
'n', # 0x63
'o', # 0x64
'p', # 0x65
'q', # 0x66
'r', # 0x67
's', # 0x68
't', # 0x69
'u', # 0x6a
'v', # 0x6b
'w', # 0x6c
'x', # 0x6d
'y', # 0x6e
'z', # 0x6f
'A', # 0x70
'B', # 0x71
'C', # 0x72
'D', # 0x73
'E', # 0x74
'F', # 0x75
'G', # 0x76
'H', # 0x77
'I', # 0x78
'J', # 0x79
'K', # 0x7a
'L', # 0x7b
'M', # 0x7c
'N', # 0x7d
'O', # 0x7e
'P', # 0x7f
'Q', # 0x80
'R', # 0x81
'S', # 0x82
'T', # 0x83
'U', # 0x84
'V', # 0x85
'W', # 0x86
'X', # 0x87
'Y', # 0x88
'Z', # 0x89
'a', # 0x8a
'b', # 0x8b
'c', # 0x8c
'd', # 0x8d
'e', # 0x8e
'f', # 0x8f
'g', # 0x90
'h', # 0x91
'i', # 0x92
'j', # 0x93
'k', # 0x94
'l', # 0x95
'm', # 0x96
'n', # 0x97
'o', # 0x98
'p', # 0x99
'q', # 0x9a
'r', # 0x9b
's', # 0x9c
't', # 0x9d
'u', # 0x9e
'v', # 0x9f
'w', # 0xa0
'x', # 0xa1
'y', # 0xa2
'z', # 0xa3
'i', # 0xa4
'j', # 0xa5
'', # 0xa6
'', # 0xa7
'Alpha', # 0xa8
'Beta', # 0xa9
'Gamma', # 0xaa
'Delta', # 0xab
'Epsilon', # 0xac
'Zeta', # 0xad
'Eta', # 0xae
'Theta', # 0xaf
'Iota', # 0xb0
'Kappa', # 0xb1
'Lamda', # 0xb2
'Mu', # 0xb3
'Nu', # 0xb4
'Xi', # 0xb5
'Omicron', # 0xb6
'Pi', # 0xb7
'Rho', # 0xb8
'Theta', # 0xb9
'Sigma', # 0xba
'Tau', # 0xbb
'Upsilon', # 0xbc
'Phi', # 0xbd
'Chi', # 0xbe
'Psi', # 0xbf
'Omega', # 0xc0
'nabla', # 0xc1
'alpha', # 0xc2
'beta', # 0xc3
'gamma', # 0xc4
'delta', # 0xc5
'epsilon', # 0xc6
'zeta', # 0xc7
'eta', # 0xc8
'theta', # 0xc9
'iota', # 0xca
'kappa', # 0xcb
'lamda', # 0xcc
'mu', # 0xcd
'nu', # 0xce
'xi', # 0xcf
'omicron', # 0xd0
'pi', # 0xd1
'rho', # 0xd2
'sigma', # 0xd3
'sigma', # 0xd4
'tau', # 0xd5  BUG FIX: was 'tai'; U+1D6D5 is MATHEMATICAL BOLD SMALL TAU
'upsilon', # 0xd6
'phi', # 0xd7
'chi', # 0xd8
'psi', # 0xd9
'omega', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
|
gpl-2.0
|
GeyerA/android_external_chromium_org
|
chrome/common/extensions/docs/server2/api_list_data_source_test.py
|
24
|
2930
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from api_list_data_source import APIListDataSource
from compiled_file_system import CompiledFileSystem
from copy import deepcopy
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
def _ToTestData(obj):
'''Transforms |obj| into test data by turning a list of files into an object
mapping that file to its contents (derived from its name).
'''
return (dict((name, name) for name in obj) if isinstance(obj, list) else
dict((key, _ToTestData(value)) for key, value in obj.items()))
# Fake file-system layout used by the tests below: 'api' holds the API
# definition files, 'public' the doc templates for apps and extensions.
_TEST_DATA = _ToTestData({
    'api': [
        'alarms.idl',
        'app_window.idl',
        'browser_action.json',
        'experimental_bluetooth.idl',
        'experimental_history.idl',
        'experimental_power.idl',
        'infobars.idl',
        'something_internal.idl',
        'something_else_internal.json',
        'storage.json',
    ],
    'public': {
        'apps': [
            'alarms.html',
            'app_window.html',
            'experimental_bluetooth.html',
            'experimental_power.html',
            'storage.html',
        ],
        'extensions': [
            'alarms.html',
            'browserAction.html',
            'experimental_history.html',
            'experimental_power.html',
            'infobars.html',
            'storage.html',
        ],
    },
})
class APIListDataSourceTest(unittest.TestCase):
    """Checks APIListDataSource's chrome/experimental API partitioning."""

    def setUp(self):
        # Build the factory against an in-memory copy of the test layout
        # (deepcopy so individual tests could mutate it safely).
        file_system = TestFileSystem(deepcopy(_TEST_DATA))
        self._factory = APIListDataSource.Factory(
            CompiledFileSystem.Factory(
                file_system, ObjectStoreCreator.ForTest()),
            file_system,
            'api',
            'public')

    def testApps(self):
        api_list = self._factory.Create()
        self.assertEqual([{'name': 'alarms'},
                          {'name': 'app.window'},
                          {'name': 'storage', 'last': True}],
                         api_list.get('apps').get('chrome'))

    def testExperimentalApps(self):
        api_list = self._factory.Create()
        self.assertEqual([{'name': 'experimental.bluetooth'},
                          {'name': 'experimental.power', 'last': True}],
                         sorted(api_list.get('apps').get('experimental')))

    def testExtensions(self):
        api_list = self._factory.Create()
        self.assertEqual([{'name': 'alarms'},
                          {'name': 'browserAction'},
                          {'name': 'infobars'},
                          {'name': 'storage', 'last': True}],
                         sorted(api_list.get('extensions').get('chrome')))

    def testExperimentalExtensions(self):
        # BUG FIX: this method was also named testExperimentalApps, which
        # silently shadowed the apps variant above so it never executed.
        api_list = self._factory.Create()
        self.assertEqual([{'name': 'experimental.history'},
                          {'name': 'experimental.power', 'last': True}],
                         sorted(api_list.get('extensions').get('experimental')))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
bsd-3-clause
|
arrabito/DIRAC
|
DataManagementSystem/Client/DataManager.py
|
3
|
74453
|
"""
:mod: DataManager
.. module: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
This module consists of DataManager and related classes.
"""
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
import errno
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize, breakListIntoChunks
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# # RSCID
__RCSID__ = "$Id$"
def _isOlderThan(stringTime, days):
""" Check if a time stamp is older than a given number of days """
timeDelta = timedelta(days=days)
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
def _initialiseAccountingObject(operation, se, files):
    """Create a DataOperation accounting record for `files` files sent to `se`.

    :param str operation: operation type label
    :param str se: destination storage element name
    :param int files: number of files involved
    :return: initialised DataOperation instance
    """
    # Resolve the user name from the current proxy; fall back to 'system'.
    proxyRes = getProxyInfo()
    if proxyRes['OK']:
        userName = proxyRes['Value'].get('username', 'unknown')
    else:
        userName = 'system'
    accountingDict = {
        'OperationType': operation,
        'User': userName,
        'Protocol': 'DataManager',
        'RegistrationTime': 0.0,
        'RegistrationOK': 0,
        'RegistrationTotal': 0,
        'Destination': se,
        'TransferTotal': files,
        'TransferOK': files,
        'TransferSize': files,
        'TransferTime': 0.0,
        'FinalStatus': 'Successful',
        'Source': DIRAC.siteName(),
    }
    oDataOperation = DataOperation()
    oDataOperation.setValuesFromDict(accountingDict)
    return oDataOperation
class DataManager(object):
"""
.. class:: DataManager
A DataManager is taking all the actions that impact or require the FileCatalog and the StorageElement together
"""
def __init__(self, catalogs=None, masterCatalogOnly=False, vo=False):
    """ c'tor

        :param self: self reference
        :param catalogs: the list of catalog in which to perform the operations. This
                         list will be ignored if masterCatalogOnly is set to True
        :param masterCatalogOnly: if set to True, the operations will be performed only on the master catalog.
                         The catalogs parameter will be ignored.
        :param vo: the VO for which the DataManager is created, get VO from the current proxy if not specified
    """
    self.log = gLogger.getSubLogger(self.__class__.__name__, True)
    self.voName = vo
    if catalogs is None:
        catalogs = []
    # Master-catalog mode overrides any explicitly supplied catalog list.
    catalogsToUse = FileCatalog(vo=self.voName).getMasterCatalogNames()[
        'Value'] if masterCatalogOnly else catalogs
    self.fileCatalog = FileCatalog(catalogs=catalogsToUse, vo=self.voName)
    # Accounting client is injected later via setAccountingClient().
    self.accountingClient = None
    self.resourceStatus = ResourceStatus()
    # Operations-level switches controlling catalog/PFN handling.
    self.ignoreMissingInFC = Operations(vo=self.voName).getValue(
        'DataManagement/IgnoreMissingInFC', False)
    self.useCatalogPFN = Operations(vo=self.voName).getValue(
        'DataManagement/UseCatalogPFN', True)
    self.dmsHelper = DMSHelpers(vo=vo)
    self.registrationProtocol = self.dmsHelper.getRegistrationProtocols()
    self.thirdPartyProtocols = self.dmsHelper.getThirdPartyProtocols()
def setAccountingClient(self, client):
""" Set Accounting Client instance
"""
self.accountingClient = client
def __hasAccess(self, opType, path):
    """ Check if we have permission to execute given operation on the given file (if exists) or its directory
    """
    # Normalise the argument to a list of paths.
    paths = [path] if isinstance(path, basestring) else list(path)
    res = self.fileCatalog.hasAccess(paths, opType)
    if not res['OK']:
        return res
    allowedMap = res['Value']['Successful']
    # Partition paths into allowed / denied buckets.
    result = {'Successful': [], 'Failed': []}
    for candidate in paths:
        bucket = 'Successful' if allowedMap.get(candidate, False) else 'Failed'
        result[bucket].append(candidate)
    return S_OK(result)
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory(self, lfnDir):
    """Remove the given logical directory(ies) from both catalog and storage.

    :param lfnDir: a single directory LFN or a list of them
    :return: S_OK with per-directory 'Successful'/'Failed' maps
    """
    log = self.log.getSubLogger('cleanLogicalDirectory')
    folders = [lfnDir] if isinstance(lfnDir, basestring) else lfnDir
    retDict = {"Successful": {}, "Failed": {}}
    for folder in folders:
        res = self.__cleanDirectory(folder)
        if res['OK']:
            log.debug("Successfully removed directory.", folder)
            retDict["Successful"][folder] = res['Value']
        else:
            log.debug("Failed to clean directory.", "%s %s" %
                      (folder, res['Message']))
            retDict["Failed"][folder] = res['Message']
    return S_OK(retDict)
def __cleanDirectory(self, folder):
    """ delete all files from directory :folder: in FileCatalog and StorageElement

        :param self: self reference
        :param str folder: directory name
    """
    log = self.log.getSubLogger('__cleanDirectory')
    # Refuse to proceed without write permission on the folder.
    res = self.__hasAccess('removeDirectory', folder)
    if not res['OK']:
        return res
    if folder not in res['Value']['Successful']:
        errStr = "Write access not permitted for this credential."
        log.debug(errStr, folder)
        return S_ERROR(errStr)
    # Remove every file registered below the folder.
    res = self.__getCatalogDirectoryContents([folder])
    if not res['OK']:
        return res
    res = self.removeFile(res['Value'])
    if not res['OK']:
        return res
    for lfn, reason in res['Value']['Failed'].iteritems():
        log.error("Failed to remove file found in the catalog",
                  "%s %s" % (lfn, reason))
    # Best effort: drop the dirac_directory placeholder; only warn when the
    # failure is something other than 'file not found'.
    res = returnSingleResult(self.removeFile(['%s/dirac_directory' % folder]))
    if not res['OK']:
        if not DErrno.cmpError(res, errno.ENOENT):
            log.warn('Failed to delete dirac_directory placeholder file')
    # Physically clean the directory on every SE in the cleaning list.
    storageElements = gConfig.getValue(
        'Resources/StorageElementGroups/SE_Cleaning_List', [])
    failed = False
    for storageElement in sorted(storageElements):
        res = self.__removeStorageDirectory(folder, storageElement)
        if not res['OK']:
            failed = True
    if failed:
        return S_ERROR("Failed to clean storage directory at all SEs")
    # Finally remove the directory entry itself from the catalog.
    res = returnSingleResult(
        self.fileCatalog.removeDirectory(folder, recursive=True))
    if not res['OK']:
        return res
    return S_OK()
def __removeStorageDirectory(self, directory, storageElement):
    """ delete SE directory

        :param self: self reference
        :param str directory: folder to be removed
        :param str storageElement: DIRAC SE name
    """
    log = self.log.getSubLogger('__removeStorageDirectory')
    se = StorageElement(storageElement, vo=self.voName)
    res = returnSingleResult(se.exists(directory))
    if not res['OK']:
        log.debug("Failed to obtain existance of directory", res['Message'])
        return res
    if not res['Value']:
        # Nothing to do when the directory is absent on this SE.
        log.debug("The directory %s does not exist at %s " %
                  (directory, storageElement))
        return S_OK()
    res = returnSingleResult(se.removeDirectory(directory, recursive=True))
    if not res['OK']:
        log.debug("Failed to remove storage directory", res['Message'])
        return res
    log.debug("Successfully removed %d files from %s at %s" % (res['Value']['FilesRemoved'],
                                                               directory,
                                                               storageElement))
    return S_OK()
def __getCatalogDirectoryContents(self, directories):
    """ ls recursively all files in directories

        :param self: self reference
        :param list directories: folder names
        :return: S_OK( { lfn : metadata } ) for every file found below the directories
    """
    log = self.log.getSubLogger('__getCatalogDirectoryContents')
    log.debug('Obtaining the catalog contents for %d directories:' %
              len(directories))
    # BUG FIX: iterate over a copy -- the original code aliased the caller's
    # list and emptied it as a side effect while traversing.
    activeDirs = list(directories)
    allFiles = {}
    while activeDirs:
        currentDir = activeDirs.pop(0)
        res = returnSingleResult(
            self.fileCatalog.listDirectory(currentDir, verbose=True))
        if not res['OK']:
            log.debug("Problem getting the %s directory content" %
                      currentDir, res['Message'])
        else:
            dirContents = res['Value']
            # Breadth-first descent into sub-directories.
            activeDirs.extend(dirContents['SubDirs'])
            allFiles.update(dirContents['Files'])
    log.debug("Found %d files" % len(allFiles))
    return S_OK(allFiles)
def getReplicasFromDirectory(self, directory):
    """ get all replicas from a given directory

        :param self: self reference
        :param mixed directory: list of directories or one directory
    """
    directories = [directory] if isinstance(directory, basestring) else directory
    res = self.__getCatalogDirectoryContents(directories)
    if not res['OK']:
        return res
    # Keep only the replica information from each file's metadata.
    allReplicas = {}
    for lfn, metadata in res['Value'].iteritems():
        allReplicas[lfn] = metadata['Replicas']
    return S_OK(allReplicas)
def getFilesFromDirectory(self, directory, days=0, wildcard='*'):
    """ get all files from :directory: older than :days: days matching to :wildcard:

        :param self: self reference
        :param mixed directory: list of directories or directory name
        :param int days: ctime days
        :param str wildcard: pattern to match
    """
    if isinstance(directory, basestring):
        directories = [directory]
    else:
        directories = directory
    log = self.log.getSubLogger('getFilesFromDirectory')
    log.debug("Obtaining the files older than %d days in %d directories:" %
              (days, len(directories)))
    for folder in directories:
        log.debug(folder)
    # BUG FIX: work on a copy -- the original aliased the caller's list and
    # both consumed it and appended sub-directories to it as a side effect.
    activeDirs = list(directories)
    allFiles = []
    while activeDirs:
        currentDir = activeDirs.pop(0)
        # We only need the metadata (verbose) if a limit date is given
        res = returnSingleResult(
            self.fileCatalog.listDirectory(currentDir, verbose=(days != 0)))
        if not res['OK']:
            log.debug("Error retrieving directory contents", "%s %s" %
                      (currentDir, res['Message']))
            continue
        dirContents = res['Value']
        subdirs = dirContents['SubDirs']
        files = dirContents['Files']
        log.debug("%s: %d files, %d sub-directories" %
                  (currentDir, len(files), len(subdirs)))
        # Recurse into sub-directories that pass the age cut.
        for subdir in subdirs:
            if (not days) or _isOlderThan(subdirs[subdir]['CreationDate'], days):
                if subdir[0] != '/':
                    subdir = currentDir + '/' + subdir
                activeDirs.append(subdir)
        # Collect files passing both the age cut and the wildcard.
        for fileName in files:
            fileInfo = files[fileName]
            fileInfo = fileInfo.get('Metadata', fileInfo)
            if (not days) or not fileInfo.get('CreationDate') or _isOlderThan(fileInfo['CreationDate'], days):
                if wildcard == '*' or fnmatch.fnmatch(fileName, wildcard):
                    fileName = fileInfo.get('LFN', fileName)
                    allFiles.append(fileName)
    return S_OK(allFiles)
##########################################################################
#
# These are the data transfer methods
#
def getFile(self, lfn, destinationDir='', sourceSE=None):
    """ Get a local copy of a LFN from Storage Elements.

        'lfn' is the logical file name for the desired file

        :param destinationDir: local directory the copy is written to
        :param sourceSE: restrict the download to this SE if given
    """
    log = self.log.getSubLogger('getFile')
    # Accept a single LFN or a list of LFNs.
    if isinstance(lfn, list):
        lfns = lfn
    elif isinstance(lfn, basestring):
        lfns = [lfn]
    else:
        errStr = "Supplied lfn must be string or list of strings."
        log.debug(errStr)
        return S_ERROR(errStr)
    log.debug("Attempting to get %s files." % len(lfns))
    # First locate usable replicas, then the catalog metadata needed for
    # the size/checksum verification done in __getFile.
    res = self.getActiveReplicas(lfns, getUrl=False)
    if not res['OK']:
        return res
    failed = res['Value']['Failed']
    lfnReplicas = res['Value']['Successful']
    res = self.fileCatalog.getFileMetadata(lfnReplicas.keys())
    if not res['OK']:
        return res
    failed.update(res['Value']['Failed'])
    fileMetadata = res['Value']['Successful']
    successful = {}
    # Download each file individually, collecting per-LFN outcomes.
    for lfn in fileMetadata:
        res = self.__getFile(
            lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir, sourceSE=sourceSE)
        if not res['OK']:
            failed[lfn] = res['Message']
        else:
            successful[lfn] = res['Value']
    return S_OK({'Successful': successful, 'Failed': failed})
def __getFile(self, lfn, replicas, metadata, destinationDir, sourceSE=None):
    """
    Method actually doing the job to get a file from storage

    :param lfn: logical file name
    :param replicas: {SE: replica} map of candidate replicas
    :param metadata: catalog metadata (must carry 'Size' and 'Checksum')
    :param destinationDir: local target directory
    :param sourceSE: restrict the download to this SE if given
    """
    log = self.log.getSubLogger('__getFile')
    if not replicas:
        errStr = "No accessible replicas found"
        log.debug(errStr)
        return S_ERROR(errStr)
    # Determine the best replicas
    errTuple = ("No SE", "found")
    # BUG FIX: always define errToReturn -- if the SE list below turns out
    # empty (e.g. every replica gets filtered away) the original code hit
    # an UnboundLocalError instead of returning an error result.
    errToReturn = S_ERROR("%s %s" % errTuple)
    if sourceSE is None:
        sortedSEs = self._getSEProximity(replicas)
    else:
        if sourceSE not in replicas:
            return S_ERROR('No replica at %s' % sourceSE)
        else:
            sortedSEs = [sourceSE]
    for storageElementName in sortedSEs:
        se = StorageElement(storageElementName, vo=self.voName)
        res = returnSingleResult(se.getFile(
            lfn, localPath=os.path.realpath(destinationDir)))
        if not res['OK']:
            errTuple = ("Error getting file from storage:", "%s from %s, %s" %
                        (lfn, storageElementName, res['Message']))
            errToReturn = res
        else:
            localFile = os.path.realpath(os.path.join(
                destinationDir, os.path.basename(lfn)))
            localAdler = fileAdler(localFile)
            # Verify the download against the catalog size and checksum.
            if metadata['Size'] != res['Value']:
                errTuple = ("Mismatch of sizes:", "downloaded = %d, catalog = %d" %
                            (res['Value'], metadata['Size']))
                errToReturn = S_ERROR(DErrno.EFILESIZE, errTuple[1])
            elif (metadata['Checksum']) and (not compareAdler(metadata['Checksum'], localAdler)):
                errTuple = ("Mismatch of checksums:", "downloaded = %s, catalog = %s" %
                            (localAdler, metadata['Checksum']))
                errToReturn = S_ERROR(DErrno.EBADCKS, errTuple[1])
            else:
                return S_OK(localFile)
        # If we are here, there was an error, log it debug level
        log.debug(errTuple[0], errTuple[1])
    log.verbose("Failed to get local copy from any replicas:",
                "\n%s %s" % errTuple)
    return errToReturn
def _getSEProximity(self, replicas):
    """Order the replica SEs by proximity: SEs at the local site first,
    then SEs in the same country, then everything else; each group is
    randomized to spread the load."""
    siteName = DIRAC.siteName()
    # Helper defined elsewhere in this class; its return value is unused,
    # so it presumably prunes 'replicas' in place -- confirm at definition.
    self.__filterTapeSEs(replicas)
    localSEs = [se for se in self.dmsHelper.getSEsAtSite(
        siteName).get('Value', []) if se in replicas]
    countryCode = str(siteName).split('.')[-1]
    countrySEs = []
    res = self.dmsHelper.getSEsAtCountry(countryCode)
    if res['OK']:
        countrySEs = [se for se in res['Value']
                      if se in replicas and se not in localSEs]
    sortedSEs = randomize(localSEs) + randomize(countrySEs)
    remaining = [se for se in replicas if se not in sortedSEs]
    return sortedSEs + randomize(remaining)
def putAndRegister(self, lfn, fileName, diracSE, guid=None, path=None,
                   checksum=None, overwrite=False):
    """ Put a local file to a Storage Element and register in the File Catalogues

        'lfn' is the file LFN
        'file' is the full path to the local file
        'diracSE' is the Storage Element to which to put the file
        'guid' is the guid with which the file is to be registered (if not provided will be generated)
        'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
        'overwrite' removes file from the file catalogue and SE before attempting upload
    """
    # Permission check first: never touch storage without write access.
    res = self.__hasAccess('addFile', lfn)
    if not res['OK']:
        return res
    log = self.log.getSubLogger('putAndRegister')
    if lfn not in res['Value']['Successful']:
        errStr = "Write access not permitted for this credential."
        log.debug(errStr, lfn)
        return S_ERROR(errStr)
    # Check that the local file exists
    if not os.path.exists(fileName):
        errStr = "Supplied file does not exist."
        log.debug(errStr, fileName)
        return S_ERROR(errStr)
    # If the path is not provided then use the LFN path
    if not path:
        path = os.path.dirname(lfn)
    # Obtain the size of the local file
    size = getSize(fileName)
    if size == 0:
        errStr = "Supplied file is zero size."
        log.debug(errStr, fileName)
        return S_ERROR(errStr)
    # If the GUID is not given, generate it here
    if not guid:
        guid = makeGuid(fileName)
    if not checksum:
        log.debug("Checksum information not provided. Calculating adler32.")
        checksum = fileAdler(fileName)
        # Make another try
        if not checksum:
            log.debug("Checksum calculation failed, try again")
            checksum = fileAdler(fileName)
        if checksum:
            log.debug("Checksum calculated to be %s." % checksum)
        else:
            return S_ERROR(DErrno.EBADCKS, "Unable to calculate checksum")
    # Guard against LFN or GUID collisions in the catalog.
    res = self.fileCatalog.exists({lfn: guid})
    if not res['OK']:
        errStr = "Completely failed to determine existence of destination LFN."
        log.debug(errStr, lfn)
        return res
    if lfn not in res['Value']['Successful']:
        errStr = "Failed to determine existence of destination LFN."
        log.debug(errStr, lfn)
        return S_ERROR(errStr)
    if res['Value']['Successful'][lfn]:
        if res['Value']['Successful'][lfn] == lfn:
            if overwrite:
                resRm = self.removeFile(lfn, force=True)
                if not resRm['OK']:
                    errStr = "Failed to prepare file for overwrite"
                    log.debug(errStr, lfn)
                    return resRm
                if lfn not in resRm['Value']['Successful']:
                    errStr = "Failed to either delete file or LFN"
                    log.debug(errStr, lfn)
                    return S_ERROR("%s %s" % (errStr, lfn))
            else:
                errStr = "The supplied LFN already exists in the File Catalog."
                log.debug(errStr, lfn)
                return S_ERROR("%s %s" % (errStr, res['Value']['Successful'][lfn]))
        else:
            # If the returned LFN is different, this is the name of a file
            # with the same GUID
            errStr = "This file GUID already exists for another file"
            log.debug(errStr, res['Value']['Successful'][lfn])
            return S_ERROR("%s %s" % (errStr, res['Value']['Successful'][lfn]))
    ##########################################################
    # Instantiate the destination storage element here.
    storageElement = StorageElement(diracSE, vo=self.voName)
    res = storageElement.isValid()
    if not res['OK']:
        errStr = "The storage element is not currently valid."
        log.verbose(errStr, "%s %s" % (diracSE, res['Message']))
        return S_ERROR("%s %s" % (errStr, res['Message']))
    fileDict = {lfn: fileName}
    successful = {}
    failed = {}
    ##########################################################
    # Perform the put here.
    oDataOperation = _initialiseAccountingObject('putAndRegister', diracSE, 1)
    oDataOperation.setStartTime()
    oDataOperation.setValueByKey('TransferSize', size)
    startTime = time.time()
    res = returnSingleResult(storageElement.putFile(fileDict))
    putTime = time.time() - startTime
    oDataOperation.setValueByKey('TransferTime', putTime)
    if not res['OK']:
        # We don't consider it a failure if the SE is not valid
        if not DErrno.cmpError(res, errno.EACCES):
            oDataOperation.setValueByKey('TransferOK', 0)
            oDataOperation.setValueByKey('FinalStatus', 'Failed')
            oDataOperation.setEndTime()
            gDataStoreClient.addRegister(oDataOperation)
            # BUG FIX: start the clock *before* commit() -- the original set
            # startTime only after the commit, so the logged duration was ~0.
            startTime = time.time()
            gDataStoreClient.commit()
            log.debug('putAndRegister: Sending accounting took %.1f seconds' %
                      (time.time() - startTime))
        errStr = "Failed to put file to Storage Element."
        log.debug(errStr, "%s: %s" % (fileName, res['Message']))
        return S_ERROR("%s %s" % (errStr, res['Message']))
    successful[lfn] = {'put': putTime}
    ###########################################################
    # Perform the registration here
    destinationSE = storageElement.storageElementName()
    res = returnSingleResult(storageElement.getURL(
        lfn, protocol=self.registrationProtocol))
    if not res['OK']:
        errStr = "Failed to generate destination PFN."
        log.debug(errStr, res['Message'])
        return S_ERROR("%s %s" % (errStr, res['Message']))
    destUrl = res['Value']
    oDataOperation.setValueByKey('RegistrationTotal', 1)
    fileTuple = (lfn, destUrl, size, destinationSE, guid, checksum)
    registerDict = {'LFN': lfn, 'PFN': destUrl, 'Size': size,
                    'TargetSE': destinationSE, 'GUID': guid, 'Addler': checksum}
    startTime = time.time()
    res = self.registerFile(fileTuple)
    registerTime = time.time() - startTime
    oDataOperation.setValueByKey('RegistrationTime', registerTime)
    if not res['OK']:
        errStr = "Completely failed to register file."
        log.debug(errStr, res['Message'])
        failed[lfn] = {'register': registerDict}
        oDataOperation.setValueByKey('FinalStatus', 'Failed')
    elif lfn in res['Value']['Failed']:
        errStr = "Failed to register file."
        log.debug(errStr, "%s %s" % (lfn, res['Value']['Failed'][lfn]))
        oDataOperation.setValueByKey('FinalStatus', 'Failed')
        failed[lfn] = {'register': registerDict}
    else:
        successful[lfn]['register'] = registerTime
        oDataOperation.setValueByKey('RegistrationOK', 1)
    oDataOperation.setEndTime()
    gDataStoreClient.addRegister(oDataOperation)
    startTime = time.time()
    gDataStoreClient.commit()
    log.debug('Sending accounting took %.1f seconds' %
              (time.time() - startTime))
    return S_OK({'Successful': successful, 'Failed': failed})
def replicateAndRegister(self, lfn, destSE, sourceSE='', destPath='', localCache='', catalog=''):
    """ Replicate a LFN to a destination SE and register the replica.

        'lfn' is the LFN to be replicated
        'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if to be different from LHCb convention
        'localCache' is the local file system location to be used as a temporary cache
        'catalog' optionally restricts the registration to one catalog
    """
    log = self.log.getSubLogger('replicateAndRegister')
    successful = {}
    failed = {}
    log.debug("Attempting to replicate %s to %s." % (lfn, destSE))
    startReplication = time.time()
    res = self.__replicate(lfn, destSE, sourceSE, destPath, localCache)
    replicationTime = time.time() - startReplication
    if not res['OK']:
        errStr = "Completely failed to replicate file."
        log.debug(errStr, res['Message'])
        return S_ERROR("%s %s" % (errStr, res['Message']))
    if not res['Value']:
        # The file was already present at the destination SE
        log.debug("%s already present at %s." % (lfn, destSE))
        successful[lfn] = {'replicate': 0, 'register': 0}
        resDict = {'Successful': successful, 'Failed': failed}
        return S_OK(resDict)
    successful[lfn] = {'replicate': replicationTime}
    # __replicate reports where the replica actually landed.
    destPfn = res['Value']['DestPfn']
    destSE = res['Value']['DestSE']
    log.debug("Attempting to register %s at %s." % (destPfn, destSE))
    replicaTuple = (lfn, destPfn, destSE)
    startRegistration = time.time()
    res = self.registerReplica(replicaTuple, catalog=catalog)
    registrationTime = time.time() - startRegistration
    if not res['OK']:
        # Need to return to the client that the file was replicated but not
        # registered
        errStr = "Completely failed to register replica."
        log.debug(errStr, res['Message'])
        failed[lfn] = {'Registration': {
            'LFN': lfn, 'TargetSE': destSE, 'PFN': destPfn}}
    else:
        if lfn in res['Value']['Successful']:
            log.debug("Successfully registered replica.")
            successful[lfn]['register'] = registrationTime
        else:
            errStr = "Failed to register replica."
            log.debug(errStr, res['Value']['Failed'][lfn])
            failed[lfn] = {'Registration': {
                'LFN': lfn, 'TargetSE': destSE, 'PFN': destPfn}}
    return S_OK({'Successful': successful, 'Failed': failed})
def replicate(self, lfn, destSE, sourceSE='', destPath='', localCache=''):
    """ Replicate a LFN to a destination SE and register the replica.

        'lfn' is the LFN to be replicated
        'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if to be different from LHCb convention
        'localCache' is the local file system location to be used as a temporary cache
    """
    log = self.log.getSubLogger('replicate')
    log.debug("Attempting to replicate %s to %s." % (lfn, destSE))
    res = self.__replicate(lfn, destSE, sourceSE, destPath, localCache)
    if not res['OK']:
        log.debug("Replication failed.", "%s %s" % (lfn, destSE))
        return res
    if not res['Value']:
        # The file was already present at the destination SE
        log.debug("%s already present at %s." % (lfn, destSE))
        return res
    return S_OK(lfn)
def __getSERealName(self, storageName):
    """ get the base name of an SE possibly defined as an alias"""
    rootConfigPath = '/Resources/StorageElements'
    configPath = '%s/%s' % (rootConfigPath, storageName)
    res = gConfig.getOptions(configPath)
    if not res['OK']:
        return S_ERROR("Failed to get storage options")
    if not res['Value']:
        return S_ERROR("Supplied storage doesn't exist.")
    if 'Alias' not in res['Value']:
        # Not an alias: the supplied name is already the real one.
        return S_OK(storageName)
    # Follow the alias chain recursively until a concrete SE is reached.
    aliasName = gConfig.getValue(configPath + '/Alias')
    result = self.__getSERealName(aliasName)
    if not result['OK']:
        return result
    return S_OK(result['Value'])
def __isSEInList(self, seName, seList):
    """ Check whether an SE is in a list of SEs... All could be aliases """
    # Resolve each list entry to its real SE name, skipping unresolvable ones.
    resolved = set()
    for candidate in seList:
        res = self.__getSERealName(candidate)
        if res['OK']:
            resolved.add(res['Value'])
    return self.__getSERealName(seName).get('Value') in resolved
def __replicate(self, lfn, destSEName, sourceSEName='', destPath='', localCache=''):
  """ Replicate a LFN to a destination SE.

      'lfn' is the LFN to be replicated
      'destSE' is the Storage Element the file should be replicated to
      'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
      'destPath' is the path on the destination storage element, if to be different from LHCb convention
      'localCache' if cannot do third party transfer, we do get and put through this local directory

      :return: S_OK({'DestSE': ..., 'DestPfn': ...}) on success, S_OK() if the file
               is already registered at the destination, S_ERROR otherwise
  """
  log = self.log.getSubLogger('__replicate', True)
  ###########################################################
  # Check that we have write permissions to this directory.
  res = self.__hasAccess('addReplica', lfn)
  if not res['OK']:
    return res
  if lfn not in res['Value']['Successful']:
    errStr = "__replicate: Write access not permitted for this credential."
    log.debug(errStr, lfn)
    return S_ERROR(errStr)
  # Check that the destination storage element is sane and resolve its name
  log.debug("Verifying destination StorageElement validity (%s)." %
            (destSEName))
  destStorageElement = StorageElement(destSEName, vo=self.voName)
  res = destStorageElement.isValid()
  if not res['OK']:
    errStr = "The storage element is not currently valid."
    log.verbose(errStr, "%s %s" % (destSEName, res['Message']))
    return S_ERROR("%s %s" % (errStr, res['Message']))
  # Get the real name of the SE
  destSEName = destStorageElement.storageElementName()
  ###########################################################
  # Check whether the destination storage element is banned
  log.verbose(
      "Determining whether %s ( destination ) is Write-banned." % destSEName)
  if not destStorageElement.status()['Write']:
    infoStr = "Supplied destination Storage Element is not currently allowed for Write."
    log.debug(infoStr, destSEName)
    return S_ERROR(infoStr)
  # Get the LFN replicas from the file catalog
  log.debug("Attempting to obtain replicas for %s." % (lfn))
  res = returnSingleResult(self.getReplicas(lfn, getUrl=False))
  if not res['OK']:
    errStr = "Failed to get replicas for LFN."
    log.debug(errStr, "%s %s" % (lfn, res['Message']))
    return S_ERROR("%s %s" % (errStr, res['Message']))
  log.debug("Successfully obtained replicas for LFN.")
  lfnReplicas = res['Value']
  ###########################################################
  # If the file catalog size is zero fail the transfer
  log.debug("Attempting to obtain size for %s." % lfn)
  res = returnSingleResult(self.fileCatalog.getFileSize(lfn))
  if not res['OK']:
    errStr = "Failed to get size for LFN."
    log.debug(errStr, "%s %s" % (lfn, res['Message']))
    return S_ERROR("%s %s" % (errStr, res['Message']))
  catalogSize = res['Value']
  if catalogSize == 0:
    errStr = "Registered file size is 0."
    log.debug(errStr, lfn)
    return S_ERROR(errStr)
  log.debug("File size determined to be %s." % catalogSize)
  ###########################################################
  # If the LFN already exists at the destination we have nothing to do
  if self.__isSEInList(destSEName, lfnReplicas):
    log.debug("__replicate: LFN is already registered at %s." % destSEName)
    return S_OK()
  ###########################################################
  # If the source is specified, check that it is in the replicas
  if sourceSEName:
    log.debug("Determining whether source Storage Element specified is sane.")
    if sourceSEName not in lfnReplicas:
      errStr = "LFN does not exist at supplied source SE."
      log.error(errStr, "%s %s" % (lfn, sourceSEName))
      return S_ERROR(errStr)
  # If sourceSE is specified, then we consider this one only, otherwise
  # we consider them all
  possibleSourceSEs = [sourceSEName] if sourceSEName else lfnReplicas
  # We sort the possibileSourceSEs with the SEs that are on the same site than the destination first
  # reverse = True because True > False
  possibleSourceSEs = sorted(possibleSourceSEs,
                             key=lambda x: self.dmsHelper.isSameSiteSE(
                                 x, destSEName).get('Value', False),
                             reverse=True)
  # In case we manage to find SEs that would work as a source, but we can't negotiate a protocol
  # we will do a get and put using one of this sane SE
  possibleIntermediateSEs = []
  # Take into account the destination path
  if destPath:
    destPath = '%s/%s' % (destPath, os.path.basename(lfn))
  else:
    destPath = lfn
  for candidateSEName in possibleSourceSEs:
    log.debug("Consider %s as a source" % candidateSEName)
    # Check that the candidate is active
    if not self.__checkSEStatus(candidateSEName, status='Read'):
      log.debug("%s is currently not allowed as a source." % candidateSEName)
      continue
    else:
      log.debug("%s is available for use." % candidateSEName)
    candidateSE = StorageElement(candidateSEName, vo=self.voName)
    # Check that the SE is valid
    res = candidateSE.isValid()
    if not res['OK']:
      log.verbose("The storage element is not currently valid.",
                  "%s %s" % (candidateSEName, res['Message']))
      continue
    else:
      log.debug("The storage is currently valid", candidateSEName)
    # Check that the file size corresponds to the one in the FC
    res = returnSingleResult(candidateSE.getFileSize(lfn))
    if not res['OK']:
      log.debug("could not get fileSize on %s" %
                candidateSEName, res['Message'])
      continue
    seFileSize = res['Value']
    if seFileSize != catalogSize:
      log.debug("Catalog size and physical file size mismatch.",
                "%s %s" % (catalogSize, seFileSize))
      continue
    else:
      log.debug("Catalog size and physical size match")
    res = destStorageElement.negociateProtocolWithOtherSE(
        candidateSE, protocols=self.thirdPartyProtocols)
    if not res['OK']:
      log.debug("Error negotiating replication protocol", res['Message'])
      continue
    replicationProtocols = res['Value']
    if not replicationProtocols:
      # No common third-party protocol: keep this SE as a get/put fallback
      possibleIntermediateSEs.append(candidateSE)
      log.debug("No protocol suitable for replication found")
      continue
    log.debug('Found common protocols', replicationProtocols)
    # THIS WOULD NOT WORK IF PROTO == file !!
    # Why did I write that comment ?!
    # We try the protocols one by one
    # That obviously assumes that there is an overlap and not only
    # a compatibility between the output protocols of the source
    # and the input protocols of the destination.
    # But that is the only way to make sure we are not replicating
    # over ourselves.
    for compatibleProtocol in replicationProtocols:
      # Compare the urls to make sure we are not overwriting
      res = returnSingleResult(candidateSE.getURL(
          lfn, protocol=compatibleProtocol))
      if not res['OK']:
        log.debug("Cannot get sourceURL", res['Message'])
        continue
      sourceURL = res['Value']
      destURL = ''
      res = returnSingleResult(destStorageElement.getURL(
          destPath, protocol=compatibleProtocol))
      if not res['OK']:
        # for some protocols, in particular srm
        # you might get an error because the file does not exist
        # which is exactly what we want
        # in that case, we just keep going with the comparison
        # since destURL will be an empty string
        if not DErrno.cmpError(res, errno.ENOENT):
          log.debug("Cannot get destURL", res['Message'])
          continue
        log.debug("File does not exist: Expected error for TargetSE !!")
      else:
        destURL = res['Value']
      if sourceURL == destURL:
        log.debug("Same source and destination, give up")
        continue
      # Attempt the transfer
      res = returnSingleResult(destStorageElement.replicateFile({destPath: sourceURL},
                                                                sourceSize=catalogSize,
                                                                inputProtocol=compatibleProtocol))
      if not res['OK']:
        log.debug("Replication failed", "%s from %s to %s." %
                  (lfn, candidateSEName, destSEName))
        continue
      log.debug("Replication successful.", res['Value'])
      res = returnSingleResult(destStorageElement.getURL(
          destPath, protocol=self.registrationProtocol))
      if not res['OK']:
        log.debug('Error getting the registration URL', res['Message'])
        # it's maybe pointless to try the other candidateSEs...
        continue
      registrationURL = res['Value']
      return S_OK({'DestSE': destSEName, 'DestPfn': registrationURL})
  # If we are here, that means that we could not make a third party transfer.
  # Check if we have some sane SEs from which we could do a get/put
  localDir = os.path.realpath(localCache if localCache else '.')
  localFile = os.path.join(localDir, os.path.basename(lfn))
  log.debug("Will try intermediate transfer from %s sources" %
            len(possibleIntermediateSEs))
  for candidateSE in possibleIntermediateSEs:
    res = returnSingleResult(candidateSE.getFile(lfn, localPath=localDir))
    if not res['OK']:
      log.debug('Error getting the file from %s' %
                candidateSE.name, res['Message'])
      continue
    res = returnSingleResult(
        destStorageElement.putFile({destPath: localFile}))
    # Remove the local file whatever happened
    try:
      os.remove(localFile)
    except OSError as e:
      log.error('Error removing local file', '%s %s' % (localFile, e))
    if not res['OK']:
      log.debug('Error putting file coming from %s' %
                candidateSE.name, res['Message'])
      # if the put is the problem, it's maybe pointless to try the other
      # candidateSEs...
      continue
    # get URL with default protocol to return it
    res = returnSingleResult(destStorageElement.getURL(
        destPath, protocol=self.registrationProtocol))
    if not res['OK']:
      log.debug('Error getting the registration URL', res['Message'])
      # it's maybe pointless to try the other candidateSEs...
      continue
    registrationURL = res['Value']
    return S_OK({'DestSE': destSEName, 'DestPfn': registrationURL})
  # If here, we are really doomed
  errStr = "Failed to replicate with all sources."
  log.debug(errStr, lfn)
  return S_ERROR(errStr)
###################################################################
#
# These are the file catalog write methods
#
def registerFile(self, fileTuple, catalog=''):
  """ Register a file or a list of files

  :param self: self reference
  :param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
  :param str catalog: catalog name

  :return: S_OK with 'Successful'/'Failed' containers, or S_ERROR on invalid input
  """
  log = self.log.getSubLogger('registerFile')
  if isinstance(fileTuple, (list, set)):
    fileTuples = fileTuple
  elif isinstance(fileTuple, tuple):
    fileTuples = [fileTuple]
  else:
    # BUG FIX: previously any other type (e.g. a string) left 'fileTuples'
    # unbound and raised a NameError; fail explicitly instead.
    errStr = "Supplied file info must be tuple or list of tuples."
    log.debug(errStr)
    return S_ERROR(errStr)
  for fileTuple in fileTuples:
    if not isinstance(fileTuple, tuple):
      errStr = "Supplied file info must be tuple or list of tuples."
      log.debug(errStr)
      return S_ERROR(errStr)
  if not fileTuples:
    return S_OK({'Successful': [], 'Failed': {}})
  log.debug("Attempting to register %s files." % len(fileTuples))
  res = self.__registerFile(fileTuples, catalog)
  if not res['OK']:
    # Log the failure; the result is returned unchanged either way
    log.debug("Completely failed to register files.", res['Message'])
  return res
def __registerFile(self, fileTuples, catalog):
  """ Register the supplied file tuples in the requested catalog
      (or the default one when no catalog name is given). """
  # Build the {lfn: metadata} mapping expected by FileCatalog.addFile
  fileDict = {}
  for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
    fileDict[lfn] = {
        'PFN': physicalFile,
        'Size': fileSize,
        'SE': storageElementName,
        'GUID': fileGuid,
        'Checksum': checksum,
    }
  if not catalog:
    fileCatalog = self.fileCatalog
  else:
    fileCatalog = FileCatalog(catalog, vo=self.voName)
    if not fileCatalog.isOK():
      return S_ERROR("Can't get FileCatalog %s" % catalog)
  res = fileCatalog.addFile(fileDict)
  if not res['OK']:
    errStr = "Completely failed to register files."
    self.log.getSubLogger('__registerFile').debug(errStr, res['Message'])
  return res
def registerReplica(self, replicaTuple, catalog=''):
  """ Register a replica (or list of) supplied in the replicaTuples.

      'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)

  :return: S_OK with 'Successful'/'Failed' containers, or S_ERROR on invalid input
  """
  log = self.log.getSubLogger('registerReplica')
  if isinstance(replicaTuple, (list, set)):
    replicaTuples = replicaTuple
  elif isinstance(replicaTuple, tuple):
    replicaTuples = [replicaTuple]
  else:
    # BUG FIX: previously any other type (e.g. a string) left 'replicaTuples'
    # unbound and raised a NameError; fail explicitly instead.
    errStr = "Supplied file info must be tuple or list of tuples."
    log.debug(errStr)
    return S_ERROR(errStr)
  for replicaTuple in replicaTuples:
    if not isinstance(replicaTuple, tuple):
      errStr = "Supplied file info must be tuple or list of tuples."
      log.debug(errStr)
      return S_ERROR(errStr)
  if not replicaTuples:
    return S_OK({'Successful': [], 'Failed': {}})
  log.debug("Attempting to register %s replicas." % len(replicaTuples))
  res = self.__registerReplica(replicaTuples, catalog)
  if not res['OK']:
    # Log the failure; the result is returned unchanged either way
    log.debug("Completely failed to register replicas.", res['Message'])
  return res
def __registerReplica(self, replicaTuples, catalog):
  """ Register replicas in the catalogue.

      :param replicaTuples: list of (lfn, url, storageElementName)
      :param catalog: optional catalog name; default catalog used when empty
      :return: S_OK({'Successful': ..., 'Failed': ...}) or S_ERROR
  """
  log = self.log.getSubLogger('__registerReplica')
  # Group the (lfn, url) pairs by SE so each SE is validated only once
  seDict = {}
  for lfn, url, storageElementName in replicaTuples:
    seDict.setdefault(storageElementName, []).append((lfn, url))
  failed = {}
  # 'replicaTuples' is rebuilt below with resolved SE names and URLs
  replicaTuples = []
  for storageElementName, replicaTuple in seDict.iteritems():
    destStorageElement = StorageElement(storageElementName, vo=self.voName)
    res = destStorageElement.isValid()
    if not res['OK']:
      errStr = "The storage element is not currently valid."
      log.verbose(errStr, "%s %s" % (storageElementName, res['Message']))
      # All replicas for an invalid SE fail as a group
      for lfn, url in replicaTuple:
        failed[lfn] = errStr
    else:
      # Resolve possible SE alias to the real SE name
      storageElementName = destStorageElement.storageElementName()
      for lfn, url in replicaTuple:
        res = returnSingleResult(destStorageElement.getURL(
            lfn, protocol=self.registrationProtocol))
        if not res['OK']:
          failed[lfn] = res['Message']
        else:
          # False marks the replica as non-master
          replicaTuple = (lfn, res['Value'], storageElementName, False)
          replicaTuples.append(replicaTuple)
  log.debug("Successfully resolved %s replicas for registration." %
            len(replicaTuples))
  # HACK!
  replicaDict = {}
  for lfn, url, se, _master in replicaTuples:
    replicaDict[lfn] = {'SE': se, 'PFN': url}
  if catalog:
    fileCatalog = FileCatalog(catalog, vo=self.voName)
    res = fileCatalog.addReplica(replicaDict)
  else:
    res = self.fileCatalog.addReplica(replicaDict)
  if not res['OK']:
    errStr = "Completely failed to register replicas."
    log.debug(errStr, res['Message'])
    return S_ERROR("%s %s" % (errStr, res['Message']))
  failed.update(res['Value']['Failed'])
  successful = res['Value']['Successful']
  resDict = {'Successful': successful, 'Failed': failed}
  return S_OK(resDict)
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
def removeFile(self, lfn, force=None):
  """ Remove the file (all replicas) from Storage Elements and file catalogue

      'lfn' is the file to be removed

      :param force: when True, files missing in the FC are reported as
                    successfully removed (defaults to self.ignoreMissingInFC)
      :return: S_OK({'Successful': ..., 'Failed': ...}) or S_ERROR
  """
  log = self.log.getSubLogger('removeFile')
  if not lfn:
    return S_OK({'Successful': {}, 'Failed': {}})
  if force is None:
    force = self.ignoreMissingInFC
  if isinstance(lfn, (list, dict, set, tuple)):
    lfns = list(lfn)
  else:
    lfns = [lfn]
  for lfn in lfns:
    if not isinstance(lfn, basestring):
      errStr = "Supplied lfns must be string or list of strings."
      log.debug(errStr)
      return S_ERROR(errStr)
  successful = {}
  failed = {}
  if not lfns:
    return S_OK({'Successful': successful, 'Failed': failed})
  # First check if the file exists in the FC
  res = self.fileCatalog.exists(lfns)
  if not res['OK']:
    return res
  success = res['Value']['Successful']
  # Keep only the files actually known to the catalog
  lfns = [lfn for lfn in success if success[lfn]]
  if force:
    # Files that don't exist are removed successfully
    successful = dict.fromkeys(
        (lfn for lfn in success if not success[lfn]), True)
  else:
    failed = dict.fromkeys(
        (lfn for lfn in success if not success[lfn]), 'No such file or directory')
  # Check that we have write permissions to this directory and to the file.
  if lfns:
    res = self.__hasAccess('removeFile', lfns)
    if not res['OK']:
      return res
    if res['Value']['Failed']:
      errStr = "Write access not permitted for this credential."
      log.debug(errStr, 'for %d files' % len(res['Value']['Failed']))
      failed.update(dict.fromkeys(res['Value']['Failed'], errStr))
    lfns = res['Value']['Successful']
    if lfns:
      log.debug(
          "Attempting to remove %d files from Storage and Catalogue. Get replicas first" % len(lfns))
      res = self.fileCatalog.getReplicas(lfns, allStatus=True)
      if not res['OK']:
        errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
        log.debug(errStr, res['Message'])
        return res
      lfnDict = res['Value']['Successful']
      for lfn, reason in res['Value']['Failed'].iteritems():
        # Ignore files missing in FC if force is set
        if reason == 'No such file or directory' and force:
          successful[lfn] = True
        elif reason == 'File has zero replicas':
          # Still remove the catalog entry, with no physical removal needed
          lfnDict[lfn] = {}
        else:
          failed[lfn] = reason
      res = self.__removeFile(lfnDict)
      if not res['OK']:
        # This can never happen
        return res
      failed.update(res['Value']['Failed'])
      successful.update(res['Value']['Successful'])
  # Flush the accounting records accumulated during the removals
  gDataStoreClient.commit()
  return S_OK({'Successful': successful, 'Failed': failed})
def __removeFile(self, lfnDict):
  """ Remove the physical replicas of each LFN, then delete the catalog
      entries of the files whose every replica was removed.

      :param lfnDict: {lfn: {se: url}} as returned by the file catalog
  """
  # Group LFNs by the SE hosting a replica (sorted, reversed over LFNs)
  seToLfns = {}
  for lfn, repDict in sorted(lfnDict.items(), reverse=True):
    for seName in repDict:
      seToLfns.setdefault(seName, []).append(lfn)
  failed = {}
  successful = {}
  for seName in sorted(seToLfns):
    seLfns = seToLfns[seName]
    res = self.__removeReplica(seName, seLfns, replicaDict=lfnDict)
    if res['OK']:
      problems = res['Value']['Failed'].items()
    else:
      # A global failure for this SE marks every one of its LFNs as failed
      problems = [(lfn, res['Message']) for lfn in seLfns]
    for lfn, reason in problems:
      # Accumulate per-SE failure reasons into a single string per LFN
      failed[lfn] = failed.setdefault(lfn, '') + " %s" % reason
  # Only files with no remaining failed replica may leave the catalog
  fullyRemoved = set(lfnDict) - set(failed)
  if fullyRemoved:
    res = self.fileCatalog.removeFile(list(fullyRemoved))
    if not res['OK']:
      failed.update(dict.fromkeys(
          fullyRemoved,
          "Failed to remove file from the catalog: %s" % res['Message']))
    else:
      failed.update(res['Value']['Failed'])
      successful = res['Value']['Successful']
  return S_OK({'Successful': successful, 'Failed': failed})
def removeReplica(self, storageElementName, lfn):
  """ Remove replica at the supplied Storage Element from Storage Element then file catalogue

      'storageElementName' is the storage where the file is to be removed
      'lfn' is the file to be removed

      :return: S_OK({'Successful': ..., 'Failed': ...}) or S_ERROR
  """
  log = self.log.getSubLogger('removeReplica')
  if isinstance(lfn, (list, dict, set, tuple)):
    lfns = set(lfn)
  else:
    lfns = set([lfn])
  for lfn in lfns:
    if not isinstance(lfn, basestring):
      errStr = "Supplied lfns must be string or list of strings."
      log.debug(errStr)
      return S_ERROR(errStr)
  successful = {}
  failed = {}
  if not lfns:
    return S_OK({'Successful': successful, 'Failed': failed})
  # Check that we have write permissions to this file.
  res = self.__hasAccess('removeReplica', lfns)
  if not res['OK']:
    log.debug('Error in __verifyWritePermisison', res['Message'])
    return res
  if res['Value']['Failed']:
    errStr = "Write access not permitted for this credential."
    log.debug(errStr, 'for %d files' % len(res['Value']['Failed']))
    failed.update(dict.fromkeys(res['Value']['Failed'], errStr))
    # Keep only the files we are allowed to touch
    lfns -= set(res['Value']['Failed'])
  if not lfns:
    log.debug('Permission denied for all files')
  else:
    log.debug("Will remove %s lfns at %s." % (len(lfns), storageElementName))
    res = self.fileCatalog.getReplicas(list(lfns), allStatus=True)
    if not res['OK']:
      errStr = "Completely failed to get replicas for lfns."
      log.debug(errStr, res['Message'])
      return res
    failed.update(res['Value']['Failed'])
    replicaDict = res['Value']['Successful']
    lfnsToRemove = set()
    for lfn, repDict in replicaDict.iteritems():
      if storageElementName not in repDict:
        # The file doesn't exist at the storage element so don't have to
        # remove it
        successful[lfn] = True
      elif len(repDict) == 1:
        # The file has only a single replica so don't remove
        log.debug("The replica you are trying to remove is the only one.",
                  "%s @ %s" % (lfn, storageElementName))
        failed[lfn] = "Failed to remove sole replica"
      else:
        lfnsToRemove.add(lfn)
    if lfnsToRemove:
      res = self.__removeReplica(
          storageElementName, lfnsToRemove, replicaDict=replicaDict)
      if not res['OK']:
        log.debug("Failed in __removeReplica", res['Message'])
        return res
      failed.update(res['Value']['Failed'])
      successful.update(res['Value']['Successful'])
  # Flush the accounting records accumulated during the removals
  gDataStoreClient.commit()
  return S_OK({'Successful': successful, 'Failed': failed})
def __removeReplica(self, storageElementName, lfns, replicaDict=None):
  """ remove replica
      Remove the replica from the storageElement, and then from the catalog

      :param storageElementName : The name of the storage Element
      :param lfns : list of lfn we want to remove
      :param replicaDict : cache of fc.getReplicas(lfns) : { lfn { se : catalog url } }
      :return: S_OK({'Successful': ..., 'Failed': ...}) or S_ERROR
  """
  log = self.log.getSubLogger('__removeReplica')
  failed = {}
  successful = {}
  replicaDict = replicaDict if replicaDict else {}
  lfnsToRemove = set()
  # Per-file permission check; files without write access are marked Failed
  for lfn in lfns:
    res = self.__hasAccess('removeReplica', lfn)
    if not res['OK']:
      log.debug('Error in __verifyWritePermission', res['Message'])
      return res
    if lfn not in res['Value']['Successful']:
      errStr = "Write access not permitted for this credential."
      log.debug(errStr, lfn)
      failed[lfn] = errStr
    else:
      lfnsToRemove.add(lfn)
  # Remove physical replicas first
  res = self.__removePhysicalReplica(
      storageElementName, lfnsToRemove, replicaDict=replicaDict)
  if not res['OK']:
    errStr = "Failed to remove physical replicas."
    log.debug(errStr, res['Message'])
    return res
  failed.update(res['Value']['Failed'])
  # Here we use the FC PFN...
  replicaTuples = [(lfn, replicaDict[lfn][storageElementName], storageElementName)
                   for lfn in res['Value']['Successful']]
  if replicaTuples:
    res = self.__removeCatalogReplica(replicaTuples)
    if not res['OK']:
      errStr = "Completely failed to remove physical files."
      log.debug(errStr, res['Message'])
      # Catalog removal failed wholesale: everything physically removed
      # is still reported as Failed
      failed.update(dict.fromkeys(
          (lfn for lfn, _pfn, _se in replicaTuples), res['Message']))
      successful = {}
    else:
      failed.update(res['Value']['Failed'])
      successful = res['Value']['Successful']
  return S_OK({'Successful': successful, 'Failed': failed})
def removeReplicaFromCatalog(self, storageElementName, lfn):
  """ remove :lfn: replica from :storageElementName: SE

      :param self: self reference
      :param str storageElementName: SE name
      :param mixed lfn: a single LFN or list of LFNs
      :return: S_OK({'Successful': ..., 'Failed': ...}) or S_ERROR
  """
  # FIXME: this method is dangerous and should eventually be removed as well
  # as the script dirac-dms-remove-catalog-replicas
  log = self.log.getSubLogger('removeReplicaFromCatalog')
  # Remove replica from the file catalog 'lfn' are the file
  # to be removed 'storageElementName' is the storage where the file is to
  # be removed
  if isinstance(lfn, (list, dict, set, tuple)):
    lfns = list(lfn)
  else:
    lfns = [lfn]
  for lfn in lfns:
    if not isinstance(lfn, basestring):
      errStr = "Supplied lfns must be string or list of strings."
      log.debug(errStr)
      return S_ERROR(errStr)
  successful = {}
  failed = {}
  if not lfns:
    return S_OK({'Successful': successful, 'Failed': failed})
  log.debug("Will remove catalogue entry for %s lfns at %s." %
            (len(lfns), storageElementName))
  res = self.fileCatalog.getReplicas(lfns, allStatus=True)
  if not res['OK']:
    errStr = "Completely failed to get replicas for lfns."
    log.debug(errStr, res['Message'])
    return res
  failed = {}
  successful = {}
  for lfn, reason in res['Value']['Failed'].iteritems():
    # A file unknown to the catalog has nothing to remove
    if reason in ('No such file or directory', 'File has zero replicas'):
      successful[lfn] = True
    else:
      failed[lfn] = reason
  replicaTuples = []
  for lfn, repDict in res['Value']['Successful'].iteritems():
    if storageElementName not in repDict:
      # The file doesn't exist at the storage element so don't have to remove
      # it
      successful[lfn] = True
    else:
      replicaTuples.append(
          (lfn, repDict[storageElementName], storageElementName))
  log.debug("Resolved %s pfns for catalog removal at %s." % (len(replicaTuples),
                                                             storageElementName))
  res = self.__removeCatalogReplica(replicaTuples)
  failed.update(res['Value']['Failed'])
  successful.update(res['Value']['Successful'])
  resDict = {'Successful': successful, 'Failed': failed}
  return S_OK(resDict)
def __removeCatalogReplica(self, replicaTuples):
  """ remove replica form catalogue

      :param replicaTuples : list of (lfn, catalogPFN, se)
      :return: the FileCatalog.removeReplica result (Successful/Failed), or S_ERROR
  """
  log = self.log.getSubLogger('__removeCatalogReplica')
  # Accounting record for the catalog removal operation
  oDataOperation = _initialiseAccountingObject(
      'removeCatalogReplica', '', len(replicaTuples))
  oDataOperation.setStartTime()
  start = time.time()
  # HACK!
  replicaDict = {}
  for lfn, pfn, se in replicaTuples:
    replicaDict[lfn] = {'SE': se, 'PFN': pfn}
  res = self.fileCatalog.removeReplica(replicaDict)
  oDataOperation.setEndTime()
  oDataOperation.setValueByKey('RegistrationTime', time.time() - start)
  if not res['OK']:
    oDataOperation.setValueByKey('RegistrationOK', 0)
    oDataOperation.setValueByKey('FinalStatus', 'Failed')
    gDataStoreClient.addRegister(oDataOperation)
    errStr = "Completely failed to remove replica: "
    log.debug(errStr, res['Message'])
    return S_ERROR("%s %s" % (errStr, res['Message']))
  success = res['Value']['Successful']
  failed = res['Value']['Failed']
  # .items() copies are used below because entries are popped during iteration
  for lfn, error in failed.items():
    # Ignore error if file doesn't exist
    # This assumes all catalogs return an error as { catalog : error }
    for catalog, err in error.items():
      if 'no such file' in err.lower():
        # Reclassify the missing-file "error" as a per-catalog success
        success.setdefault(lfn, {}).update({catalog: True})
        error.pop(catalog)
    if not failed[lfn]:
      failed.pop(lfn)
    else:
      log.error("Failed to remove replica.", "%s %s" % (lfn, error))
  # Only for logging information
  if success:
    log.debug("Removed %d replicas" % len(success))
    for lfn in success:
      log.debug("Successfully removed replica.", lfn)
  oDataOperation.setValueByKey('RegistrationOK', len(success))
  gDataStoreClient.addRegister(oDataOperation)
  return res
def __removePhysicalReplica(self, storageElementName, lfnsToRemove, replicaDict=None):
  """ remove replica from storage element

      :param storageElementName : name of the storage Element
      :param lfnsToRemove : set of lfn to removes
      :param replicaDict : cache of fc.getReplicas, to be passed to the SE
      :return: the StorageElement.removeFile result (Successful/Failed), or S_ERROR
  """
  log = self.log.getSubLogger('__removePhysicalReplica')
  log.debug("Attempting to remove %s pfns at %s." %
            (len(lfnsToRemove), storageElementName))
  storageElement = StorageElement(storageElementName, vo=self.voName)
  res = storageElement.isValid()
  if not res['OK']:
    errStr = "The storage element is not currently valid."
    log.verbose(errStr, "%s %s" % (storageElementName, res['Message']))
    return S_ERROR("%s %s" % (errStr, res['Message']))
  # Accounting record for the physical removal operation
  oDataOperation = _initialiseAccountingObject('removePhysicalReplica',
                                               storageElementName,
                                               len(lfnsToRemove))
  oDataOperation.setStartTime()
  start = time.time()
  lfnsToRemove = list(lfnsToRemove)
  # Sizes are fetched up-front so the accounting can report the deleted volume;
  # failures here are deliberately ignored (sizes default to 0)
  ret = storageElement.getFileSize(lfnsToRemove, replicaDict=replicaDict)
  deletedSizes = ret.get('Value', {}).get('Successful', {})
  res = storageElement.removeFile(lfnsToRemove, replicaDict=replicaDict)
  oDataOperation.setEndTime()
  oDataOperation.setValueByKey('TransferTime', time.time() - start)
  if not res['OK']:
    oDataOperation.setValueByKey('TransferOK', 0)
    oDataOperation.setValueByKey('FinalStatus', 'Failed')
    gDataStoreClient.addRegister(oDataOperation)
    log.debug("Failed to remove replicas.", res['Message'])
  else:
    # .items() copy: entries are popped during iteration
    for lfn, value in res['Value']['Failed'].items():
      # A file already absent from the SE counts as removed
      if 'No such file or directory' in value:
        res['Value']['Successful'][lfn] = lfn
        res['Value']['Failed'].pop(lfn)
    for lfn in res['Value']['Successful']:
      res['Value']['Successful'][lfn] = True
    deletedSize = sum(deletedSizes.get(lfn, 0)
                      for lfn in res['Value']['Successful'])
    oDataOperation.setValueByKey('TransferSize', deletedSize)
    oDataOperation.setValueByKey(
        'TransferOK', len(res['Value']['Successful']))
    gDataStoreClient.addRegister(oDataOperation)
    infoStr = "Successfully issued accounting removal request."
    log.debug(infoStr)
  return res
#########################################################################
#
# File transfer methods
#
def put(self, lfn, fileName, diracSE, path=None):
  """ Upload a local file to a Storage Element (no catalog registration).

      :param self: self reference
      :param str lfn: LFN under which the file is to be stored
      :param str fileName: the full path to the local file
      :param str diracSE: the Storage Element to which to put the file
      :param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
  """
  log = self.log.getSubLogger('put')
  # Refuse missing or empty local files up front
  if not os.path.exists(fileName):
    errStr = "Supplied file does not exist."
    log.debug(errStr, fileName)
    return S_ERROR(errStr)
  # Default the storage path to the LFN directory
  if not path:
    path = os.path.dirname(lfn)
  if getSize(fileName) == 0:
    errStr = "Supplied file is zero size."
    log.debug(errStr, fileName)
    return S_ERROR(errStr)
  # Validate the destination storage element
  storageElement = StorageElement(diracSE, vo=self.voName)
  res = storageElement.isValid()
  if not res['OK']:
    errStr = "The storage element is not currently valid."
    log.verbose(errStr, "%s %s" % (diracSE, res['Message']))
    return S_ERROR("%s %s" % (errStr, res['Message']))
  # Perform (and time) the actual upload
  startTime = time.time()
  res = returnSingleResult(storageElement.putFile({lfn: fileName}))
  putTime = time.time() - startTime
  successful = {}
  failed = {}
  if res['OK']:
    log.debug("Put file to storage in %s seconds." % putTime)
    successful[lfn] = res['Value']
  else:
    errStr = "Failed to put file to Storage Element."
    failed[lfn] = res['Message']
    log.debug(errStr, "%s: %s" % (fileName, res['Message']))
  return S_OK({'Successful': successful, 'Failed': failed})
#########################################################################
#
# File catalog methods
#
def getActiveReplicas(self, lfns, getUrl=True, diskOnly=False, preferDisk=False):
  """ Get all the replicas for the SEs which are in Active status for reading. """
  return self.getReplicas(
      lfns,
      allStatus=False,
      getUrl=getUrl,
      diskOnly=diskOnly,
      preferDisk=preferDisk,
      active=True)
def __filterTapeReplicas(self, replicaDict, diskOnly=False):
  """ Drop tape replicas whenever a disk replica exists; with diskOnly, drop
      tape replicas unconditionally.
      The input argument is modified in place.
  """
  allSEs = set(se for ses in replicaDict['Successful'].itervalues() for se in ses)
  # One pair of status lookups per SE rather than per replica
  seStatus = dict((se,
                   (self.__checkSEStatus(se, status='DiskSE'),
                    self.__checkSEStatus(se, status='TapeSE'))) for se in allSEs)
  # .items() copy: entries may be deleted from 'Successful' below
  for lfn, replicas in replicaDict['Successful'].items():
    self.__filterTapeSEs(replicas, diskOnly=diskOnly, seStatus=seStatus)
    if diskOnly and not replicas:
      # diskOnly filtering can leave nothing at all: report as a failure
      del replicaDict['Successful'][lfn]
      replicaDict['Failed'][lfn] = 'No disk replicas'
  return
def __filterReplicasForJobs(self, replicaDict):
  """ Remove the SEs that are not to be used for jobs, and archive SEs when
      non-archive alternatives exist.
      The input argument is modified in place.
  """
  allSEs = set(se for ses in replicaDict['Successful'].itervalues() for se in ses)
  # One pair of helper lookups per SE rather than per replica
  seStatus = dict((se, (self.dmsHelper.isSEForJobs(se),
                        self.dmsHelper.isSEArchive(se))) for se in allSEs)
  # .items() copy: entries may be deleted from 'Successful' below
  for lfn, replicas in replicaDict['Successful'].items():
    nonArchive = set(se for se in replicas if not seStatus[se][1])
    # .keys() copy: entries are popped from 'replicas' below
    for se in replicas.keys():
      forJobs, isArchive = seStatus[se]
      # Drop SEs unusable for jobs, and archives when alternatives exist
      if not forJobs or (nonArchive and isArchive):
        replicas.pop(se)
    if not replicas:
      del replicaDict['Successful'][lfn]
      replicaDict['Failed'][lfn] = 'No replicas for jobs'
  return
def __filterTapeSEs(self, replicas, diskOnly=False, seStatus=None):
  """ Remove the tape SEs as soon as there is one disk SE, or unconditionally
      when diskOnly is requested.
      The input argument is modified in place.
  """
  # Build the SE status cache if not provided by the caller
  if seStatus is None:
    seStatus = dict((se,
                     (self.__checkSEStatus(se, status='DiskSE'),
                      self.__checkSEStatus(se, status='TapeSE'))) for se in replicas)
  for se in replicas:
    # Trigger the purge on the first disk SE found (or immediately when diskOnly)
    if diskOnly or seStatus[se][0]:
      # .keys() copy: entries are popped from 'replicas' below
      for tapeCandidate in replicas.keys():
        if seStatus[tapeCandidate][1]:
          replicas.pop(tapeCandidate)
      return
  return
def checkActiveReplicas(self, replicaDict):
  """ Validate a replica dictionary's structure, then keep only replicas on
      SEs whose Read access is active.

      :param replicaDict: {'Successful': {lfn: {se: url}}, 'Failed': {...}}
      :return: S_OK(filtered copy) or S_ERROR on malformed input
  """
  if not isinstance(replicaDict, dict):
    return S_ERROR('Wrong argument type %s, expected a dictionary' % type(replicaDict))
  for key in ('Successful', 'Failed'):
    if key not in replicaDict:
      return S_ERROR('Missing key "%s" in replica dictionary' % key)
    if not isinstance(replicaDict[key], dict):
      return S_ERROR('Wrong argument type %s, expected a dictionary' % type(replicaDict[key]))
  # Work on copies: the caller's dictionary must not be altered
  activeDict = {'Successful': {}, 'Failed': replicaDict['Failed'].copy()}
  for lfn, replicas in replicaDict['Successful'].iteritems():
    if isinstance(replicas, dict):
      activeDict['Successful'][lfn] = replicas.copy()
    else:
      activeDict['Failed'][lfn] = 'Wrong replica info'
  self.__filterActiveReplicas(activeDict)
  return S_OK(activeDict)
def __filterActiveReplicas(self, replicaDict):
  """
  Check a replica dictionary for active replicas
  The input dict is modified, no returned value

  :param dict replicaDict: {'Successful': {lfn: {se: replica}}, ...}
  """
  # Collect every SE appearing in any replica list
  allSEs = set()
  for ses in replicaDict['Successful'].itervalues():
    allSEs.update(ses)
  # Query each SE's Read status once, even for long replica lists
  readable = dict((se, self.__checkSEStatus(se, status='Read'))
                  for se in allSEs)
  # Drop replicas sitting at non-readable SEs
  for replicas in replicaDict['Successful'].itervalues():
    for se in [se for se in replicas if not readable[se]]:  # snapshot before pop
      replicas.pop(se)
  return
def __checkSEStatus(self, se, status='Read'):
  """ returns the value of a certain SE status flag (access or other)

  :param str se: DIRAC SE name
  :param str status: status flag to query (e.g. 'Read', 'DiskSE', 'TapeSE')
  :returns: boolean flag value; False when the flag is absent from the SE status
  """
  return StorageElement(se, vo=self.voName).status().get(status, False)
def getReplicas(self, lfns, allStatus=True, getUrl=True, diskOnly=False, preferDisk=False, active=False):
  """ get replicas from catalogue and filter if requested
      Warning: all filters are independent, hence active and preferDisk should be set if using forJobs

  :param lfns: LFN string, list of LFNs or dict with LFNs as keys
  :param bool allStatus: include replicas in any catalog status
  :param bool getUrl: resolve access URLs; when False replica values are just True
  :param bool diskOnly: keep only disk replicas
  :param bool preferDisk: drop tape replicas when a disk replica exists
  :param bool active: keep only replicas at SEs with Read access
  :returns: S_OK({'Successful': ..., 'Failed': ...}) or the catalog S_ERROR
  """
  catalogReplicas = {}
  failed = {}
  # Query the catalog in chunks to keep each call bounded
  for lfnChunk in breakListIntoChunks(lfns, 1000):
    res = self.fileCatalog.getReplicas(lfnChunk, allStatus=allStatus)
    if not res['OK']:
      return res
    catalogReplicas.update(res['Value']['Successful'])
    failed.update(res['Value']['Failed'])
  if not getUrl:
    # Replace catalog PFNs by a simple presence flag
    for lfn in catalogReplicas:
      catalogReplicas[lfn] = dict.fromkeys(catalogReplicas[lfn], True)
  elif not self.useCatalogPFN:
    # Bug fix: this branch was guarded by "if res['OK']:", but 'res' is the loop
    # variable above -- undefined (NameError) when 'lfns' is empty, and always
    # OK otherwise since failures return early.  The guard is removed.
    se_lfn = {}
    # We group the query to getURL by storage element to gain in speed
    for lfn in catalogReplicas:
      for se in catalogReplicas[lfn]:
        se_lfn.setdefault(se, []).append(lfn)
    for se in se_lfn:
      seObj = StorageElement(se, vo=self.voName)
      succPfn = seObj.getURL(se_lfn[se],
                             protocol=self.registrationProtocol).get('Value', {}).get('Successful', {})
      for lfn in succPfn:
        # catalogReplicas values are updated in place
        catalogReplicas[lfn][se] = succPfn[lfn]
  result = {'Successful': catalogReplicas, 'Failed': failed}
  if active:
    self.__filterActiveReplicas(result)
  if diskOnly or preferDisk:
    self.__filterTapeReplicas(result, diskOnly=diskOnly)
  return S_OK(result)
def getReplicasForJobs(self, lfns, allStatus=False, getUrl=True, diskOnly=False):
  """ get replicas useful for jobs

  :param lfns: LFN string, list of LFNs or dict with LFNs as keys
  :param bool allStatus: include replicas in any catalog status
  :param bool getUrl: resolve access URLs
  :param bool diskOnly: keep only disk replicas
  :returns: S_OK with filtered replica dict, or the getReplicas error
  """
  # Fetch unfiltered replicas, then apply the job-specific filters here
  res = self.getReplicas(lfns, allStatus=allStatus, getUrl=getUrl)
  if not res['OK']:
    return res
  replicaDict = res['Value']
  # Jobs need replicas at readable SEs...
  self.__filterActiveReplicas(replicaDict)
  # ...preferably on disk (tape kept only when nothing else exists)...
  self.__filterTapeReplicas(replicaDict, diskOnly=diskOnly)
  # ...and not at SEs excluded for jobs (e.g. Failover)
  self.__filterReplicasForJobs(replicaDict)
  return S_OK(replicaDict)
# 3
# Methods from the catalogToStorage. It would all work with the direct call to the SE, but this checks
# first if the replica is known to the catalog
def __executeIfReplicaExists(self, storageElementName, lfn, method, **kwargs):
  """ a simple wrapper that allows replica querying then perform the StorageElement operation

  :param self: self reference
  :param str storageElementName: DIRAC SE name
  :param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
  :param str method: name of the StorageElement method to call on the surviving LFNs
  :param dict kwargs: extra keyword arguments forwarded to the StorageElement method
  :returns: S_OK({'Successful': ..., 'Failed': ...}) or S_ERROR from the catalog/SE call
  """
  log = self.log.getSubLogger('__executeIfReplicaExists')
  # # default value
  # NOTE(review): **kwargs always binds a dict, so this is a no-op safeguard
  kwargs = kwargs if kwargs else {}
  # # get replicas for lfn
  res = FileCatalog(vo=self.voName).getReplicas(lfn)
  if not res["OK"]:
    errStr = "Completely failed to get replicas for LFNs."
    log.debug(errStr, res["Message"])
    return res
  # # returned dict, get failed replicase
  retDict = {"Failed": res["Value"]["Failed"],
             "Successful": {}}
  # # print errors
  for lfn, reason in retDict["Failed"].iteritems():
    log.error("_callReplicaSEFcn: Failed to get replicas for file.",
              "%s %s" % (lfn, reason))
  # # good replicas
  lfnReplicas = res["Value"]["Successful"]
  # # store PFN to LFN mapping
  # Keep only LFNs that actually have a replica at the requested SE
  lfnList = []
  for lfn, replicas in lfnReplicas.iteritems():
    if storageElementName in replicas:
      lfnList.append(lfn)
    else:
      errStr = "File hasn't got replica at supplied Storage Element."
      log.error(errStr, "%s %s" % (lfn, storageElementName))
      retDict["Failed"][lfn] = errStr
  # Pass the replica dict along unless the caller supplied one
  if 'replicaDict' not in kwargs:
    kwargs['replicaDict'] = lfnReplicas
  # # call StorageElement function at least
  se = StorageElement(storageElementName, vo=self.voName)
  fcn = getattr(se, method)
  res = fcn(lfnList, **kwargs)
  # # check result
  if not res["OK"]:
    errStr = "Failed to execute %s StorageElement method." % method
    log.error(errStr, res["Message"])
    return res
  # # filter out failed and successful
  retDict["Successful"].update(res["Value"]["Successful"])
  retDict["Failed"].update(res["Value"]["Failed"])
  return S_OK(retDict)
def getReplicaIsFile(self, lfn, storageElementName):
  """ determine whether the supplied lfns are files at the supplied StorageElement

  :param self: self reference
  :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
  :param str storageElementName: DIRAC SE name
  :returns: S_OK with 'Successful'/'Failed' sub-dicts, or S_ERROR
  """
  return self.__executeIfReplicaExists(storageElementName, lfn, "isFile")
def getReplicaSize(self, lfn, storageElementName):
  """ get the size of files for the lfns at the supplied StorageElement

  :param self: self reference
  :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
  :param str storageElementName: DIRAC SE name
  :returns: S_OK with 'Successful'/'Failed' sub-dicts, or S_ERROR
  """
  return self.__executeIfReplicaExists(storageElementName, lfn, "getFileSize")
def getReplicaAccessUrl(self, lfn, storageElementName, protocol=False):
  """ get the access url for lfns at the supplied StorageElement

  :param self: self reference
  :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
  :param str storageElementName: DIRAC SE name
  :param protocol: protocol (or list of protocols) to request the URL for;
                   False lets the SE pick its default
  :returns: S_OK with 'Successful'/'Failed' sub-dicts, or S_ERROR
  """
  return self.__executeIfReplicaExists(storageElementName, lfn, "getURL", protocol=protocol)
def getReplicaMetadata(self, lfn, storageElementName):
  """ get the file metadata for lfns at the supplied StorageElement

  :param self: self reference
  :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
  :param str storageElementName: DIRAC SE name
  :returns: S_OK with 'Successful'/'Failed' sub-dicts, or S_ERROR
  """
  return self.__executeIfReplicaExists(storageElementName, lfn, "getFileMetadata")
def prestageReplica(self, lfn, storageElementName, lifetime=86400):
  """ issue a prestage requests for the lfns at the supplied StorageElement

  :param self: self reference
  :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
  :param str storageElementName: DIRAC SE name
  :param int lifetime: prestage lifetime in seconds (default 24h)
  :returns: S_OK with 'Successful'/'Failed' sub-dicts, or S_ERROR
  """
  return self.__executeIfReplicaExists(storageElementName, lfn,
                                       "prestageFile", lifetime=lifetime)
def pinReplica(self, lfn, storageElementName, lifetime=86400):
  """ pin the lfns at the supplied StorageElement

  :param self: self reference
  :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
  :param str storageElementName: DIRAC SE name
  :param int lifetime: pin lifetime in seconds (default 24h)
  :returns: S_OK with 'Successful'/'Failed' sub-dicts, or S_ERROR
  """
  return self.__executeIfReplicaExists(storageElementName, lfn,
                                       "pinFile", lifetime=lifetime)
def releaseReplica(self, lfn, storageElementName):
  """ release pins for the lfns at the supplied StorageElement

  :param self: self reference
  :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
  :param str storageElementName: DIRAC SE name
  :returns: S_OK with 'Successful'/'Failed' sub-dicts, or S_ERROR
  """
  return self.__executeIfReplicaExists(storageElementName, lfn, "releaseFile")
def getReplica(self, lfn, storageElementName, localPath=False):
  """ copy replicas from DIRAC SE to local directory

  :param self: self reference
  :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
  :param str storageElementName: DIRAC SE name
  :param mixed localPath: path in the local file system, if False, os.getcwd() will be used
  :returns: S_OK with 'Successful'/'Failed' sub-dicts, or S_ERROR
  """
  return self.__executeIfReplicaExists(storageElementName, lfn,
                                       "getFile", localPath=localPath)
|
gpl-3.0
|
Sebubu/mushroom_crawler
|
mushroom/GoogleInceptionV3.py
|
1
|
6668
|
from keras.models import Graph
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.advanced_activations import PReLU
import datetime
'''
Inception v3 paper
http://arxiv.org/pdf/1512.00567v1.pdf
Old inception paper
http://arxiv.org/pdf/1409.4842.pdf
'''
def activation_function():
    """Return the activation name shared by every convolution in the network."""
    name = "relu"
    return name
def cinput_shape(graph):
    """Return *graph*'s output_shape without its leading (batch) dimension, as a list."""
    return list(graph.output_shape[1:])
def conv(input_shape):
    """Build the stem: a stack of plain convolutions with one max-pool.

    Returns a keras Graph with input 'input' of *input_shape* and output
    'output' (the final 288-filter convolution).
    """
    graph = Graph()
    graph.add_input("input", input_shape)
    # Two stride-2 convolutions plus a stride-2 pool reduce resolution early
    graph.add_node(Convolution2D(32, 3, 3, subsample=(2,2), activation=activation_function()), name="conv1", input="input")
    graph.add_node(Convolution2D(32, 3,3, activation=activation_function()), name="conv2", input="conv1")
    graph.add_node(Convolution2D(64, 3,3, activation=activation_function()), name="conv3", input="conv2")
    graph.add_node(MaxPooling2D((3, 3), stride=(2, 2)), name="pool4", input="conv3")
    graph.add_node(Convolution2D(80, 3,3, activation=activation_function()), name="conv5", input="pool4")
    graph.add_node(Convolution2D(192, 3,3, subsample=(2,2), activation=activation_function()), name="conv6", input="conv5")
    graph.add_node(Convolution2D(288, 3,3, activation=activation_function()), name="conv7", input="conv6")
    graph.add_output("output", input="conv7")
    return graph
def inception4(input_shape):
    """Inception module with a 5x5 branch.

    Four parallel branches off 'input' are concatenated channel-wise
    (concat_axis=1): 1x1->5x5, 1x1->3x3, pool->1x1 and a plain 1x1.
    """
    graph = Graph()
    graph.add_input("input", input_shape)
    # Branch 1: 1x1 reduction, pad 2, 5x5 convolution
    graph.add_node(Convolution2D(16, 1, 1, activation=activation_function()), "conv1_1", "input")
    graph.add_node(ZeroPadding2D(padding=(2, 2)), "zero1_2", "conv1_1")
    graph.add_node(Convolution2D(32, 5, 5, activation=activation_function()), "conv1_3", "zero1_2")
    # Branch 2: 1x1 reduction, pad 1, 3x3 convolution
    graph.add_node(Convolution2D(96, 1, 1, activation=activation_function()), "conv2_1", "input")
    graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero2_2", "conv2_1")
    graph.add_node(Convolution2D(128, 3, 3, activation=activation_function()), "conv2_3", "zero2_2")
    # Branch 3: padded 3x3 max-pool followed by a 1x1 projection
    graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero3_1", "input")
    graph.add_node(MaxPooling2D((3, 3), stride=(1, 1)), "pool3_2", "zero3_1")
    graph.add_node(Convolution2D(32, 1, 1, activation=activation_function()), "conv3_3","pool3_2")
    # Branch 4: plain 1x1 projection
    graph.add_node(Convolution2D(64, 1, 1, activation=activation_function()), "conv4_1", "input")
    graph.add_output("output", inputs=["conv1_3", "conv2_3", "conv3_3", "conv4_1"], merge_mode="concat", concat_axis=1)
    return graph
def inception5(input_shape):
    """Inception module where the 5x5 is factorized into two stacked 3x3 convs.

    Same four-branch layout as inception4, but branch 1 uses 3x3+3x3 instead
    of a single 5x5; outputs are concatenated channel-wise (concat_axis=1).
    """
    graph = Graph()
    graph.add_input("input", input_shape)
    # Branch 1: 1x1 reduction, pad 2, two stacked 3x3 convolutions
    graph.add_node(Convolution2D(16, 1, 1, activation=activation_function()), "conv1_1", "input")
    graph.add_node(ZeroPadding2D(padding=(2, 2)), "zero1_2", "conv1_1")
    graph.add_node(Convolution2D(32, 3, 3, activation=activation_function()), "conv1_3", "zero1_2")
    graph.add_node(Convolution2D(32, 3, 3, activation=activation_function()), "conv1_4", "conv1_3")
    # Branch 2: 1x1 reduction, pad 1, 3x3 convolution
    graph.add_node(Convolution2D(96, 1, 1, activation=activation_function()), "conv2_1", "input")
    graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero2_2", "conv2_1")
    graph.add_node(Convolution2D(128, 3, 3, activation=activation_function()), "conv2_3", "zero2_2")
    # Branch 3: padded 3x3 max-pool followed by a 1x1 projection
    graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero3_1", "input")
    graph.add_node(MaxPooling2D((3, 3), stride=(1, 1)), "pool3_2", "zero3_1")
    graph.add_node(Convolution2D(32, 1, 1, activation=activation_function()), "conv3_3", "pool3_2")
    # Branch 4: plain 1x1 projection
    graph.add_node(Convolution2D(64, 1, 1, activation=activation_function()), "conv4_1", "input")
    graph.add_output("output",inputs=["conv1_4", "conv2_3", "conv3_3", "conv4_1"], merge_mode="concat", concat_axis=1)
    return graph
def inception6(input_shape, n):
    """Inception module with spatially factorized (1xn then nx1) convolutions.

    :param input_shape: channel-first input shape without the batch dimension
    :param n: kernel extent of the factorized convolutions (1xn / nx1 pairs)
    Outputs of the four branches are concatenated channel-wise (concat_axis=1).
    """
    graph = Graph()
    graph.add_input("input", input_shape)
    # Branch 1: 1x1 reduction, pad 2, then two 1xn/nx1 factorized pairs
    graph.add_node(Convolution2D(16, 1, 1, activation=activation_function()), "conv1_1", "input")
    graph.add_node(ZeroPadding2D(padding=(2, 2)), "zero1_2", "conv1_1")
    graph.add_node(Convolution2D(32, 1, n, activation=activation_function()), "conv1_3", "zero1_2")
    graph.add_node(Convolution2D(32, n, 1, activation=activation_function()), "conv1_4", "conv1_3")
    graph.add_node(Convolution2D(32, 1, n, activation=activation_function()), "conv1_5", "conv1_4")
    graph.add_node(Convolution2D(32, n, 1, activation=activation_function()), "conv1_6", "conv1_5")
    # Branch 2: 1x1 reduction, pad 1, one 1xn/nx1 factorized pair
    graph.add_node(Convolution2D(96, 1, 1, activation=activation_function()), "conv2_1", "input")
    graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero2_2", "conv2_1")
    graph.add_node(Convolution2D(128, 1, n, activation=activation_function()), "conv2_3", "zero2_2")
    graph.add_node(Convolution2D(128, n, 1, activation=activation_function()), "conv2_4", "conv2_3")
    # Branch 3: padded 3x3 max-pool followed by a 1x1 projection
    graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero3_1", "input")
    graph.add_node(MaxPooling2D((3, 3), stride=(1, 1)), "pool3_2", "zero3_1")
    graph.add_node(Convolution2D(32, 1, 1, activation=activation_function()), "conv3_3", "pool3_2")
    # Branch 4: plain 1x1 projection
    graph.add_node(Convolution2D(64, 1, 1, activation=activation_function()), "conv4_1", "input")
    graph.add_output("output", inputs=["conv1_6", "conv2_4", "conv3_3", "conv4_1"], merge_mode="concat", concat_axis=1)
    return graph
def printl(name):
    """Print *name* prefixed with the current timestamp (simple progress log)."""
    # Call form of print: valid under both Python 2 and Python 3
    # (the original py2-only 'print x' statement breaks under py3).
    print(str(datetime.datetime.now()) + name)
def create_model():
    """Assemble the full Inception-v3-style network as a single keras Graph.

    Sub-graphs are chained sequentially: the convolutional stem, three
    inception4 modules, five inception5 modules and three inception6
    modules (with n=3 for the factorized convolutions).
    """
    # NOTE(review): 244 looks like a typo for the usual 224x224 input -- confirm
    input_shape = (3, 244, 244)
    n = 3
    # Build each sub-graph, feeding the previous block's output shape forward
    conv1 = conv(input_shape)
    inc41 = inception4(cinput_shape(conv1))
    inc42 = inception4(cinput_shape(inc41))
    inc43 = inception4(cinput_shape(inc42))
    inc51 = inception5(cinput_shape(inc43))
    inc52 = inception5(cinput_shape(inc51))
    inc53 = inception5(cinput_shape(inc52))
    inc54 = inception5(cinput_shape(inc53))
    inc55 = inception5(cinput_shape(inc54))
    inc61 = inception6(cinput_shape(inc55), n)
    inc62 = inception6(cinput_shape(inc61), n)
    inc63 = inception6(cinput_shape(inc62), n)
    # Wire the sub-graphs into one container graph
    graph = Graph()
    graph.add_input("input", input_shape)
    graph.add_node(conv1, "conv1", "input")
    graph.add_node(inc41, "inc41", "conv1")
    graph.add_node(inc42, "inc42", "inc41")
    graph.add_node(inc43, "inc43", "inc42")
    graph.add_node(inc51, "inc51", "inc43")
    graph.add_node(inc52, "inc52", "inc51")
    graph.add_node(inc53, "inc53", "inc52")
    graph.add_node(inc54, "inc54", "inc53")
    graph.add_node(inc55, "inc55", "inc54")
    graph.add_node(inc61, "inc61", "inc55")
    graph.add_node(inc62, "inc62", "inc61")
    graph.add_node(inc63, "inc63", "inc62")
    graph.add_output("output", "inc63")
    # Fix: use the py2/py3-compatible call form of print
    print("out " + str(graph.output_shape))
    return graph
# Script entry: build and compile the model at import time (original behavior).
graph = create_model()
graph.compile(optimizer='rmsprop', loss={'output': 'mse'})
# Fix: py2/py3-compatible call form of print
print(graph)
|
unlicense
|
cloudera/hue
|
desktop/core/ext-py/celery-4.2.1/t/unit/bin/test_base.py
|
2
|
12976
|
from __future__ import absolute_import, unicode_literals
import os
import pytest
from case import Mock, mock, patch
from celery.bin.base import Command, Extensions, Option
from celery.five import bytes_if_py2
class MyApp(object):
    """Minimal Celery-app stand-in exposing only the user_options mapping."""
    # Looked up by Command.setup_app_from_commandline when this object is
    # resolved via --app (see test_with_custom_app / test_setup_app_custom_app).
    user_options = {'preload': None}


APP = MyApp()  # <-- Used by test_with_custom_app
class MockCommand(Command):
    """Command stub with canned argument parsing for the tests below."""
    # Positional args that parse_options always reports, regardless of input.
    mock_args = ('arg1', 'arg2', 'arg3')

    def parse_options(self, prog_name, arguments, command=None):
        # Bypass the real parser: fixed option dict plus the canned positionals.
        options = {'foo': 'bar', 'prog_name': prog_name}
        return options, self.mock_args

    def run(self, *args, **kwargs):
        # Echo back whatever the Command machinery passed in, for assertions.
        return args, kwargs
class test_Extensions:
    """Unit tests for celery.bin.base.Extensions entry-point loading."""

    def test_load(self):
        with patch('pkg_resources.iter_entry_points') as iterep:
            with patch('celery.utils.imports.symbol_by_name') as symbyname:
                # One fake entry point 'ep' that resolves to foo:bar
                ep = Mock()
                ep.name = 'ep'
                ep.module_name = 'foo'
                ep.attrs = ['bar', 'baz']
                iterep.return_value = [ep]
                cls = symbyname.return_value = Mock()
                register = Mock()
                e = Extensions('unit', register)
                e.load()
                symbyname.assert_called_with('foo:bar')
                register.assert_called_with(cls, name='ep')
            with patch('celery.utils.imports.symbol_by_name') as symbyname:
                # A SyntaxError in the plugin is only warned about, not raised
                symbyname.side_effect = SyntaxError()
                with patch('warnings.warn') as warn:
                    e.load()
                    warn.assert_called()
            with patch('celery.utils.imports.symbol_by_name') as symbyname:
                # Any other exception propagates to the caller
                symbyname.side_effect = KeyError('foo')
                with pytest.raises(KeyError):
                    e.load()
class test_Command:
    """Unit tests for celery.bin.base.Command."""

    def test_get_options(self):
        cmd = Command()
        cmd.option_list = (1, 2, 3)
        assert cmd.get_options() == (1, 2, 3)

    def test_custom_description(self):

        class C(Command):
            description = 'foo'

        c = C()
        assert c.description == 'foo'

    def test_format_epilog(self):
        assert Command()._format_epilog('hello')
        assert not Command()._format_epilog('')

    def test_format_description(self):
        assert Command()._format_description('hello')

    def test_register_callbacks(self):
        c = Command(on_error=8, on_usage_error=9)
        assert c.on_error == 8
        assert c.on_usage_error == 9

    def test_run_raises_UsageError(self):
        cb = Mock()
        c = Command(on_usage_error=cb)
        c.verify_args = Mock()
        c.run = Mock()
        exc = c.run.side_effect = c.UsageError('foo', status=3)
        assert c() == exc.status
        cb.assert_called_with(exc)
        c.verify_args.assert_called_with(())

    def test_default_on_usage_error(self):
        cmd = Command()
        cmd.handle_error = Mock()
        exc = Exception()
        cmd.on_usage_error(exc)
        cmd.handle_error.assert_called_with(exc)

    def test_verify_args_missing(self):
        c = Command()

        def run(a, b, c):
            pass
        c.run = run

        with pytest.raises(c.UsageError):
            c.verify_args((1,))
        c.verify_args((1, 2, 3))

    def test_run_interface(self):
        with pytest.raises(NotImplementedError):
            Command().run()

    @patch('sys.stdout')
    def test_early_version(self, stdout):
        cmd = Command()
        with pytest.raises(SystemExit):
            cmd.early_version(['--version'])

    def test_execute_from_commandline(self, app):
        cmd = MockCommand(app=app)
        args1, kwargs1 = cmd.execute_from_commandline()       # sys.argv
        assert args1 == cmd.mock_args
        assert kwargs1['foo'] == 'bar'
        assert kwargs1.get('prog_name')
        args2, kwargs2 = cmd.execute_from_commandline(['foo'])   # pass list
        assert args2 == cmd.mock_args
        assert kwargs2['foo'] == 'bar'
        assert kwargs2['prog_name'] == 'foo'

    def test_with_bogus_args(self, app):
        with mock.stdouts() as (_, stderr):
            cmd = MockCommand(app=app)
            cmd.supports_args = False
            with pytest.raises(SystemExit):
                cmd.execute_from_commandline(argv=['--bogus'])
        assert stderr.getvalue()
        assert 'Unrecognized' in stderr.getvalue()

    def test_with_custom_config_module(self, app):
        prev = os.environ.pop('CELERY_CONFIG_MODULE', None)
        try:
            cmd = MockCommand(app=app)
            cmd.setup_app_from_commandline(['--config=foo.bar.baz'])
            assert os.environ.get('CELERY_CONFIG_MODULE') == 'foo.bar.baz'
        finally:
            if prev:
                os.environ['CELERY_CONFIG_MODULE'] = prev
            else:
                os.environ.pop('CELERY_CONFIG_MODULE', None)

    def test_with_custom_broker(self, app):
        prev = os.environ.pop('CELERY_BROKER_URL', None)
        try:
            cmd = MockCommand(app=app)
            cmd.setup_app_from_commandline(['--broker=xyzza://'])
            assert os.environ.get('CELERY_BROKER_URL') == 'xyzza://'
        finally:
            if prev:
                os.environ['CELERY_BROKER_URL'] = prev
            else:
                os.environ.pop('CELERY_BROKER_URL', None)

    def test_with_custom_result_backend(self, app):
        prev = os.environ.pop('CELERY_RESULT_BACKEND', None)
        try:
            cmd = MockCommand(app=app)
            cmd.setup_app_from_commandline(['--result-backend=xyzza://'])
            assert os.environ.get('CELERY_RESULT_BACKEND') == 'xyzza://'
        finally:
            if prev:
                os.environ['CELERY_RESULT_BACKEND'] = prev
            else:
                os.environ.pop('CELERY_RESULT_BACKEND', None)

    def test_with_custom_app(self, app):
        cmd = MockCommand(app=app)
        appstr = '.'.join([__name__, 'APP'])
        cmd.setup_app_from_commandline(['--app=%s' % (appstr,),
                                        '--loglevel=INFO'])
        assert cmd.app is APP
        cmd.setup_app_from_commandline(['-A', appstr,
                                        '--loglevel=INFO'])
        assert cmd.app is APP

    def test_setup_app_sets_quiet(self, app):
        cmd = MockCommand(app=app)
        cmd.setup_app_from_commandline(['-q'])
        assert cmd.quiet
        cmd2 = MockCommand(app=app)
        cmd2.setup_app_from_commandline(['--quiet'])
        assert cmd2.quiet

    def test_setup_app_sets_chdir(self, app):
        with patch('os.chdir') as chdir:
            cmd = MockCommand(app=app)
            cmd.setup_app_from_commandline(['--workdir=/opt'])
            chdir.assert_called_with('/opt')

    def test_setup_app_sets_loader(self, app):
        prev = os.environ.get('CELERY_LOADER')
        try:
            cmd = MockCommand(app=app)
            cmd.setup_app_from_commandline(['--loader=X.Y:Z'])
            assert os.environ['CELERY_LOADER'] == 'X.Y:Z'
        finally:
            if prev is not None:
                os.environ['CELERY_LOADER'] = prev
            else:
                del(os.environ['CELERY_LOADER'])

    def test_setup_app_no_respect(self, app):
        cmd = MockCommand(app=app)
        cmd.respects_app_option = False
        with patch('celery.bin.base.Celery') as cp:
            cmd.setup_app_from_commandline(['--app=x.y:z'])
            cp.assert_called()

    def test_setup_app_custom_app(self, app):
        cmd = MockCommand(app=app)
        app = cmd.app = Mock()
        app.user_options = {'preload': None}
        cmd.setup_app_from_commandline([])
        assert cmd.app == app

    def test_find_app_suspects(self, app):
        cmd = MockCommand(app=app)
        assert cmd.find_app('t.unit.bin.proj.app')
        assert cmd.find_app('t.unit.bin.proj')
        assert cmd.find_app('t.unit.bin.proj:hello')
        assert cmd.find_app('t.unit.bin.proj.hello')
        assert cmd.find_app('t.unit.bin.proj.app:app')
        assert cmd.find_app('t.unit.bin.proj.app.app')
        with pytest.raises(AttributeError):
            cmd.find_app('t.unit.bin')
        with pytest.raises(AttributeError):
            cmd.find_app(__name__)

    def test_ask(self, app, patching):
        try:
            input = patching('celery.bin.base.input')
        except AttributeError:
            input = patching('builtins.input')
        cmd = MockCommand(app=app)
        input.return_value = 'yes'
        assert cmd.ask('q', ('yes', 'no'), 'no') == 'yes'
        input.return_value = 'nop'
        assert cmd.ask('q', ('yes', 'no'), 'no') == 'no'

    def test_host_format(self, app):
        cmd = MockCommand(app=app)
        with patch('celery.utils.nodenames.gethostname') as hn:
            hn.return_value = 'blacktron.example.com'
            # Fix: the expected hostnames below were mangled by e-mail
            # obfuscation ('[email protected]'); reconstructed from the
            # patched gethostname value ('blacktron.example.com').
            assert cmd.host_format('') == ''
            assert (cmd.host_format('celery@%h') ==
                    'celery@blacktron.example.com')
            assert cmd.host_format('celery@%d') == 'celery@example.com'
            assert cmd.host_format('celery@%n') == 'celery@blacktron'

    def test_say_chat_quiet(self, app):
        cmd = MockCommand(app=app)
        cmd.quiet = True
        assert cmd.say_chat('<-', 'foo', 'foo') is None

    def test_say_chat_show_body(self, app):
        cmd = MockCommand(app=app)
        cmd.out = Mock()
        cmd.show_body = True
        cmd.say_chat('->', 'foo', 'body')
        cmd.out.assert_called_with('body')

    def test_say_chat_no_body(self, app):
        cmd = MockCommand(app=app)
        cmd.out = Mock()
        cmd.show_body = False
        cmd.say_chat('->', 'foo', 'body')

    @pytest.mark.usefixtures('depends_on_current_app')
    def test_with_cmdline_config(self, app):
        cmd = MockCommand(app=app)
        cmd.enable_config_from_cmdline = True
        cmd.namespace = 'worker'
        rest = cmd.setup_app_from_commandline(argv=[
            '--loglevel=INFO', '--',
            'result.backend=redis://backend.example.com',
            'broker.url=amqp://broker.example.com',
            '.prefetch_multiplier=100'])
        assert cmd.app.conf.result_backend == 'redis://backend.example.com'
        assert cmd.app.conf.broker_url == 'amqp://broker.example.com'
        assert cmd.app.conf.worker_prefetch_multiplier == 100
        assert rest == ['--loglevel=INFO']

        cmd.app = None
        cmd.get_app = Mock(name='get_app')
        cmd.get_app.return_value = app
        app.user_options['preload'] = [
            Option('--foo', action='store_true'),
        ]
        cmd.setup_app_from_commandline(argv=[
            '--foo', '--loglevel=INFO', '--',
            'broker.url=amqp://broker.example.com',
            '.prefetch_multiplier=100'])
        assert cmd.app is cmd.get_app()

    def test_get_default_app(self, app, patching):
        patching('celery._state.get_current_app')
        cmd = MockCommand(app=app)
        from celery._state import get_current_app
        assert cmd._get_default_app() is get_current_app()

    def test_set_colored(self, app):
        cmd = MockCommand(app=app)
        cmd.colored = 'foo'
        assert cmd.colored == 'foo'

    def test_set_no_color(self, app):
        cmd = MockCommand(app=app)
        cmd.no_color = False
        _ = cmd.colored  # noqa
        cmd.no_color = True
        assert not cmd.colored.enabled

    def test_find_app(self, app):
        cmd = MockCommand(app=app)
        with patch('celery.utils.imports.symbol_by_name') as sbn:
            from types import ModuleType
            x = ModuleType(bytes_if_py2('proj'))

            def on_sbn(*args, **kwargs):

                def after(*args, **kwargs):
                    x.app = 'quick brown fox'
                    x.__path__ = None
                    return x
                sbn.side_effect = after
                return x
            sbn.side_effect = on_sbn
            x.__path__ = [True]
            assert cmd.find_app('proj') == 'quick brown fox'

    def test_parse_preload_options_shortopt(self):

        class TestCommand(Command):

            def add_preload_arguments(self, parser):
                parser.add_argument('-s', action='store', dest='silent')
        cmd = TestCommand()
        acc = cmd.parse_preload_options(['-s', 'yes'])
        assert acc.get('silent') == 'yes'

    def test_parse_preload_options_with_equals_and_append(self):

        class TestCommand(Command):

            def add_preload_arguments(self, parser):
                parser.add_argument('--zoom', action='append', default=[])
        # Bug fix: this used to instantiate the plain Command (which never
        # registers --zoom) and assert 'acc' truthiness with a dict message
        # ("assert acc, {...}"), comparing nothing at all.
        cmd = TestCommand()
        acc = cmd.parse_preload_options(['--zoom=1', '--zoom=2'])
        assert acc.get('zoom') == ['1', '2']

    def test_parse_preload_options_without_equals_and_append(self):
        cmd = Command()
        opt = Option('--zoom', action='append', default=[])
        cmd.preload_options = (opt,)
        acc = cmd.parse_preload_options(['--zoom', '1', '--zoom', '2'])
        # Bug fix: "assert acc, {...}" only checked truthiness; compare the
        # accumulated values for real.
        assert acc.get('zoom') == ['1', '2']
|
apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.